hip_filename (stringlengths 5-84) | hip_content (stringlengths 79-9.69M) | cuda_filename (stringlengths 4-83) | cuda_content (stringlengths 19-9.69M)
---|---|---|---|
611e9bac2de16e979190f3bb965ee62155b76e30.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file gdf-csr.cu code to convert a GDF matrix into a CSR
*
*/
#include <cudf/cudf.h>
#include <cudf/utilities/error.hpp>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
using namespace std;
//--- all the private functions
template<typename T>
gdf_error runConverter(gdf_column **gdfData, csr_gdf *csrReturn, cudf::size_type * offsets);
//--- private CUDA functions / kernels
template<typename T>
__global__ void cudaCreateCSR(void *data, cudf::valid_type *valid, gdf_dtype dtype, int colID, T *A, int64_t *JA, cudf::size_type *offsets, cudf::size_type numRows);
__global__ void determineValidRecCount(cudf::valid_type *validArray, cudf::size_type numRows, cudf::size_type numCol, cudf::size_type * offset);
template<typename T>
__device__ T convertDataElement(gdf_column *gdf, int idx, gdf_dtype dtype);
__device__ int whichBitmapCSR(int record) { return (record/8); }
__device__ int whichBitCSR(int bit) { return (bit % 8); }
__device__ int checkBitCSR(cudf::valid_type data, int bit) {
cudf::valid_type bitMask[8] = {1, 2, 4, 8, 16, 32, 64, 128};
return (data & bitMask[bit]);
}
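// Illustrative helper (added; not part of the original file): shows how the three helpers
// above combine to test whether a given row is marked valid in the bitmap. The name
// isRowValidCSR is hypothetical and nothing below calls it.
__device__ int isRowValidCSR(cudf::valid_type *valid, int row) { return checkBitCSR(valid[whichBitmapCSR(row)], whichBitCSR(row)); }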
//
//------------------------------------------------------------
//
/*
* Convert a Dense GDF into a CSR GDF
*
* Restrictions: All columns need to be of the same length
*/
/**
* @brief convert a GDF into a CSR
*
 * Take a matrix in GDF format and convert it into a CSR. The column-major matrix needs to have every column defined.
 * Passing in a COO dataset will be treated as a two-column matrix
*
* @param[in] gdfData the ordered list of columns
* @param[in] numCol the number of columns in the gdfData array
*
* @param[out] csrReturn a pointer to the returned data structure
*
* @return gdf_error code
*/
gdf_error gdf_to_csr(gdf_column **gdfData, int numCol, csr_gdf *csrReturn) {
int64_t numNull = 0;
int64_t nnz = 0;
cudf::size_type numRows = gdfData[0]->size;
gdf_dtype dType = gdf_dtype::GDF_invalid; // the data type to make the CSR element array (A)
/**
 * Currently the gdf_dtype enum is arranged based on data size; as long as it stays that way, the enum values can be
 * exploited by just picking the largest enum value
*
* While looping, also get the number of null values (this will work one day)
*/
for ( int x =0; x < numCol; x++) {
if( gdfData[x]->dtype > dType)
dType = gdfData[x]->dtype;
numNull += gdfData[x]->null_count;
}
if (dType == gdf_dtype::GDF_invalid || dType == gdf_dtype::GDF_STRING )
return gdf_error::GDF_UNSUPPORTED_DTYPE;
// the number of valid elements is simply the max number of possible elements (rows * columns) minus the number of nulls
// the current problem is that algorithms are not setting null_count;
// cudf::size_type is 32bits (int) but the total size could be larger than an int, so use a long
nnz = (numRows * numCol) - numNull;
// Allocate space for the offset - this will eventually be IA - dtype is long since the sum of all column elements could be larger than int32
cudf::size_type * offsets;
RMM_TRY(RMM_ALLOC((void**)&offsets, (numRows + 2) * sizeof(int64_t), 0)); // TODO: non-default stream?
CUDA_TRY(hipMemset(offsets, 0, ( sizeof(int64_t) * (numRows + 2) ) ));
// do a pass over each column, and have each column update the row count
//-- threads and blocks
int threads = 1024;
int blocks = (numRows + threads - 1) / threads;
for ( int x = 0; x < numCol; x++ ) {
hipLaunchKernelGGL(( determineValidRecCount), dim3(blocks), dim3(threads), 0, 0, gdfData[x]->valid, numRows, numCol, offsets);
}
//--------------------------------------------------------------------------------------
// Now do an exclusive scan to compute the offsets for where to write data
thrust::exclusive_scan(rmm::exec_policy()->on(0), offsets, (offsets + numRows + 1), offsets);
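// Worked example (added comment): if the per-row valid counts produced above are {2, 0, 3, 1}
// for numRows = 4, the exclusive scan rewrites offsets to {0, 2, 2, 5, 6}; the final entry,
// offsets[numRows], is the total number of stored elements, nnz = 6.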
//--------------------------------------------------------------------------------------
// get the number of elements - NNZ, this is the last item in the array
CUDA_TRY( hipMemcpy((void *)&nnz, (void *)&offsets[numRows], sizeof(int64_t), hipMemcpyDeviceToHost) );
if ( nnz == 0)
return GDF_CUDA_ERROR;
//--------------------------------------------------------------------------------------
// now start creating output data
cudf::size_type* IA;
RMM_TRY(RMM_ALLOC((void**)&IA, (numRows + 2) * sizeof(cudf::size_type), 0));
CUDA_TRY(hipMemcpy(IA, offsets, ( sizeof(cudf::size_type) * (numRows + 2) ), hipMemcpyDeviceToDevice) );
int64_t * JA;
RMM_TRY( RMM_ALLOC((void**)&JA, (sizeof(int64_t) * nnz), 0));
//----------------------------------------------------------------------------------
// Now just missing A and the moving of data
csrReturn->dtype = dType;
csrReturn->rows = numRows;
csrReturn->cols = numCol;
csrReturn->dtype = dType;
csrReturn->JA = JA;
csrReturn->IA = IA;
csrReturn->nnz = nnz;
// Start processing based on data type
gdf_error status = GDF_SUCCESS;
switch(dType) {
case gdf_dtype::GDF_INT8:
status = runConverter<int8_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT16:
status = runConverter<int16_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT32:
status = runConverter<int32_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT64:
status = runConverter<int64_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_FLOAT32:
status = runConverter<float>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_FLOAT64:
status = runConverter<double>(gdfData, csrReturn, offsets);
break;
default:
RMM_TRY(RMM_FREE(IA, 0));
RMM_TRY(RMM_FREE(JA, 0));
RMM_TRY(RMM_FREE(offsets, 0));
return GDF_UNSUPPORTED_DTYPE;
}
RMM_TRY(RMM_FREE(offsets, 0));
return status;
}
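// Illustrative usage sketch (added; not part of the original file). The wrapper name is
// hypothetical; it assumes the caller has already built `cols`, an array of equally sized
// gdf_column pointers, and an empty csr_gdf to be filled.
static gdf_error gdf_to_csr_example(gdf_column **cols, int num_cols, csr_gdf *csr_out) {
gdf_error err = gdf_to_csr(cols, num_cols, csr_out);
if (err != GDF_SUCCESS) return err; // IA, JA and A are only valid on success
// On success: csr_out->nnz stored elements, csr_out->IA row offsets, csr_out->JA column
// indices, csr_out->A values (RMM device allocations the caller must eventually free).
return GDF_SUCCESS;
}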
template<typename T>
gdf_error runConverter(gdf_column **gdfData, csr_gdf *csrReturn, cudf::size_type * offsets) {
cudf::size_type numCols = csrReturn->cols;
cudf::size_type numRows = csrReturn->rows;
//-- threads and blocks
int threads = 1024;
if ( numRows < 100 ) {
threads = 64;
} else if (numRows < 256) {
threads = 128;
} else if ( numRows < 512) {
threads = 256;
} else if ( numRows < 1024) {
threads = 512;
}
int blocks = (numRows + threads - 1) / threads;
T * A;
RMM_TRY(RMM_ALLOC((void**)&A, (sizeof(T) * csrReturn->nnz), 0));
CUDA_TRY(hipMemset(A, 0, (sizeof(T) * csrReturn->nnz)));
// Now start moving the data and creating the CSR
for ( cudf::size_type colId = 0; colId < numCols; colId++ ) {
gdf_column *gdf = gdfData[colId];
hipLaunchKernelGGL(( cudaCreateCSR<T>), dim3(blocks), dim3(threads), 0, 0, gdf->data, gdf->valid, gdf->dtype, colId, A, csrReturn->JA, offsets, numRows);
CUDA_CHECK_LAST();
}
csrReturn->A = A;
return gdf_error::GDF_SUCCESS;
}
/*
 * Move data over into CSR and possibly convert the format
*/
template<typename T>
__global__ void cudaCreateCSR(
void *data, cudf::valid_type *valid, gdf_dtype dtype, int colId,
T *A, int64_t *JA, cudf::size_type *offsets, cudf::size_type numRows)
{
int tid = threadIdx.x + (blockDim.x * blockIdx.x); // get the thread ID, which is also the row number
if ( tid >= numRows)
return;
int bitmapIdx = whichBitmapCSR(tid); // which bitmap
int bitIdx = whichBitCSR(tid); // which bit - over an 8-bit index
cudf::valid_type bitmap = valid[bitmapIdx];
if ( checkBitCSR( bitmap, bitIdx) ) {
cudf::size_type offsetIdx = offsets[tid]; // where should this thread start writing data
A[offsetIdx] = convertDataElement<T>(data, tid, dtype);
JA[offsetIdx] = colId;
++offsets[tid];
}
}
/*
 * Compute the number of valid entries per row - a row spans multiple gdf_columns -
 * There is one thread running per row, so just compute the sum for this row.
 *
 * The number of elements in a valid array is actually ceil(numRows / 8) since it is a bitmap. The total number of bits checked is equal to numRows.
*
*/
__global__ void determineValidRecCount(cudf::valid_type *valid, cudf::size_type numRows, cudf::size_type numCol, cudf::size_type * offset) {
int tid = threadIdx.x + (blockDim.x * blockIdx.x); // get the thread ID, which is also the row number
if ( tid >= numRows)
return;
int bitmapIdx = whichBitmapCSR(tid); // want the floor of the divide
int bitIdx = whichBitCSR(tid); // which bit - over an 8-bit index
cudf::valid_type bitmap = valid[bitmapIdx];
if (checkBitCSR( bitmap, bitIdx) )
++offset[tid];
}
/**
* Convert the data element into a common format
*/
template<typename T>
__device__ T convertDataElement(void *data, int tid, gdf_dtype dtype) {
T answer;
switch(dtype) {
case gdf_dtype::GDF_INT8: {
int8_t *a = (int8_t *)data;
answer = (T)(a[tid]);
break;
}
case gdf_dtype::GDF_INT16: {
int16_t *b = (int16_t *)data;
answer = (T)(b[tid]);
break;
}
case gdf_dtype::GDF_INT32: {
int32_t *c = (int32_t *)data;
answer = (T)(c[tid]);
break;
}
case gdf_dtype::GDF_INT64: {
int64_t *d = (int64_t *)data;
answer = (T)(d[tid]);
break;
}
case gdf_dtype::GDF_FLOAT32: {
float *e = (float *)data;
answer = (T)(e[tid]);
break;
}
case gdf_dtype::GDF_FLOAT64: {
double *f = (double *)data;
answer = (T)(f[tid]);
break;
}
}
return answer;
}
| 611e9bac2de16e979190f3bb965ee62155b76e30.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file gdf-csr.cu code to convert a GDF matrix into a CSR
*
*/
#include <cudf/cudf.h>
#include <cudf/utilities/error.hpp>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
using namespace std;
//--- all the private functions
template<typename T>
gdf_error runConverter(gdf_column **gdfData, csr_gdf *csrReturn, cudf::size_type * offsets);
//--- private CUDA functions / kernels
template<typename T>
__global__ void cudaCreateCSR(void *data, cudf::valid_type *valid, gdf_dtype dtype, int colID, T *A, int64_t *JA, cudf::size_type *offsets, cudf::size_type numRows);
__global__ void determineValidRecCount(cudf::valid_type *validArray, cudf::size_type numRows, cudf::size_type numCol, cudf::size_type * offset);
template<typename T>
__device__ T convertDataElement(gdf_column *gdf, int idx, gdf_dtype dtype);
__device__ int whichBitmapCSR(int record) { return (record/8); }
__device__ int whichBitCSR(int bit) { return (bit % 8); }
__device__ int checkBitCSR(cudf::valid_type data, int bit) {
cudf::valid_type bitMask[8] = {1, 2, 4, 8, 16, 32, 64, 128};
return (data & bitMask[bit]);
}
//
//------------------------------------------------------------
//
/*
* Convert a Dense GDF into a CSR GDF
*
* Restrictions: All columns need to be of the same length
*/
/**
* @brief convert a GDF into a CSR
*
 * Take a matrix in GDF format and convert it into a CSR. The column-major matrix needs to have every column defined.
 * Passing in a COO dataset will be treated as a two-column matrix
*
* @param[in] gdfData the ordered list of columns
* @param[in] numCol the number of columns in the gdfData array
*
* @param[out] csrReturn a pointer to the returned data structure
*
* @return gdf_error code
*/
gdf_error gdf_to_csr(gdf_column **gdfData, int numCol, csr_gdf *csrReturn) {
int64_t numNull = 0;
int64_t nnz = 0;
cudf::size_type numRows = gdfData[0]->size;
gdf_dtype dType = gdf_dtype::GDF_invalid; // the data type to make the CSR element array (A)
/**
 * Currently the gdf_dtype enum is arranged based on data size; as long as it stays that way, the enum values can be
 * exploited by just picking the largest enum value
*
* While looping, also get the number of null values (this will work one day)
*/
for ( int x =0; x < numCol; x++) {
if( gdfData[x]->dtype > dType)
dType = gdfData[x]->dtype;
numNull += gdfData[x]->null_count;
}
if (dType == gdf_dtype::GDF_invalid || dType == gdf_dtype::GDF_STRING )
return gdf_error::GDF_UNSUPPORTED_DTYPE;
// the number of valid elements is simply the max number of possible elements (rows * columns) minus the number of nulls
// the current problem is that algorithms are not setting null_count;
// cudf::size_type is 32bits (int) but the total size could be larger than an int, so use a long
nnz = (numRows * numCol) - numNull;
// Allocate space for the offset - this will eventually be IA - dtype is long since the sum of all column elements could be larger than int32
cudf::size_type * offsets;
RMM_TRY(RMM_ALLOC((void**)&offsets, (numRows + 2) * sizeof(int64_t), 0)); // TODO: non-default stream?
CUDA_TRY(cudaMemset(offsets, 0, ( sizeof(int64_t) * (numRows + 2) ) ));
// do a pass over each column, and have each column update the row count
//-- threads and blocks
int threads = 1024;
int blocks = (numRows + threads - 1) / threads;
for ( int x = 0; x < numCol; x++ ) {
determineValidRecCount<<<blocks, threads>>>(gdfData[x]->valid, numRows, numCol, offsets);
}
//--------------------------------------------------------------------------------------
// Now do an exclusive scan to compute the offsets for where to write data
thrust::exclusive_scan(rmm::exec_policy()->on(0), offsets, (offsets + numRows + 1), offsets);
//--------------------------------------------------------------------------------------
// get the number of elements - NNZ, this is the last item in the array
CUDA_TRY( cudaMemcpy((void *)&nnz, (void *)&offsets[numRows], sizeof(int64_t), cudaMemcpyDeviceToHost) );
if ( nnz == 0)
return GDF_CUDA_ERROR;
//--------------------------------------------------------------------------------------
// now start creating output data
cudf::size_type* IA;
RMM_TRY(RMM_ALLOC((void**)&IA, (numRows + 2) * sizeof(cudf::size_type), 0));
CUDA_TRY(cudaMemcpy(IA, offsets, ( sizeof(cudf::size_type) * (numRows + 2) ), cudaMemcpyDeviceToDevice) );
int64_t * JA;
RMM_TRY( RMM_ALLOC((void**)&JA, (sizeof(int64_t) * nnz), 0));
//----------------------------------------------------------------------------------
// Now just missing A and the moving of data
csrReturn->dtype = dType;
csrReturn->rows = numRows;
csrReturn->cols = numCol;
csrReturn->dtype = dType;
csrReturn->JA = JA;
csrReturn->IA = IA;
csrReturn->nnz = nnz;
// Start processing based on data type
gdf_error status = GDF_SUCCESS;
switch(dType) {
case gdf_dtype::GDF_INT8:
status = runConverter<int8_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT16:
status = runConverter<int16_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT32:
status = runConverter<int32_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_INT64:
status = runConverter<int64_t>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_FLOAT32:
status = runConverter<float>(gdfData, csrReturn, offsets);
break;
case gdf_dtype::GDF_FLOAT64:
status = runConverter<double>(gdfData, csrReturn, offsets);
break;
default:
RMM_TRY(RMM_FREE(IA, 0));
RMM_TRY(RMM_FREE(JA, 0));
RMM_TRY(RMM_FREE(offsets, 0));
return GDF_UNSUPPORTED_DTYPE;
}
RMM_TRY(RMM_FREE(offsets, 0));
return status;
}
template<typename T>
gdf_error runConverter(gdf_column **gdfData, csr_gdf *csrReturn, cudf::size_type * offsets) {
cudf::size_type numCols = csrReturn->cols;
cudf::size_type numRows = csrReturn->rows;
//-- threads and blocks
int threads = 1024;
if ( numRows < 100 ) {
threads = 64;
} else if (numRows < 256) {
threads = 128;
} else if ( numRows < 512) {
threads = 256;
} else if ( numRows < 1024) {
threads = 512;
}
int blocks = (numRows + threads - 1) / threads;
T * A;
RMM_TRY(RMM_ALLOC((void**)&A, (sizeof(T) * csrReturn->nnz), 0));
CUDA_TRY(cudaMemset(A, 0, (sizeof(T) * csrReturn->nnz)));
// Now start moving the data and creating the CSR
for ( cudf::size_type colId = 0; colId < numCols; colId++ ) {
gdf_column *gdf = gdfData[colId];
cudaCreateCSR<T><<<blocks, threads>>>(gdf->data, gdf->valid, gdf->dtype, colId, A, csrReturn->JA, offsets, numRows);
CUDA_CHECK_LAST();
}
csrReturn->A = A;
return gdf_error::GDF_SUCCESS;
}
/*
 * Move data over into CSR and possibly convert the format
*/
template<typename T>
__global__ void cudaCreateCSR(
void *data, cudf::valid_type *valid, gdf_dtype dtype, int colId,
T *A, int64_t *JA, cudf::size_type *offsets, cudf::size_type numRows)
{
int tid = threadIdx.x + (blockDim.x * blockIdx.x); // get the thread ID, which is also the row number
if ( tid >= numRows)
return;
int bitmapIdx = whichBitmapCSR(tid); // which bitmap
int bitIdx = whichBitCSR(tid); // which bit - over an 8-bit index
cudf::valid_type bitmap = valid[bitmapIdx];
if ( checkBitCSR( bitmap, bitIdx) ) {
cudf::size_type offsetIdx = offsets[tid]; // where should this thread start writing data
A[offsetIdx] = convertDataElement<T>(data, tid, dtype);
JA[offsetIdx] = colId;
++offsets[tid];
}
}
/*
 * Compute the number of valid entries per row - a row spans multiple gdf_columns -
 * There is one thread running per row, so just compute the sum for this row.
 *
 * The number of elements in a valid array is actually ceil(numRows / 8) since it is a bitmap. The total number of bits checked is equal to numRows.
*
*/
__global__ void determineValidRecCount(cudf::valid_type *valid, cudf::size_type numRows, cudf::size_type numCol, cudf::size_type * offset) {
int tid = threadIdx.x + (blockDim.x * blockIdx.x); // get the thread ID, which is also the row number
if ( tid >= numRows)
return;
int bitmapIdx = whichBitmapCSR(tid); // want the floor of the divide
int bitIdx = whichBitCSR(tid); // which bit - over an 8-bit index
cudf::valid_type bitmap = valid[bitmapIdx];
if (checkBitCSR( bitmap, bitIdx) )
++offset[tid];
}
/**
* Convert the data element into a common format
*/
template<typename T>
__device__ T convertDataElement(void *data, int tid, gdf_dtype dtype) {
T answer;
switch(dtype) {
case gdf_dtype::GDF_INT8: {
int8_t *a = (int8_t *)data;
answer = (T)(a[tid]);
break;
}
case gdf_dtype::GDF_INT16: {
int16_t *b = (int16_t *)data;
answer = (T)(b[tid]);
break;
}
case gdf_dtype::GDF_INT32: {
int32_t *c = (int32_t *)data;
answer = (T)(c[tid]);
break;
}
case gdf_dtype::GDF_INT64: {
int64_t *d = (int64_t *)data;
answer = (T)(d[tid]);
break;
}
case gdf_dtype::GDF_FLOAT32: {
float *e = (float *)data;
answer = (T)(e[tid]);
break;
}
case gdf_dtype::GDF_FLOAT64: {
double *f = (double *)data;
answer = (T)(f[tid]);
break;
}
}
return answer;
}
|
4d055ececc25ef685714fbdb8710ab9ca9c1f507.hip | // !!! This is a file automatically generated by hipify!!!
#include "objective_function.h"
#include "fourier_filter.h"
void cuda_newton_gmres( objective_function *obj, double* state_d, int *max_iterations, double threshold, void *params, int inner, int outer, double *gmres_residual_series, double *normF_series, double *normJtF_series ){
/*
PURPOSE:
The purpose of this function is to find a zero (or minimum) of an objective function dependent on a large number of real variables.
Traditional generalized Newton's method relies on inverting the Jacobian or computing a pseudo-inverse. When the number of variables is large (>10^6),
this computation is infeasible on useful timescales. GMRES is used to approximate a Newton step.
INPUT:
objective_function *obj - the function we are trying to minimize.
double *state_d - the initial guess for the minimal point
int max_iterations - the number of Newton steps this function is allowed to take.
double threshold - Newton will exit if the norm of the objective function falls below this threshold
void *params - a pointer to parameters needed by the objective function. NULL if you don't need anything
int inner - number of inner iterations used in GMRES
OUTPUT:
double *state_d - will be overwritten to contain the new estimate of minimum
*/
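/*
Added outline (not in the original source): each Newton iteration below approximately
solves J(state)*step = F(state) with restarted GMRES - the Krylov basis is accumulated
in q_d (Arnoldi), the small (inner+1) x inner Hessenberg least-squares problem is solved
on the CPU with GSL, and the resulting step is applied as state -= DAMP*step. For
over-constrained problems the same machinery is applied to the normal equations
J'*J*step = J'*F.
*/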
clock_t start, stop; //Used for timing GMRES
start = clock();
int input_dim = obj->input_dim;
int output_dim = obj->output_dim;
if( output_dim < input_dim ){
printf("GMRES did not launch: Your problem is underconstrained.\n");
return;
}
int overconstrained = output_dim > input_dim;
double normF, normJtF;
double residual;
double h_element;
//CUBLAS requires a handle
hipblasHandle_t handle;
hipblasCreate( &handle );
//GPU VECTORS
double *F_d; //objective function on the GPU
double *JtF_d; //J'*F on the GPU. Used to minimize over-constrained problems
double *Jq_d;
double *JtJq_d;
double *step_d;
double *q_col_d; //Doesn't need allocated
double *q_col2_d; //Doesn't need allocated
double *y_d;
hipMalloc( &F_d, output_dim*sizeof(double) );
hipMalloc( &JtF_d, input_dim *sizeof(double) );
hipMalloc( &Jq_d, output_dim*sizeof(double) );
hipMalloc( &JtJq_d, input_dim *sizeof(double) );
hipMalloc( &step_d, input_dim *sizeof(double) );
hipMalloc( &y_d, inner*sizeof(double) );
//GPU MATRICES
double *q_d; //Matrix containing orthonormal basis vectors of krylov subspace.
hipMalloc( &q_d, input_dim*(inner+1)*sizeof(double) );
gsl_matrix *h = gsl_matrix_calloc(inner+1, inner ); //Hessenberg form of matrix
gsl_matrix *hth = gsl_matrix_calloc(inner, inner ); //h'*h in matlab
gsl_vector *b2 = gsl_vector_calloc(inner+1); //The right hand side of GMRES. It's a trivial vector with one non-zero element.
gsl_vector *htb2 = gsl_vector_calloc(inner);
gsl_vector *y = gsl_vector_calloc(inner);
for(int i=0; i< *max_iterations; i++){
int outer_iterations = 0; //Reset each Newton step
//Step 1: check |F| or |b| = |J'*F| to monitor convergence
if(overconstrained){
EVAL (obj, F_d, state_d, params);
EVAL_Jt(obj, JtF_d, F_d, state_d, params);
hipblasDnrm2( handle, output_dim, F_d, 1, &normF );
hipblasDnrm2( handle, input_dim, JtF_d, 1, &normJtF );
hipDeviceSynchronize();
printf( "Iteration %d: |F| = %.9e, |J'*F| = %.9e\n", i, normF, normJtF );
normF_series[i] = normF;
normJtF_series[i] = normJtF;
if( normJtF < threshold ){
//Only look for exit condition from normJtF, which should be identically zero at a local minimum
printf("|J'*F| is less than specified threshold. Exiting Newton...\n");
return;
}
}
else{
//Not overconstrained
EVAL(obj, F_d, state_d, params);
hipblasDnrm2( handle, output_dim, F_d, 1, &normF );
hipDeviceSynchronize();
printf("Iteration %d: |F| = %.9e\n", i, normF );
if( normF < threshold ){
printf("|F| is less than specified threshold. Exiting Newton...\n");
(*max_iterations) = i+1;
return;
}
}
//Simplifies code to introduce pointer "work". This is where A*(column of q) lives
//It depends on if the problem is overconstrained
double *work;
work = overconstrained ? JtJq_d : Jq_d;
//Repeated iterations of GMRES start here with an updated step
restart:
if(overconstrained){
//In these lines, store "b - J'*J(step)" in the first column of q
EVAL_J (obj, Jq_d, step_d, state_d, params); // jq <- J(step)
EVAL_Jt(obj, work, Jq_d, state_d, params); // work <- J'(J(step))
double minus_one = -1;
hipblasDaxpy(handle, input_dim, &minus_one, JtF_d, 1, work, 1); //work <- work - JtF
}
else{
//In these lines, store "f - J(step)" in the first column of q
EVAL_J(obj, work, step_d, state_d, params); // jq <- J(step)
double minus_one = -1;
hipblasDaxpy(handle, input_dim, &minus_one, F_d, 1, work, 1); //work <- work - F
}
//Compute norm
hipblasDnrm2( handle, input_dim, work, 1, &h_element );
hipDeviceSynchronize(); //Sadly we need to wait for this norm to be computed.
double temp = -1/h_element;
hipblasDscal( handle, input_dim, &temp, work, 1 ); //work <- (F - J(step))/norm gives a unit vector!
gsl_vector_set(b2, 0, h_element ); //This is the only non-zero element of b2
//Set the first column of q to this unit vector
hipblasDcopy( handle, input_dim, work, 1, q_d, 1 ); //Copy this unit vector to the first column of q_d
for(int j=0; j<inner; j++){
q_col_d = &q_d[j*input_dim]; //pointer to the relevant column
if(overconstrained){
EVAL_J (obj, Jq_d, q_col_d, state_d, params);
EVAL_Jt(obj, work, Jq_d, state_d, params);
}
else{
EVAL_J(obj, work, q_col_d, state_d, params);
}
for(int k=0; k<=j; k++){
q_col2_d = &q_d[k*input_dim];
hipblasDdot( handle, input_dim, q_col2_d, 1, work, 1, &h_element);
hipDeviceSynchronize(); //Sadly we need to wait for this next step
gsl_matrix_set(h, k, j, h_element); //I'll stick with CPU implementation for now
//printf("h(%d,%d) = %.6e\n", k, j, h_element);
temp = -h_element;
hipblasDaxpy( handle, input_dim, &temp, q_col2_d, 1, work, 1 );
}
hipblasDnrm2( handle, input_dim, work, 1, &h_element );
hipDeviceSynchronize(); //Sadly we need to wait for this next step
gsl_matrix_set(h, j+1, j, h_element);
//printf("h(%d,%d) = %.6e\n", j+1, j, h_element);
temp = 1/h_element;
hipblasDscal( handle, input_dim, &temp, work, 1 );
hipblasDcopy( handle, input_dim, work, 1, &q_d[(j+1)*input_dim], 1 );
}
if(DEBUG){ print_vector(b2, "b2"); }
if(DEBUG){ print_matrix(h, "h"); }
//print_matrix(q, "q");
//Set hth to H'*H
gsl_blas_dgemm( CblasTrans, CblasNoTrans, 1, h, h, 0, hth );
//Set htb2 to H'*b2
gsl_blas_dgemv( CblasTrans, 1, h, b2, 0, htb2 );
//Solve the linear system with LU decomp
//Stolen from gsl documentation
//https://www.gnu.org/software/gsl/doc/html/linalg.html
gsl_permutation *p = gsl_permutation_alloc(inner); int s;
gsl_linalg_LU_decomp(hth, p, &s);
gsl_linalg_LU_solve(hth, p, htb2, y);
gsl_permutation_free(p);
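// Note (added comment): the two GSL calls above solve the normal equations h'*h*y = h'*b2,
// i.e. the least-squares problem min ||h*y - b2|| that defines the GMRES update in the
// Krylov basis stored in q_d.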
if(DEBUG){ print_vector(y, "y"); }
//Use y to update step
hipMemcpy( y_d, y->data, inner*sizeof(double), hipMemcpyHostToDevice );
temp = 1;
//Calling CUBLAS's gemv correctly is the trickiest part of this
hipblasDgemv( handle, // handle is needed for all CUBLAS operations
HIPBLAS_OP_N, // HIPBLAS_OP_N - no transpose, HIPBLAS_OP_T - transpose
input_dim, // number of rows of A
inner, // number of columns of A
&temp, // Alpha
q_d, // pointer to A
input_dim, // lda
y_d, // pointer to x
1, // incx
&temp, // Beta
step_d, // pointer to y
1 // incy
);
//q->size2--;//Hide last column of q
//gsl_blas_dgemv( CblasNoTrans, 1, q, y, 1, step ); //update step
//q->size2++;//Restore last column of q
//Check residual
if(overconstrained){
EVAL_J (obj, Jq_d, step_d, state_d, params);
EVAL_Jt(obj, JtJq_d, Jq_d, state_d, params);
double minus_one = -1;
hipblasDaxpy( handle, input_dim, &minus_one, JtF_d, 1, JtJq_d, 1);
hipblasDnrm2( handle, input_dim, JtJq_d, 1, &residual );
hipDeviceSynchronize();
residual = residual/normJtF;
}
else{
EVAL_J(obj, Jq_d, step_d, state_d, params); // jq <- J(step)
double minus_one = -1;
hipblasDaxpy( handle, input_dim, &minus_one, F_d, 1, Jq_d, 1);
hipblasDnrm2( handle, input_dim, Jq_d, 1, &residual );
hipDeviceSynchronize();
residual = residual/normF;
}
gmres_residual_series[i] = residual;
outer_iterations++; //congratulations you have completed an outer iteration
if( residual > 1e-5 && outer_iterations < outer ){
goto restart;
}
//Only display residual at end of iterations
printf("GMRES residual = %e\n", residual);
//Newton step
//feel free to damp and use like -0.1
temp = -DAMP; //How much to move along the Newton direction.
hipblasDaxpy( handle, input_dim, &temp, step_d, 1, state_d, 1);
fourier_filter_state( state_d );
//check for exiting
BREAK_IF_Q
}
//Vectors
hipFree( F_d );
hipFree( JtF_d );
hipFree( Jq_d );
hipFree( JtJq_d );
hipFree( step_d );
//Matrices
hipFree( q_d );
gsl_matrix_free(h);
gsl_matrix_free(hth);
gsl_vector_free(y);
gsl_vector_free(b2);
gsl_vector_free(htb2);
stop = clock();
printf("Newton-GMRES took %f seconds. Have a nice day!\n", (double)(stop - start)/CLOCKS_PER_SEC );
}
| 4d055ececc25ef685714fbdb8710ab9ca9c1f507.cu | #include "objective_function.h"
#include "fourier_filter.h"
void cuda_newton_gmres( objective_function *obj, double* state_d, int *max_iterations, double threshold, void *params, int inner, int outer, double *gmres_residual_series, double *normF_series, double *normJtF_series ){
/*
PURPOSE:
The purpose of this function is to find a zero (or minimum) of an objective function dependent on a large number of real variables.
Traditional generalized Newton's method relies on inverting the Jacobian or computing a pseudo-inverse. When the number of variables is large (>10^6),
this computation is infeasible on useful timescales. GMRES is used to approximate a Newton step.
INPUT:
objective_function *obj - the function we are trying to minimize.
double *state_d - the initial guess for the minimal point
int max_iterations - the number of Newton steps this function is allowed to take.
double threshold - Newton will exit if the norm of the objective function falls below this threshold
void *params - a pointer to parameters needed by the objective function. NULL if you don't need anything
int inner - number of inner iterations used in GMRES
OUTPUT:
double *state_d - will be overwritten to contain the new estimate of minimum
*/
clock_t start, stop; //Used for timing GMRES
start = clock();
int input_dim = obj->input_dim;
int output_dim = obj->output_dim;
if( output_dim < input_dim ){
printf("GMRES did not launch: Your problem is underconstrained.\n");
return;
}
int overconstrained = output_dim > input_dim;
double normF, normJtF;
double residual;
double h_element;
//CUBLAS requires a handle
cublasHandle_t handle;
cublasCreate( &handle );
//GPU VECTORS
double *F_d; //objective function on the GPU
double *JtF_d; //J'*F on the GPU. Used to minimize over-constrained problems
double *Jq_d;
double *JtJq_d;
double *step_d;
double *q_col_d; //Doesn't need allocated
double *q_col2_d; //Doesn't need allocated
double *y_d;
cudaMalloc( &F_d, output_dim*sizeof(double) );
cudaMalloc( &JtF_d, input_dim *sizeof(double) );
cudaMalloc( &Jq_d, output_dim*sizeof(double) );
cudaMalloc( &JtJq_d, input_dim *sizeof(double) );
cudaMalloc( &step_d, input_dim *sizeof(double) );
cudaMalloc( &y_d, inner*sizeof(double) );
//GPU MATRICES
double *q_d; //Matrix containing orthonormal basis vectors of krylov subspace.
cudaMalloc( &q_d, input_dim*(inner+1)*sizeof(double) );
gsl_matrix *h = gsl_matrix_calloc(inner+1, inner ); //Hessenberg form of matrix
gsl_matrix *hth = gsl_matrix_calloc(inner, inner ); //h'*h in matlab
gsl_vector *b2 = gsl_vector_calloc(inner+1); //The right hand side of GMRES. It's a trivial vector with one non-zero element.
gsl_vector *htb2 = gsl_vector_calloc(inner);
gsl_vector *y = gsl_vector_calloc(inner);
for(int i=0; i< *max_iterations; i++){
int outer_iterations = 0; //Reset each Newton step
//Step 1: check |F| or |b| = |J'*F| to monitor convergence
if(overconstrained){
EVAL (obj, F_d, state_d, params);
EVAL_Jt(obj, JtF_d, F_d, state_d, params);
cublasDnrm2( handle, output_dim, F_d, 1, &normF );
cublasDnrm2( handle, input_dim, JtF_d, 1, &normJtF );
cudaDeviceSynchronize();
printf( "Iteration %d: |F| = %.9e, |J'*F| = %.9e\n", i, normF, normJtF );
normF_series[i] = normF;
normJtF_series[i] = normJtF;
if( normJtF < threshold ){
//Only look for exit condition from normJtF, which should be identically zero at a local minimum
printf("|J'*F| is less than specified threshold. Exiting Newton...\n");
return;
}
}
else{
//Not overconstrained
EVAL(obj, F_d, state_d, params);
cublasDnrm2( handle, output_dim, F_d, 1, &normF );
cudaDeviceSynchronize();
printf("Iteration %d: |F| = %.9e\n", i, normF );
if( normF < threshold ){
printf("|F| is less than specified threshold. Exiting Newton...\n");
(*max_iterations) = i+1;
return;
}
}
//Simplifies code to introduce pointer "work". This is where A*(column of q) lives
//It depends on if the problem is overconstrained
double *work;
work = overconstrained ? JtJq_d : Jq_d;
//Repeated iterations of GMRES start here with an updated step
restart:
if(overconstrained){
//In these lines, store "b - J'*J(step)" in the first column of q
EVAL_J (obj, Jq_d, step_d, state_d, params); // jq <- J(step)
EVAL_Jt(obj, work, Jq_d, state_d, params); // work <- J'(J(step))
double minus_one = -1;
cublasDaxpy(handle, input_dim, &minus_one, JtF_d, 1, work, 1); //work <- work - JtF
}
else{
//In these lines, store "f - J(step)" in the first column of q
EVAL_J(obj, work, step_d, state_d, params); // jq <- J(step)
double minus_one = -1;
cublasDaxpy(handle, input_dim, &minus_one, F_d, 1, work, 1); //work <- work - F
}
//Compute norm
cublasDnrm2( handle, input_dim, work, 1, &h_element );
cudaDeviceSynchronize(); //Sadly we need to wait for this norm to be computed.
double temp = -1/h_element;
cublasDscal( handle, input_dim, &temp, work, 1 ); //work <- (F - J(step))/norm gives a unit vector!
gsl_vector_set(b2, 0, h_element ); //This is the only non-zero element of b2
//Set the first column of q to this unit vector
cublasDcopy( handle, input_dim, work, 1, q_d, 1 ); //Copy this unit vector to the first column of q_d
for(int j=0; j<inner; j++){
q_col_d = &q_d[j*input_dim]; //pointer to the relevant column
if(overconstrained){
EVAL_J (obj, Jq_d, q_col_d, state_d, params);
EVAL_Jt(obj, work, Jq_d, state_d, params);
}
else{
EVAL_J(obj, work, q_col_d, state_d, params);
}
for(int k=0; k<=j; k++){
q_col2_d = &q_d[k*input_dim];
cublasDdot( handle, input_dim, q_col2_d, 1, work, 1, &h_element);
cudaDeviceSynchronize(); //Sadly we need to wait for this next step
gsl_matrix_set(h, k, j, h_element); //I'll stick with CPU implementation for now
//printf("h(%d,%d) = %.6e\n", k, j, h_element);
temp = -h_element;
cublasDaxpy( handle, input_dim, &temp, q_col2_d, 1, work, 1 );
}
cublasDnrm2( handle, input_dim, work, 1, &h_element );
cudaDeviceSynchronize(); //Sadly we need to wait for this next step
gsl_matrix_set(h, j+1, j, h_element);
//printf("h(%d,%d) = %.6e\n", j+1, j, h_element);
temp = 1/h_element;
cublasDscal( handle, input_dim, &temp, work, 1 );
cublasDcopy( handle, input_dim, work, 1, &q_d[(j+1)*input_dim], 1 );
}
if(DEBUG){ print_vector(b2, "b2"); }
if(DEBUG){ print_matrix(h, "h"); }
//print_matrix(q, "q");
//Set hth to H'*H
gsl_blas_dgemm( CblasTrans, CblasNoTrans, 1, h, h, 0, hth );
//Set htb2 to H'*b2
gsl_blas_dgemv( CblasTrans, 1, h, b2, 0, htb2 );
//Solve the linear system with LU decomp
//Stolen from gsl documentation
//https://www.gnu.org/software/gsl/doc/html/linalg.html
gsl_permutation *p = gsl_permutation_alloc(inner); int s;
gsl_linalg_LU_decomp(hth, p, &s);
gsl_linalg_LU_solve(hth, p, htb2, y);
gsl_permutation_free(p);
if(DEBUG){ print_vector(y, "y"); }
//Use y to update step
cudaMemcpy( y_d, y->data, inner*sizeof(double), cudaMemcpyHostToDevice );
temp = 1;
//Calling CUBLAS's gemv correctly is the trickiest part of this
cublasDgemv( handle, // handle is needed for all CUBLAS operations
CUBLAS_OP_N, // CUBLAS_OP_N - no transpose, CUBLAS_OP_T - transpose
input_dim, // number of rows of A
inner, // number of columns of A
&temp, // Alpha
q_d, // pointer to A
input_dim, // lda
y_d, // pointer to x
1, // incx
&temp, // Beta
step_d, // pointer to y
1 // incy
);
//q->size2--;//Hide last column of q
//gsl_blas_dgemv( CblasNoTrans, 1, q, y, 1, step ); //update step
//q->size2++;//Restore last column of q
//Check residual
if(overconstrained){
EVAL_J (obj, Jq_d, step_d, state_d, params);
EVAL_Jt(obj, JtJq_d, Jq_d, state_d, params);
double minus_one = -1;
cublasDaxpy( handle, input_dim, &minus_one, JtF_d, 1, JtJq_d, 1);
cublasDnrm2( handle, input_dim, JtJq_d, 1, &residual );
cudaDeviceSynchronize();
residual = residual/normJtF;
}
else{
EVAL_J(obj, Jq_d, step_d, state_d, params); // jq <- J(step)
double minus_one = -1;
cublasDaxpy( handle, input_dim, &minus_one, F_d, 1, Jq_d, 1);
cublasDnrm2( handle, input_dim, Jq_d, 1, &residual );
cudaDeviceSynchronize();
residual = residual/normF;
}
gmres_residual_series[i] = residual;
outer_iterations++; //congratulations you have completed an outer iteration
if( residual > 1e-5 && outer_iterations < outer ){
goto restart;
}
//Only display residual at end of iterations
printf("GMRES residual = %e\n", residual);
//Newton step
//feel free to damp and use like -0.1
temp = -DAMP; //How much to move along the Newton direction.
cublasDaxpy( handle, input_dim, &temp, step_d, 1, state_d, 1);
fourier_filter_state( state_d );
//check for exiting
BREAK_IF_Q
}
//Vectors
cudaFree( F_d );
cudaFree( JtF_d );
cudaFree( Jq_d );
cudaFree( JtJq_d );
cudaFree( step_d );
//Matrices
cudaFree( q_d );
gsl_matrix_free(h);
gsl_matrix_free(hth);
gsl_vector_free(y);
gsl_vector_free(b2);
gsl_vector_free(htb2);
stop = clock();
printf("Newton-GMRES took %f seconds. Have a nice day!\n", (double)(stop - start)/CLOCKS_PER_SEC );
}
|
4da173eb0f7974c69785f5adb7223d49305f972c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "tiger/layers/neuron/tanh_layer.hpp"
#include "tiger/utils/device_alternate.hpp"
namespace tiger{
template <typename Dtype>
__global__ void tanh_forward(const int n, const Dtype* in, Dtype* out){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
out[i] = tanh(in[i]);
}
}
template <typename Dtype>
void TanhLayer<Dtype>::forward_gpu(const vector<Blob<Dtype>* >& bottom,
const vector<Blob<Dtype>* >& top){
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
hipLaunchKernelGGL(( tanh_forward<Dtype>), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data);
}
template <typename Dtype>
__global__ void tanh_backward(const int n, const Dtype* top_diff, const Dtype* top_data,
Dtype* bottom_diff){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
bottom_diff[i] = top_diff[i] * (1 - top_data[i] * top_data[i]);
}
}
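// Note (added comment): the backward kernel relies on the identity d/dx tanh(x) = 1 - tanh(x)^2,
// which is why only the forward output (top_data) is needed to form bottom_diff.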
template <typename Dtype>
void TanhLayer<Dtype>::backward_gpu(const vector<Blob<Dtype>* >& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>* >& bottom){
if(!propagate_down[0]){
return;
}
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const int count = bottom[0]->count();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
hipLaunchKernelGGL(( tanh_backward<Dtype>), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0,
count, top_diff, top_data, bottom_diff);
}
template class TanhLayer<float>;
template class TanhLayer<double>;
}
| 4da173eb0f7974c69785f5adb7223d49305f972c.cu | #include "tiger/layers/neuron/tanh_layer.hpp"
#include "tiger/utils/device_alternate.hpp"
namespace tiger{
template <typename Dtype>
__global__ void tanh_forward(const int n, const Dtype* in, Dtype* out){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
out[i] = tanh(in[i]);
}
}
template <typename Dtype>
void TanhLayer<Dtype>::forward_gpu(const vector<Blob<Dtype>* >& bottom,
const vector<Blob<Dtype>* >& top){
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
tanh_forward<Dtype><<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>(
count, bottom_data, top_data);
}
template <typename Dtype>
__global__ void tanh_backward(const int n, const Dtype* top_diff, const Dtype* top_data,
Dtype* bottom_diff){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
bottom_diff[i] = top_diff[i] * (1 - top_data[i] * top_data[i]);
}
}
template <typename Dtype>
void TanhLayer<Dtype>::backward_gpu(const vector<Blob<Dtype>* >& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>* >& bottom){
if(!propagate_down[0]){
return;
}
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const int count = bottom[0]->count();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
tanh_backward<Dtype><<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>(
count, top_diff, top_data, bottom_diff);
}
template class TanhLayer<float>;
template class TanhLayer<double>;
}
|
4b4d13bc19b6e38967c18236bfac4728291195b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cudafunc.h>
//{{{ print_device_prop
void print_device_prop(hipDeviceProp_t prop){
INFO("Name: \t\t\t%s\n",prop.name);
INFO("Compute Capability: \t%d.%d\n",prop.major,prop.minor);
INFO("Integrated: \t\t%s\n",(prop.integrated == 1 ? "\033[92mYes\033[0m":"\033[91mNo\033[0m"));
INFO("Can map host mem: \t%s\n",(prop.canMapHostMemory == 1 ? "\033[92mYes\033[0m":"\033[91mNo\033[0m"));
INFO("Clock rate: \t\t%d\n",prop.clockRate);
INFO("Memory clock rate: \t%d\n",prop.memoryClockRate);
INFO("Concurrent Kernels: \t%s\n",(prop.concurrentKernels == 1 ? "\033[92mYes\033[0m":"\033[91mNo\033[0m"));
INFO("MP Count: \t\t%d\n",prop.multiProcessorCount);
INFO("Max threads per MP: \t%d\n",prop.maxThreadsPerMultiProcessor);
INFO("Max threads per block: \t%d\n",prop.maxThreadsPerBlock);
INFO("Total const mem: \t%d\n",prop.totalConstMem);
INFO("Total global mem: \t%d\n",prop.totalGlobalMem);
INFO("Warp size: \t\t%d\n",prop.warpSize);
INFO("Async engine count: \t%d\n",prop.asyncEngineCount);
INFO("Max grid size x: \t%d\n",prop.maxGridSize[0]);
INFO("Max grid size y: \t%d\n",prop.maxGridSize[1]);
INFO("Max grid size z: \t%d\n",prop.maxGridSize[2]);
INFO("Max threads x: \t\t%d\n",prop.maxThreadsDim[0]);
INFO("Max threads y: \t\t%d\n",prop.maxThreadsDim[1]);
INFO("Max threads z: \t\t%d\n",prop.maxThreadsDim[2]);
}
//}}}
//{{{ show_devices
void show_devices(){
hipDeviceProp_t prop;
int count;
hipError_t cuda_error;
cuda_error = hipGetDeviceCount(&count);
if(cuda_error == hipSuccess){
INFO("There are %d devices available\n",count);
for(int i=0;i<count;i++){
cuda_error = hipGetDeviceProperties(&prop,i);
INFO("------------ Device %d ------------\n",i);
print_device_prop(prop);
}
} else ERROR("An error occurred while attempting to retrieve available devices: %s\n",hipGetErrorString(cuda_error));
}
//}}}
//{{{ cuda_free_buffers
void cuda_free_buffers(CUDA_PLAN_T *p){
if(p->in1 != NULL) {
CUDA_ERROR(hipHostFree(p->in1));
p->in1=NULL;
}
if(p->in2 != NULL) {
CUDA_ERROR(hipHostFree(p->in2));
p->in2=NULL;
}
if(p->out != NULL) {
CUDA_ERROR(hipHostFree(p->out));
p->out=NULL;
}
if(!p->use_zero_copy){
int i;
for(i=0;i<p->num_streams;i++){
CUDA_ERROR(hipFree(p->in1_dev[i]));
CUDA_ERROR(hipFree(p->in2_dev[i]));
CUDA_ERROR(hipFree(p->out_dev[i]));
p->in1_dev[i]=NULL;
p->in2_dev[i]=NULL;
p->out_dev[i]=NULL;
}
}
}
//}}}
//{{{ show_plan
void show_plan(CUDA_PLAN_T *p){
if(p->verbose > 0){
INFO("******************* CUDA Plan *******************\n");
INFO("Complex:\t\t%s\n",(p->cmplx == 1 ? "\033[92mYes\033[0m":"\033[91mNo\033[0m"));
INFO("Zero-copy:\t\t%s\n",(p->use_zero_copy == 1 ? "\033[92mYes\033[0m":"\033[91mNo\033[0m"));
INFO("In-place:\t\t%s\n",(p->inplace == 1 ? "\033[92mYes\033[0m":"\033[91mNo\033[0m"));
INFO("Streams Enabled:\t%s\n",(p->use_streams == 1 ? "\033[92mYes\033[0m":"\033[91mNo\033[0m"));
INFO("Nstreams:\t\t%d\n",p->num_streams);
INFO("Nchunks:\t\t%d\n",p->nchunks);
INFO("Elem. per chunk:\t%ld\n",p->elem_per_chunk);
INFO("Elem. left-over:\t%d\n",p->elem_leftover);
INFO("Nthreads:\t\t%d\n",p->nthreads);
INFO("Nblocks:\t\t%d\n",p->nblocks);
INFO("*************************************************\n");
}
}
//}}}
//{{{ cuda_plan_init
CUDA_PLAN_T * cuda_plan_init(long nelem, int dev_num, int nblocks, int nthreads, int flags, int verbose){
CUDA_PLAN_T *p;
int i;
if((p = (CUDA_PLAN_T *) calloc(1,sizeof(CUDA_PLAN_T))) == NULL){
ERROR("Failed to allocate memory for the CUDA plan\n");
}
else {
// Initialize the plan
// TODO: Need to check this to make sure it's > 0
p->nelem=nelem;
p->in1=NULL;
p->in2=NULL;
p->out=NULL;
for(i=0;i<CUDA_MAX_STREAMS;i++){
p->in1_dev[i]=NULL;
p->in2_dev[i]=NULL;
p->out_dev[i]=NULL;
}
p->flags=flags;
p->nblocks=nblocks;
p->nthreads=nthreads;
p->verbose=verbose;
p->elem_leftover=0;
// Parse the flags
p->cmplx=(flags&0x1);
p->inplace=(flags&0x2)>>1;
p->use_zero_copy=(flags&0x4)>>2;
memset(&(p->prop),0,sizeof(hipDeviceProp_t));
if(dev_num > -1){
// If a specific device was requested then assume the user knows
// what they're doing and blindly set the device and grab the
// properties.
p->gpu = dev_num;
CUDA_ERROR_SETUP(hipSetDevice(p->gpu));
CUDA_ERROR_SETUP(hipGetDeviceProperties(&(p->prop),p->gpu));
if(p->verbose > 0){
INFO("------------ Selected Device ------------\n");
print_device_prop(p->prop);
}
}
else {
// Check for a device that can meet the libraries maximum requirements
p->prop.major=3;
p->prop.minor=0;
CUDA_ERROR_SETUP(hipChooseDevice(&(p->gpu),&(p->prop)));
CUDA_ERROR_SETUP(hipSetDevice(p->gpu));
CUDA_ERROR_SETUP(hipGetDeviceProperties(&(p->prop),p->gpu));
if(p->verbose > 0){
INFO("------------ Selected Device ------------\n");
print_device_prop(p->prop);
}
}
// If this is the case then you will realize no gain in using multiple streams
if(!(p->prop.deviceOverlap)) {
WARN("Device does not support overlap. Use of streams has been turned off.\n");
p->use_streams=0;
}
else p->use_streams=1;
// Number of available streams
// If we aren't using streams then set it to one
// TODO: Work out the stream processing path to use more than 2 streams
//p->num_streams=(p->use_streams ? min(p->prop.multiProcessorCount,2):1);
p->num_streams=(p->use_streams ? p->prop.multiProcessorCount:1);
// Allocate the memory for the buffers
// NOTE: This process will determine how the follow-on calculations
// will run. The idea here is that we are going to attempt to do
// everything using the memory on the GPU itself. In the case that
// the allocation fails then we will fall back to using zero-copy
// memory buffers which are simply host memory blocks which are pinned
// and thus allow access via a hardware device such as the GPU. It
// should be mentioned that in tests it appears that zero-copy
// buffers are more efficient than copying blocks of memory on and
// off the card. This is something to consider in the future. This
// should be exposed as an option where the user can explicitly
// request zero-copy buffers.
int nfloats=(p->cmplx ? 2:1);
if(!p->use_zero_copy){
// We're going to set our number of chunks to be a multiple of our number of streams
p->nchunks = p->num_streams;
// NOTE: This needs to be set for the nthreads and nblocks calculation which follows
p->elem_per_chunk=nelem/p->nchunks;
p->elem_leftover = (int)roundf((((float)nelem/(float)p->nchunks) - (float)p->elem_per_chunk)*(float)p->nchunks);
// NOTE: In order to use streams the memory must be pinned, or page-locked, due to the use
// of hipMemcpyAsync. This is why hipHostMalloc is used instead of a simple malloc or calloc.
CUDA_ERROR_SETUP(hipHostMalloc((void **)&(p->in1),p->nelem*sizeof(float)*nfloats,hipHostMallocDefault));
CUDA_ERROR_SETUP(hipHostMalloc((void **)&(p->in2),p->nelem*sizeof(float)*nfloats,hipHostMallocDefault));
if(!p->inplace) CUDA_ERROR_SETUP(hipHostMalloc((void **)&(p->out),p->nelem*sizeof(float)*nfloats,hipHostMallocDefault));
for(i=0;i<p->num_streams;i++){
// Allocate the memory on the GPU for each stream.
CUDA_ERROR_SETUP(hipMalloc((void **) &(p->in1_dev[i]), p->elem_per_chunk*sizeof(float)*nfloats));
CUDA_ERROR_SETUP(hipMalloc((void **) &(p->in2_dev[i]), p->elem_per_chunk*sizeof(float)*nfloats));
if(!p->inplace) CUDA_ERROR_SETUP(hipMalloc((void **) &(p->out_dev[i]), p->elem_per_chunk*sizeof(float)*nfloats));
CUDA_ERROR_SETUP(hipStreamCreate(&(p->stream[i])));
}
// If the setup failed then go ahead and get rid of any allocations the succeeded
if(cuda_setup_failed) cuda_free_buffers(p);
}
if(cuda_setup_failed || p->use_zero_copy){
// Reset the flag
if(cuda_setup_failed){
cuda_setup_failed=0;
p->use_zero_copy=1;
WARN("Failed to allocate buffers on the GPU. Falling back to zero-copy host buffers.\n");
}
else if(p->verbose > 0) INFO("Setting up user requested zero-copy host buffers.\n");
// Turn stream processing off
p->use_streams=0;
// We're not using streams with zero copy buffers so we only have 1 "chunk" to process
p->nchunks = 1;
// NOTE: This needs to be set for the nthreads and nblocks calculation which follows
p->elem_per_chunk=nelem/p->nchunks;
// Can we even use zero-copy?
if(p->prop.canMapHostMemory){
// NOTE: The flag hipHostMallocMapped tells hipHostMalloc that we want to use
// this host memory for the GPU. The flag hipHostMallocWriteCombined provides
// a performance increase for those buffers which are only intended to be read
// from the GPU. There will be a considerable decrease in performance if these
// buffers are read by the CPU.
CUDA_ERROR_SETUP(hipHostMalloc((void **) &(p->in1), p->nelem*sizeof(float)*nfloats,hipHostMallocWriteCombined|hipHostMallocMapped));
CUDA_ERROR_SETUP(hipHostMalloc((void **) &(p->in2), p->nelem*sizeof(float)*nfloats,hipHostMallocWriteCombined|hipHostMallocMapped));
// It is more likely the case that the output buffer will be read from the
// CPU. In this case we will setup the output buffer without the flag
// hipHostMallocWriteCombined.
//if(!p->inplace) CUDA_ERROR_SETUP(hipHostMalloc((void **) &(p->out), p->nelem*sizeof(float)*nfloats,hipHostMallocWriteCombined|hipHostMallocMapped));
if(!p->inplace) CUDA_ERROR_SETUP(hipHostMalloc((void **) &(p->out), p->nelem*sizeof(float)*nfloats,hipHostMallocMapped));
// Get pointers to these buffers which work with a GPU
CUDA_ERROR_SETUP(hipHostGetDevicePointer(&(p->in1_dev[0]), p->in1, 0));
CUDA_ERROR_SETUP(hipHostGetDevicePointer(&(p->in2_dev[0]), p->in2, 0));
if(!p->inplace) CUDA_ERROR_SETUP(hipHostGetDevicePointer(&(p->out_dev[0]), p->out, 0));
} else {
// TODO: In this case we ought to set up a generic method which
// will work on any GPU configuration.
ERROR("Device can not map host memory.\n");
cuda_setup_failed=1;
}
}
// Auto-calc the number of blocks and the number of threads per block
// if not explicitly requested
// NOTE: Here we will always attempt to maximize the number of threads
// per block.
if(p->nthreads <= 0) p->nthreads=minl(p->elem_per_chunk,p->prop.maxThreadsPerBlock);
else{
if(p->nthreads > p->prop.maxThreadsPerBlock){
if(p->verbose) WARN("Requested number of threads per block exceeds the maxThreadsPerBlock for this device. Truncating to %d.\n",p->prop.maxThreadsPerBlock);
p->nthreads = p->prop.maxThreadsPerBlock;
}
}
if(p->nblocks <= 0) p->nblocks = p->elem_per_chunk/p->nthreads;
else{
if(p->nblocks > CUDA_MAX_BLOCKS){
if(p->verbose) WARN("Requested number of blocks exceeds the max for this device. Truncating to %d.\n",CUDA_MAX_BLOCKS);
p->nblocks = CUDA_MAX_BLOCKS;
}
}
}
// If the CUDA setup failed then go ahead and free the plan if it
// was created
if(cuda_setup_failed){
cuda_plan_destroy(p);
return NULL;
} else return(p);
}
//}}}
//{{{ cuda_plan_destroy
void cuda_plan_destroy(CUDA_PLAN_T *p){
int i;
if(p != NULL){
cuda_free_buffers(p);
// Destroy any streams
if(p->use_streams) {
for(i=0;i<p->num_streams;i++) CUDA_ERROR(hipStreamDestroy(p->stream[i]));
}
free(p);
p=NULL;
}
return;
}
//}}}
//{{{ HOST
//{{{ complex
//{{{ kernels
void cmplx_conj_mult_kernel_host(hipComplex *a, hipComplex *b, hipComplex *c, long N){
for(int i=0;i<N;i++) c[i] = cuCmulf(cuConjf(a[i]),b[i]);
}
void cmplx_conj_mult_kernel_host_ip(hipComplex *a, hipComplex *b, long N){
for(int i=0;i<N;i++) b[i] = cuCmulf(cuConjf(a[i]),b[i]);
}
void cmplx_mult_kernel_host(hipComplex *a, hipComplex *b, hipComplex *c, long N){
for(int i=0;i<N;i++) c[i] = cuCmulf(a[i],b[i]);
}
void cmplx_mult_kernel_host_ip(hipComplex *a, hipComplex *b, long N){
for(int i=0;i<N;i++) b[i] = cuCmulf(a[i],b[i]);
}
void cmplx_div_kernel_host(hipComplex *a, hipComplex *b, hipComplex *c, long N){
for(int i=0;i<N;i++) c[i] = cuCdivf(a[i],b[i]);
}
void cmplx_div_kernel_host_ip(hipComplex *a, hipComplex *b, long N){
for(int i=0;i<N;i++) b[i] = cuCdivf(a[i],b[i]);
}
void cmplx_add_kernel_host(hipComplex *a, hipComplex *b, hipComplex *c, long N){
for(int i=0;i<N;i++) c[i] = cuCaddf(a[i],b[i]);
}
void cmplx_add_kernel_host_ip(hipComplex *a, hipComplex *b, long N){
for(int i=0;i<N;i++) b[i] = cuCaddf(a[i],b[i]);
}
void cmplx_sub_kernel_host(hipComplex *a, hipComplex *b, hipComplex *c, long N){
for(int i=0;i<N;i++) c[i] = cuCsubf(a[i],b[i]);
}
void cmplx_sub_kernel_host_ip(hipComplex *a, hipComplex *b, long N){
for(int i=0;i<N;i++) b[i] = cuCsubf(a[i],b[i]);
}
//}}}
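// Note (added comment): cuCmulf(cuConjf(a), b) above expands to the usual conjugate product
// (a.x - i*a.y)*(b.x + i*b.y) = (a.x*b.x + a.y*b.y) + i*(a.x*b.y - a.y*b.x).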
//{{{ host_v_cmplx_conj_mult
int host_v_cmplx_conj_mult(CUDA_PLAN_T *p){
int status=0;
if(p != NULL){
if(p->inplace){
// Run the cudaKernel
cmplx_conj_mult_kernel_host_ip((hipComplex *)(p->in1),
(hipComplex *)(p->in2),
p->nelem);
}
else{
// Run the cudaKernel
cmplx_conj_mult_kernel_host((hipComplex *)(p->in1),
(hipComplex *)(p->in2),
(hipComplex *)(p->out),
p->nelem);
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ host_v_cmplx_mult
int host_v_cmplx_mult(CUDA_PLAN_T *p){
int status=0;
if(p != NULL){
if(p->inplace){
// Run the cudaKernel
cmplx_mult_kernel_host_ip((hipComplex *)(p->in1),
(hipComplex *)(p->in2),
p->nelem);
}
else{
// Run the cudaKernel
cmplx_mult_kernel_host((hipComplex *)(p->in1),
(hipComplex *)(p->in2),
(hipComplex *)(p->out),
p->nelem);
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ host_v_cmplx_div
int host_v_cmplx_div(CUDA_PLAN_T *p){
int status=0;
if(p != NULL){
if(p->inplace){
// Run the cudaKernel
cmplx_div_kernel_host_ip((hipComplex *)(p->in1),
(hipComplex *)(p->in2),
p->nelem);
}
else{
// Run the cudaKernel
cmplx_div_kernel_host((hipComplex *)(p->in1),
(hipComplex *)(p->in2),
(hipComplex *)(p->out),
p->nelem);
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ host_v_cmplx_add
int host_v_cmplx_add(CUDA_PLAN_T *p){
int status=0;
if(p != NULL){
if(p->inplace){
// Run the cudaKernel
cmplx_add_kernel_host_ip((hipComplex *)(p->in1),
(hipComplex *)(p->in2),
p->nelem);
}
else{
// Run the cudaKernel
cmplx_add_kernel_host((hipComplex *)(p->in1),
(hipComplex *)(p->in2),
(hipComplex *)(p->out),
p->nelem);
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ host_v_cmplx_sub
int host_v_cmplx_sub(CUDA_PLAN_T *p){
int status=0;
if(p != NULL){
if(p->inplace){
// Run the cudaKernel
cmplx_sub_kernel_host_ip((hipComplex *)(p->in1),
(hipComplex *)(p->in2),
p->nelem);
}
else{
// Run the cudaKernel
cmplx_sub_kernel_host((hipComplex *)(p->in1),
(hipComplex *)(p->in2),
(hipComplex *)(p->out),
p->nelem);
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//}}}
//{{{ real
//{{{ kernels
void real_mult_kernel_host(float *a, float *b, float *c, long N){
for(int i=0;i<N;i++) c[i] = a[i]*b[i];
}
void real_mult_kernel_host_ip(float *a, float *b, long N){
for(int i=0;i<N;i++) b[i] = a[i]*b[i];
}
void real_div_kernel_host(float *a, float *b, float *c, long N){
for(int i=0;i<N;i++) c[i] = a[i]/b[i];
}
void real_div_kernel_host_ip(float *a, float *b, long N){
for(int i=0;i<N;i++) b[i] = a[i]/b[i];
}
void real_add_kernel_host(float *a, float *b, float *c, long N){
for(int i=0;i<N;i++) c[i] = a[i]+b[i];
}
void real_add_kernel_host_ip(float *a, float *b, long N){
for(int i=0;i<N;i++) b[i] = a[i]+b[i];
}
void real_sub_kernel_host(float *a, float *b, float *c, long N){
for(int i=0;i<N;i++) c[i] = a[i]-b[i];
}
void real_sub_kernel_host_ip(float *a, float *b, long N){
for(int i=0;i<N;i++) b[i] = a[i]-b[i];
}
//}}}
//{{{ host_v_real_mult
int host_v_real_mult(CUDA_PLAN_T *p){
int status=0;
if(p != NULL){
if(p->inplace){
// Run the cudaKernel
real_mult_kernel_host_ip(p->in1,
p->in2,
p->nelem);
}
else{
// Run the cudaKernel
real_mult_kernel_host(p->in1,
p->in2,
p->out,
p->nelem);
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ host_v_real_div
int host_v_real_div(CUDA_PLAN_T *p){
int status=0;
if(p != NULL){
if(p->inplace){
// Run the cudaKernel
real_div_kernel_host_ip(p->in1,
p->in2,
p->nelem);
}
else{
// Run the cudaKernel
real_div_kernel_host(p->in1,
p->in2,
p->out,
p->nelem);
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ host_v_real_add
int host_v_real_add(CUDA_PLAN_T *p){
int status=0;
if(p != NULL){
if(p->inplace){
// Run the cudaKernel
real_add_kernel_host_ip(p->in1,
p->in2,
p->nelem);
}
else{
// Run the cudaKernel
real_add_kernel_host(p->in1,
p->in2,
p->out,
p->nelem);
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ host_v_real_sub
int host_v_real_sub(CUDA_PLAN_T *p){
int status=0;
if(p != NULL){
if(p->inplace){
// Run the cudaKernel
real_sub_kernel_host_ip(p->in1,
p->in2,
p->nelem);
}
else{
// Run the cudaKernel
real_sub_kernel_host(p->in1,
p->in2,
p->out,
p->nelem);
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//}}}
//}}}
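// Illustrative usage sketch (added; not part of the original file). The wrapper name is
// hypothetical. It builds a real-valued, out-of-place plan with an auto-selected device and
// auto-sized blocks/threads (flag bits, as parsed in cuda_plan_init: 0x1 complex,
// 0x2 in-place, 0x4 zero-copy), fills the input buffers, runs the element-wise multiply on
// the host reference path, and tears the plan down.
static int plan_usage_example(long nelem){
CUDA_PLAN_T *p = cuda_plan_init(nelem, -1, 0, 0, 0, 0);
if(p == NULL) return 1;
for(long i=0;i<nelem;i++){ p->in1[i] = (float)i; p->in2[i] = 2.0f; }
int status = host_v_real_mult(p); // on success p->out[i] == 2.0f*i
cuda_plan_destroy(p);
return status;
}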
//{{{ CUDA
//{{{ complex
//{{{ kernels
__global__ void cmplx_conj_mult_kernel(hipComplex *a, hipComplex *b, hipComplex *c, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = cuCmulf(cuConjf(a[tid]),b[tid]);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cmplx_conj_mult_kernel_ip(hipComplex *a, hipComplex *b, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
b[tid] = cuCmulf(cuConjf(a[tid]),b[tid]);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cmplx_mult_kernel(hipComplex *a, hipComplex *b, hipComplex *c, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = cuCmulf(a[tid],b[tid]);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cmplx_mult_kernel_ip(hipComplex *a, hipComplex *b, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
b[tid] = cuCmulf(a[tid],b[tid]);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cmplx_div_kernel(hipComplex *a, hipComplex *b, hipComplex *c, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = cuCdivf(a[tid],b[tid]);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cmplx_div_kernel_ip(hipComplex *a, hipComplex *b, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
b[tid] = cuCdivf(a[tid],b[tid]);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cmplx_add_kernel(hipComplex *a, hipComplex *b, hipComplex *c, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = cuCaddf(a[tid],b[tid]);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cmplx_add_kernel_ip(hipComplex *a, hipComplex *b, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
b[tid] = cuCaddf(a[tid],b[tid]);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cmplx_sub_kernel(hipComplex *a, hipComplex *b, hipComplex *c, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = cuCsubf(a[tid],b[tid]);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cmplx_sub_kernel_ip(hipComplex *a, hipComplex *b, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
b[tid] = cuCsubf(a[tid],b[tid]);
tid += blockDim.x * gridDim.x;
}
}
//}}}
//{{{ cmplx_stream
void cmplx_stream(CUDA_PLAN_T *p,
void(*cmplx_kernel)(hipComplex *a, hipComplex *b, hipComplex *c, long N),
void(*cmplx_kernel_ip)(hipComplex *a, hipComplex *b, long N)
){
int i,j;
  // NOTE: p->nblocks and p->nthreads were sized in cuda_plan_init for a single chunk
  //       (elem_per_chunk elements), not for the full nelem.
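  // The in1/in2/out pointers are flat float arrays, so each complex chunk advances by
  // elem_per_chunk*2 floats; any leftover nelem % nchunks elements are finished on the
  // host by the calling cuda_v_cmplx_* wrapper.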
if(p->inplace){
for(i=0;i<p->nchunks && !cuda_runtime_failed;i+=p->num_streams){
// Spread buffer one across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(hipMemcpyAsync(p->in1_dev[j],
p->in1+((i+j)*p->elem_per_chunk*2),
p->elem_per_chunk*sizeof(hipComplex),
hipMemcpyHostToDevice,
p->stream[j]));
}
// Spread buffer two across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(hipMemcpyAsync(p->in2_dev[j],
p->in2+((i+j)*p->elem_per_chunk*2),
p->elem_per_chunk*sizeof(hipComplex),
hipMemcpyHostToDevice,
p->stream[j]));
}
// Spread kernel execution across all streams
for(j=0;j<p->num_streams;j++){
// Run the cudaKernel
hipLaunchKernelGGL(( cmplx_kernel_ip), dim3(p->nblocks),dim3(p->nthreads),0,p->stream[j], (hipComplex*)(p->in1_dev[j]),
(hipComplex*)(p->in2_dev[j]),
p->elem_per_chunk);
}
// Spread memcopy from device to host across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(hipMemcpyAsync(p->in2+((i+j)*p->elem_per_chunk*2),
p->in2_dev[j],
p->elem_per_chunk*sizeof(hipComplex),
hipMemcpyDeviceToHost,
p->stream[j]));
}
}
}
else {
for(i=0;i<p->nchunks && !cuda_runtime_failed;i+=p->num_streams){
// Spread buffer one across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(hipMemcpyAsync(p->in1_dev[j],
p->in1+((i+j)*p->elem_per_chunk*2),
p->elem_per_chunk*sizeof(hipComplex),
hipMemcpyHostToDevice,
p->stream[j]));
}
// Spread buffer two across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(hipMemcpyAsync(p->in2_dev[j],
p->in2+((i+j)*p->elem_per_chunk*2),
p->elem_per_chunk*sizeof(hipComplex),
hipMemcpyHostToDevice,
p->stream[j]));
}
// Spread kernel execution across all streams
for(j=0;j<p->num_streams;j++){
// Run the cudaKernel
hipLaunchKernelGGL(( cmplx_kernel), dim3(p->nblocks),dim3(p->nthreads),0,p->stream[j], (hipComplex*)(p->in1_dev[j]),
(hipComplex*)(p->in2_dev[j]),
(hipComplex*)(p->out_dev[j]),
p->elem_per_chunk);
}
// Spread memcopy from device to host across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(hipMemcpyAsync(p->out+((i+j)*p->elem_per_chunk*2),
p->out_dev[j],
p->elem_per_chunk*sizeof(hipComplex),
hipMemcpyDeviceToHost,
p->stream[j]));
}
}
}
  for(i=0;i<p->num_streams;i++) CUDA_ERROR_RUNTIME(hipStreamSynchronize(p->stream[i]));
}
//}}}
//{{{ cmplx_zcopy
void cmplx_zcopy(CUDA_PLAN_T *p,
void(*cmplx_kernel)(hipComplex *a, hipComplex *b, hipComplex *c, long N),
void(*cmplx_kernel_ip)(hipComplex *a, hipComplex *b, long N)
){
if(p->inplace){
// Run the cudaKernel
hipLaunchKernelGGL(( cmplx_kernel_ip), dim3(p->nblocks),dim3(p->nthreads), 0, 0, (hipComplex *)(p->in1_dev[0]),
(hipComplex *)(p->in2_dev[0]),
p->nelem);
}
else{
// Run the cudaKernel
hipLaunchKernelGGL(( cmplx_kernel), dim3(p->nblocks),dim3(p->nthreads), 0, 0, (hipComplex *)(p->in1_dev[0]),
(hipComplex *)(p->in2_dev[0]),
(hipComplex *)(p->out_dev[0]),
p->nelem);
}
// NOTE: This is a key piece in using zero-copy memory
hipDeviceSynchronize();
}
//}}}
//{{{ cuda_v_cmplx_conj_mult
int cuda_v_cmplx_conj_mult(CUDA_PLAN_T *p){
int status=0;
int i;
if(p != NULL){
#ifdef _SHOWTIME
hipEvent_t start,stop;
float elapsedTime;
CUDA_ERROR_RUNTIME(hipEventCreate(&start));
CUDA_ERROR_RUNTIME(hipEventCreate(&stop));
#endif
if(p->use_zero_copy){
#ifdef _SHOWTIME
CUDA_ERROR_RUNTIME(hipEventRecord(start,0));
#endif
cmplx_zcopy(p,cmplx_conj_mult_kernel,cmplx_conj_mult_kernel_ip);
#ifdef _SHOWTIME
CUDA_ERROR_RUNTIME(hipEventRecord(stop,0));
CUDA_ERROR_RUNTIME(hipEventSynchronize(stop));
CUDA_ERROR_RUNTIME(hipEventElapsedTime(&elapsedTime,start,stop));
printf("elapsed time: %.12f\n",elapsedTime);
#endif
}
else if(p->use_streams){
cmplx_stream(p,cmplx_conj_mult_kernel,cmplx_conj_mult_kernel_ip);
// Handle leftover
if(p->inplace){
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
((hipComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i] = cuCmulf(cuConjf(((hipComplex*)(p->in1))[p->nchunks*p->elem_per_chunk+i]),
((hipComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i]);
}
}
}
else {
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
((hipComplex*)(p->out))[p->nchunks*p->elem_per_chunk+i] = cuCmulf(cuConjf(((hipComplex*)(p->in1))[p->nchunks*p->elem_per_chunk+i]),
((hipComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i]);
}
}
}
}
else {
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ cuda_v_cmplx_mult
int cuda_v_cmplx_mult(CUDA_PLAN_T *p){
int status=0;
int i;
if(p != NULL){
if(p->use_zero_copy){
cmplx_zcopy(p,cmplx_mult_kernel,cmplx_mult_kernel_ip);
}
else if(p->use_streams){
cmplx_stream(p,cmplx_mult_kernel,cmplx_mult_kernel_ip);
// Handle leftover
if(p->inplace){
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
((hipComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i] = cuCmulf(((hipComplex*)(p->in1))[p->nchunks*p->elem_per_chunk+i],
((hipComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i]);
}
}
}
else {
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
((hipComplex*)(p->out))[p->nchunks*p->elem_per_chunk+i] = cuCmulf(((hipComplex*)(p->in1))[p->nchunks*p->elem_per_chunk+i],
((hipComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i]);
}
}
}
}
else {
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ cuda_v_cmplx_div
int cuda_v_cmplx_div(CUDA_PLAN_T *p){
int status=0;
int i;
if(p != NULL){
if(p->use_zero_copy){
cmplx_zcopy(p,cmplx_div_kernel,cmplx_div_kernel_ip);
}
else if(p->use_streams){
cmplx_stream(p,cmplx_div_kernel,cmplx_div_kernel_ip);
// Handle leftover
if(p->inplace){
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
((hipComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i] = cuCdivf(((hipComplex*)(p->in1))[p->nchunks*p->elem_per_chunk+i],
((hipComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i]);
}
}
}
else {
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
((hipComplex*)(p->out))[p->nchunks*p->elem_per_chunk+i] = cuCdivf(((hipComplex*)(p->in1))[p->nchunks*p->elem_per_chunk+i],
((hipComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i]);
}
}
}
}
else {
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ cuda_v_cmplx_add
int cuda_v_cmplx_add(CUDA_PLAN_T *p){
int status=0;
int i;
if(p != NULL){
if(p->use_zero_copy){
cmplx_zcopy(p,cmplx_add_kernel,cmplx_add_kernel_ip);
}
else if(p->use_streams){
cmplx_stream(p,cmplx_add_kernel,cmplx_add_kernel_ip);
// Handle leftover
if(p->inplace){
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
((hipComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i] = cuCaddf(((hipComplex*)(p->in1))[p->nchunks*p->elem_per_chunk+i],
((hipComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i]);
}
}
}
else {
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
((hipComplex*)(p->out))[p->nchunks*p->elem_per_chunk+i] = cuCaddf(((hipComplex*)(p->in1))[p->nchunks*p->elem_per_chunk+i],
((hipComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i]);
}
}
}
}
else {
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ cuda_v_cmplx_sub
int cuda_v_cmplx_sub(CUDA_PLAN_T *p){
int status=0;
int i;
if(p != NULL){
if(p->use_zero_copy){
cmplx_zcopy(p,cmplx_sub_kernel,cmplx_sub_kernel_ip);
}
else if(p->use_streams){
cmplx_stream(p,cmplx_sub_kernel,cmplx_sub_kernel_ip);
// Handle leftover
if(p->inplace){
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
((hipComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i] = cuCsubf(((hipComplex*)(p->in1))[p->nchunks*p->elem_per_chunk+i],
((hipComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i]);
}
}
}
else {
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
((hipComplex*)(p->out))[p->nchunks*p->elem_per_chunk+i] = cuCsubf(((hipComplex*)(p->in1))[p->nchunks*p->elem_per_chunk+i],
((hipComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i]);
}
}
}
}
else {
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//}}}
//{{{ real
//{{{ kernels
__global__ void real_mult_kernel(float *a, float *b, float *c, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = a[tid]*b[tid];
tid += blockDim.x * gridDim.x;
}
}
__global__ void real_mult_kernel_ip(float *a, float *b, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
b[tid] = a[tid]*b[tid];
tid += blockDim.x * gridDim.x;
}
}
__global__ void real_div_kernel(float *a, float *b, float *c, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = a[tid]/b[tid];
tid += blockDim.x * gridDim.x;
}
}
__global__ void real_div_kernel_ip(float *a, float *b, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
b[tid] = a[tid]/b[tid];
tid += blockDim.x * gridDim.x;
}
}
__global__ void real_add_kernel(float *a, float *b, float *c, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = a[tid]+b[tid];
tid += blockDim.x * gridDim.x;
}
}
__global__ void real_add_kernel_ip(float *a, float *b, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
b[tid] = a[tid]+b[tid];
tid += blockDim.x * gridDim.x;
}
}
__global__ void real_sub_kernel(float *a, float *b, float *c, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = a[tid]-b[tid];
tid += blockDim.x * gridDim.x;
}
}
__global__ void real_sub_kernel_ip(float *a, float *b, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
b[tid] = a[tid]-b[tid];
tid += blockDim.x * gridDim.x;
}
}
//}}}
//{{{ real_stream
void real_stream(CUDA_PLAN_T *p,
void(*real_kernel)(float *a, float *b, float *c, long N),
void(*real_kernel_ip)(float *a, float *b, long N)
){
int i,j;
  // NOTE: p->nblocks and p->nthreads were sized in cuda_plan_init for a single chunk
  //       (elem_per_chunk elements), not for the full nelem.
if(p->inplace){
for(i=0;i<p->nchunks && !cuda_runtime_failed;i+=p->num_streams){
// Spread buffer one across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(hipMemcpyAsync(p->in1_dev[j],
p->in1+((i+j)*p->elem_per_chunk),
p->elem_per_chunk*sizeof(float),
hipMemcpyHostToDevice,
p->stream[j]));
}
// Spread buffer two across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(hipMemcpyAsync(p->in2_dev[j],
p->in2+((i+j)*p->elem_per_chunk),
p->elem_per_chunk*sizeof(float),
hipMemcpyHostToDevice,
p->stream[j]));
}
// Spread kernel execution across all streams
for(j=0;j<p->num_streams;j++){
// Run the cudaKernel
hipLaunchKernelGGL(( real_kernel_ip), dim3(p->nblocks),dim3(p->nthreads),0,p->stream[j], p->in1_dev[j],
p->in2_dev[j],
p->elem_per_chunk);
}
// Spread memcopy from device to host across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(hipMemcpyAsync(p->in2+((i+j)*p->elem_per_chunk),
p->in2_dev[j],
p->elem_per_chunk*sizeof(float),
hipMemcpyDeviceToHost,
p->stream[j]));
}
}
}
else {
for(i=0;i<p->nchunks && !cuda_runtime_failed;i+=p->num_streams){
// Spread buffer one across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(hipMemcpyAsync(p->in1_dev[j],
p->in1+((i+j)*p->elem_per_chunk),
p->elem_per_chunk*sizeof(float),
hipMemcpyHostToDevice,
p->stream[j]));
}
// Spread buffer two across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(hipMemcpyAsync(p->in2_dev[j],
p->in2+((i+j)*p->elem_per_chunk),
p->elem_per_chunk*sizeof(float),
hipMemcpyHostToDevice,
p->stream[j]));
}
// Spread kernel execution across all streams
for(j=0;j<p->num_streams;j++){
// Run the cudaKernel
hipLaunchKernelGGL(( real_kernel), dim3(p->nblocks),dim3(p->nthreads),0,p->stream[j], p->in1_dev[j],
p->in2_dev[j],
p->out_dev[j],
p->elem_per_chunk);
}
// Spread memcopy from device to host across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(hipMemcpyAsync(p->out+((i+j)*p->elem_per_chunk),
p->out_dev[j],
p->elem_per_chunk*sizeof(float),
hipMemcpyDeviceToHost,
p->stream[j]));
}
}
}
for(i=0;i<p->num_streams;i++) CUDA_ERROR_RUNTIME(hipStreamSynchronize(p->stream[i]));
}
//}}}
//{{{ real_zcopy
void real_zcopy(CUDA_PLAN_T *p,
void(*real_kernel)(float *a, float *b, float *c, long N),
void(*real_kernel_ip)(float *a, float *b, long N)
){
if(p->inplace){
// Run the cudaKernel
hipLaunchKernelGGL(( real_kernel_ip), dim3(p->nblocks),dim3(p->nthreads), 0, 0, p->in1_dev[0],
p->in2_dev[0],
p->nelem);
}
else{
// Run the cudaKernel
hipLaunchKernelGGL(( real_kernel), dim3(p->nblocks),dim3(p->nthreads), 0, 0, p->in1_dev[0],
p->in2_dev[0],
p->out_dev[0],
p->nelem);
}
// NOTE: This is a key piece in using zero-copy memory
hipDeviceSynchronize();
}
//}}}
//{{{ cuda_v_real_mult
int cuda_v_real_mult(CUDA_PLAN_T *p){
int status=0;
int i;
if(p != NULL){
if(p->use_zero_copy){
real_zcopy(p,real_mult_kernel,real_mult_kernel_ip);
}
else if(p->use_streams){
real_stream(p,real_mult_kernel,real_mult_kernel_ip);
// Handle leftover
if(p->inplace){
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
p->in2[p->nchunks*p->elem_per_chunk+i] = p->in1[p->nchunks*p->elem_per_chunk+i]*p->in2[p->nchunks*p->elem_per_chunk+i];
}
}
}
else {
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
p->out[p->nchunks*p->elem_per_chunk+i] = p->in1[p->nchunks*p->elem_per_chunk+i]*p->in2[p->nchunks*p->elem_per_chunk+i];
}
}
}
}
else {
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ cuda_v_real_div
int cuda_v_real_div(CUDA_PLAN_T *p){
int status=0;
int i;
if(p != NULL){
if(p->use_zero_copy){
real_zcopy(p,real_div_kernel,real_div_kernel_ip);
}
else if(p->use_streams){
real_stream(p,real_div_kernel,real_div_kernel_ip);
// Handle leftover
if(p->inplace){
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
p->in2[p->nchunks*p->elem_per_chunk+i] = p->in1[p->nchunks*p->elem_per_chunk+i]/p->in2[p->nchunks*p->elem_per_chunk+i];
}
}
}
else {
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
p->out[p->nchunks*p->elem_per_chunk+i] = p->in1[p->nchunks*p->elem_per_chunk+i]/p->in2[p->nchunks*p->elem_per_chunk+i];
}
}
}
}
else {
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ cuda_v_real_add
int cuda_v_real_add(CUDA_PLAN_T *p){
int status=0;
int i;
if(p != NULL){
if(p->use_zero_copy){
real_zcopy(p,real_add_kernel,real_add_kernel_ip);
}
else if(p->use_streams){
real_stream(p,real_add_kernel,real_add_kernel_ip);
// Handle leftover
if(p->inplace){
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
p->in2[p->nchunks*p->elem_per_chunk+i] = p->in1[p->nchunks*p->elem_per_chunk+i]+p->in2[p->nchunks*p->elem_per_chunk+i];
}
}
}
else {
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
p->out[p->nchunks*p->elem_per_chunk+i] = p->in1[p->nchunks*p->elem_per_chunk+i]+p->in2[p->nchunks*p->elem_per_chunk+i];
}
}
}
}
else {
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ cuda_v_real_sub
int cuda_v_real_sub(CUDA_PLAN_T *p){
int status=0;
int i;
if(p != NULL){
if(p->use_zero_copy){
real_zcopy(p,real_sub_kernel,real_sub_kernel_ip);
}
else if(p->use_streams){
real_stream(p,real_sub_kernel,real_sub_kernel_ip);
// Handle leftover
if(p->inplace){
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
p->in2[p->nchunks*p->elem_per_chunk+i] = p->in1[p->nchunks*p->elem_per_chunk+i]-p->in2[p->nchunks*p->elem_per_chunk+i];
}
}
}
else {
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
p->out[p->nchunks*p->elem_per_chunk+i] = p->in1[p->nchunks*p->elem_per_chunk+i]-p->in2[p->nchunks*p->elem_per_chunk+i];
}
}
}
}
else {
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//}}}
//}}}
| 4b4d13bc19b6e38967c18236bfac4728291195b8.cu | #include <cudafunc.h>
//{{{ print_device_prop
void print_device_prop(cudaDeviceProp prop){
INFO("Name: \t\t\t%s\n",prop.name);
INFO("Compute Capability: \t%d.%d\n",prop.major,prop.minor);
INFO("Integrated: \t\t%s\n",(prop.integrated == 1 ? "\033[92mYes\033[0m":"\033[91mNo\033[0m"));
INFO("Can map host mem: \t%s\n",(prop.canMapHostMemory == 1 ? "\033[92mYes\033[0m":"\033[91mNo\033[0m"));
INFO("Clock rate: \t\t%d\n",prop.clockRate);
INFO("Memory clock rate: \t%d\n",prop.memoryClockRate);
INFO("Concurrent Kernels: \t%s\n",(prop.concurrentKernels == 1 ? "\033[92mYes\033[0m":"\033[91mNo\033[0m"));
INFO("MP Count: \t\t%d\n",prop.multiProcessorCount);
INFO("Max threads per MP: \t%d\n",prop.maxThreadsPerMultiProcessor);
INFO("Max threads per block: \t%d\n",prop.maxThreadsPerBlock);
INFO("Total const mem: \t%d\n",prop.totalConstMem);
INFO("Total global mem: \t%d\n",prop.totalGlobalMem);
INFO("Warp size: \t\t%d\n",prop.warpSize);
INFO("Async engine count: \t%d\n",prop.asyncEngineCount);
INFO("Max grid size x: \t%d\n",prop.maxGridSize[0]);
INFO("Max grid size y: \t%d\n",prop.maxGridSize[1]);
INFO("Max grid size z: \t%d\n",prop.maxGridSize[2]);
INFO("Max threads x: \t\t%d\n",prop.maxThreadsDim[0]);
INFO("Max threads y: \t\t%d\n",prop.maxThreadsDim[1]);
INFO("Max threads z: \t\t%d\n",prop.maxThreadsDim[2]);
}
//}}}
//{{{ show_devices
void show_devices(){
cudaDeviceProp prop;
int count;
cudaError cuda_error;
cuda_error = cudaGetDeviceCount(&count);
if(cuda_error == cudaSuccess){
INFO("There are %d devices available\n",count);
for(int i=0;i<count;i++){
cuda_error = cudaGetDeviceProperties(&prop,i);
INFO("------------ Device %d ------------\n",i);
print_device_prop(prop);
}
} else ERROR("An error occurred while attempting to retrieve available devices: %s\n",cudaGetErrorString(cuda_error));
}
//}}}
//{{{ cuda_free_buffers
void cuda_free_buffers(CUDA_PLAN_T *p){
if(p->in1 != NULL) {
CUDA_ERROR(cudaFreeHost(p->in1));
p->in1=NULL;
}
if(p->in2 != NULL) {
CUDA_ERROR(cudaFreeHost(p->in2));
p->in2=NULL;
}
if(p->out != NULL) {
CUDA_ERROR(cudaFreeHost(p->out));
p->out=NULL;
}
if(!p->use_zero_copy){
int i;
for(i=0;i<p->num_streams;i++){
CUDA_ERROR(cudaFree(p->in1_dev[i]));
CUDA_ERROR(cudaFree(p->in2_dev[i]));
CUDA_ERROR(cudaFree(p->out_dev[i]));
p->in1_dev[i]=NULL;
p->in2_dev[i]=NULL;
p->out_dev[i]=NULL;
}
}
}
//}}}
//{{{ show_plan
void show_plan(CUDA_PLAN_T *p){
if(p->verbose > 0){
INFO("******************* CUDA Plan *******************\n");
INFO("Complex:\t\t%s\n",(p->cmplx == 1 ? "\033[92mYes\033[0m":"\033[91mNo\033[0m"));
INFO("Zero-copy:\t\t%s\n",(p->use_zero_copy == 1 ? "\033[92mYes\033[0m":"\033[91mNo\033[0m"));
INFO("In-place:\t\t%s\n",(p->inplace == 1 ? "\033[92mYes\033[0m":"\033[91mNo\033[0m"));
INFO("Streams Enabled:\t%s\n",(p->use_streams == 1 ? "\033[92mYes\033[0m":"\033[91mNo\033[0m"));
INFO("Nstreams:\t\t%d\n",p->num_streams);
INFO("Nchunks:\t\t%d\n",p->nchunks);
INFO("Elem. per chunk:\t%ld\n",p->elem_per_chunk);
INFO("Elem. left-over:\t%d\n",p->elem_leftover);
INFO("Nthreads:\t\t%d\n",p->nthreads);
INFO("Nblocks:\t\t%d\n",p->nblocks);
INFO("*************************************************\n");
}
}
//}}}
//{{{ cuda_plan_init
CUDA_PLAN_T * cuda_plan_init(long nelem, int dev_num, int nblocks, int nthreads, int flags, int verbose){
CUDA_PLAN_T *p;
int i;
if((p = (CUDA_PLAN_T *) calloc(1,sizeof(CUDA_PLAN_T))) == NULL){
ERROR("Failed to allocate memory for the CUDA plan\n");
}
else {
// Initialize the plan
// TODO: Need to check this to make sure it's > 0
p->nelem=nelem;
p->in1=NULL;
p->in2=NULL;
p->out=NULL;
for(i=0;i<CUDA_MAX_STREAMS;i++){
p->in1_dev[i]=NULL;
p->in2_dev[i]=NULL;
p->out_dev[i]=NULL;
}
p->flags=flags;
p->nblocks=nblocks;
p->nthreads=nthreads;
p->verbose=verbose;
p->elem_leftover=0;
// Parse the flags
p->cmplx=(flags&0x1);
p->inplace=(flags&0x2)>>1;
p->use_zero_copy=(flags&0x4)>>2;
memset(&(p->prop),0,sizeof(cudaDeviceProp));
if(dev_num > -1){
// If a specific device was requested then assume the user knows
// what they're doing and blindly set the device and grab the
// properties.
p->gpu = dev_num;
CUDA_ERROR_SETUP(cudaSetDevice(p->gpu));
CUDA_ERROR_SETUP(cudaGetDeviceProperties(&(p->prop),p->gpu));
if(p->verbose > 0){
INFO("------------ Selected Device ------------\n");
print_device_prop(p->prop);
}
}
else {
// Check for a device that can meet the libraries maximum requirements
p->prop.major=3;
p->prop.minor=0;
CUDA_ERROR_SETUP(cudaChooseDevice(&(p->gpu),&(p->prop)));
CUDA_ERROR_SETUP(cudaSetDevice(p->gpu));
CUDA_ERROR_SETUP(cudaGetDeviceProperties(&(p->prop),p->gpu));
if(p->verbose > 0){
INFO("------------ Selected Device ------------\n");
print_device_prop(p->prop);
}
}
// If this is the case then you will realize no gain in using multiple streams
if(!(p->prop.deviceOverlap)) {
WARN("Device does not support overlap. Use of streams has been turned off.\n");
p->use_streams=0;
}
else p->use_streams=1;
// Number of available streams
// If we aren't using streams then set it to one
// TODO: Work out the stream processing path to use more than 2 streams
//p->num_streams=(p->use_streams ? min(p->prop.multiProcessorCount,2):1);
p->num_streams=(p->use_streams ? p->prop.multiProcessorCount:1);
// Allocate the memory for the buffers
// NOTE: This process will determine how the follow-on calculations
// will run. The idea here is that we are going to attempt to do
// everything using the memory on the GPU itself. In the case that
// the allocation fails then we will fall back to using zero-copy
// memory buffers which are simply host memory blocks which are pinned
// and thus allow access via a hardware device such as the GPU. It
// should be mentioned that in tests it appears that zero-copy
    // buffers are more efficient than copying blocks of memory on and
    // off the card. This is something to consider in the future. This
    // should be exposed as an option where the user can explicitly
// request zero-copy buffers.
int nfloats=(p->cmplx ? 2:1);
if(!p->use_zero_copy){
// We're going to set our number of chunks to be a multiple of our number of streams
p->nchunks = p->num_streams;
// NOTE: This needs to be set for the nthreads and nblocks calculation which follows
p->elem_per_chunk=nelem/p->nchunks;
p->elem_leftover = (int)roundf((((float)nelem/(float)p->nchunks) - (float)p->elem_per_chunk)*(float)p->nchunks);
// NOTE: In order to use streams the memory must be pinned, or page-locked, due to the use
// of cudaMemcpyAsync. This is why cudaHostAlloc is used instead of a simple malloc or calloc.
CUDA_ERROR_SETUP(cudaHostAlloc((void **)&(p->in1),p->nelem*sizeof(float)*nfloats,cudaHostAllocDefault));
CUDA_ERROR_SETUP(cudaHostAlloc((void **)&(p->in2),p->nelem*sizeof(float)*nfloats,cudaHostAllocDefault));
if(!p->inplace) CUDA_ERROR_SETUP(cudaHostAlloc((void **)&(p->out),p->nelem*sizeof(float)*nfloats,cudaHostAllocDefault));
for(i=0;i<p->num_streams;i++){
// Allocate the memory on the GPU for each stream.
CUDA_ERROR_SETUP(cudaMalloc((void **) &(p->in1_dev[i]), p->elem_per_chunk*sizeof(float)*nfloats));
CUDA_ERROR_SETUP(cudaMalloc((void **) &(p->in2_dev[i]), p->elem_per_chunk*sizeof(float)*nfloats));
if(!p->inplace) CUDA_ERROR_SETUP(cudaMalloc((void **) &(p->out_dev[i]), p->elem_per_chunk*sizeof(float)*nfloats));
CUDA_ERROR_SETUP(cudaStreamCreate(&(p->stream[i])));
}
// If the setup failed then go ahead and get rid of any allocations the succeeded
if(cuda_setup_failed) cuda_free_buffers(p);
}
if(cuda_setup_failed || p->use_zero_copy){
// Reset the flag
if(cuda_setup_failed){
cuda_setup_failed=0;
p->use_zero_copy=1;
WARN("Failed to allocate buffers on the GPU. Falling back to zero-copy host buffers.\n");
}
else if(p->verbose > 0) INFO("Setting up user requested zero-copy host buffers.\n");
// Turn stream processing off
p->use_streams=0;
// We're not using streams with zero copy buffers so we only have 1 "chunk" to process
p->nchunks = 1;
// NOTE: This needs to be set for the nthreads and nblocks calculation which follows
p->elem_per_chunk=nelem/p->nchunks;
// Can we even use zero-copy?
if(p->prop.canMapHostMemory){
// NOTE: The flag cudaHostAllocMapped tells cudaHostAlloc that we want to use
// this host memory for the GPU. The flag cudaHostAllocWriteCombined provides
// a performance increase for those buffers which are only intended to be read
// from the GPU. There will be a considerable decrease in performance if these
// buffers are read by the CPU.
CUDA_ERROR_SETUP(cudaHostAlloc((void **) &(p->in1), p->nelem*sizeof(float)*nfloats,cudaHostAllocWriteCombined|cudaHostAllocMapped));
CUDA_ERROR_SETUP(cudaHostAlloc((void **) &(p->in2), p->nelem*sizeof(float)*nfloats,cudaHostAllocWriteCombined|cudaHostAllocMapped));
// It is more likely the case that the output buffer will be read from the
// CPU. In this case we will setup the output buffer without the flag
// cudaHostAllocWriteCombined.
//if(!p->inplace) CUDA_ERROR_SETUP(cudaHostAlloc((void **) &(p->out), p->nelem*sizeof(float)*nfloats,cudaHostAllocWriteCombined|cudaHostAllocMapped));
if(!p->inplace) CUDA_ERROR_SETUP(cudaHostAlloc((void **) &(p->out), p->nelem*sizeof(float)*nfloats,cudaHostAllocMapped));
// Get pointers to these buffers which work with a GPU
CUDA_ERROR_SETUP(cudaHostGetDevicePointer(&(p->in1_dev[0]), p->in1, 0));
CUDA_ERROR_SETUP(cudaHostGetDevicePointer(&(p->in2_dev[0]), p->in2, 0));
if(!p->inplace) CUDA_ERROR_SETUP(cudaHostGetDevicePointer(&(p->out_dev[0]), p->out, 0));
} else {
// TODO: In this case we ought to setup a generic method which
// will work on any GPU configuration.
ERROR("Device can not map host memory.\n");
cuda_setup_failed=1;
}
}
// Auto-calc the number of blocks and the number of threads per block
// if not explicitely requested
// NOTE: Here we will always attempt to maximize the number of threads
// per block.
if(p->nthreads <= 0) p->nthreads=minl(p->elem_per_chunk,p->prop.maxThreadsPerBlock);
else{
if(p->nthreads > p->prop.maxThreadsPerBlock){
if(p->verbose) WARN("Requested number of threads per block exceeds the maxThreadsPerBlock for this device. Truncating to %d.\n",p->prop.maxThreadsPerBlock);
p->nthreads = p->prop.maxThreadsPerBlock;
}
}
if(p->nblocks <= 0) p->nblocks = p->elem_per_chunk/p->nthreads;
else{
if(p->nblocks > CUDA_MAX_BLOCKS){
if(p->verbose) WARN("Requested number of blocks exceeds the max for this device. Truncating to %d.\n",CUDA_MAX_BLOCKS);
p->nblocks = CUDA_MAX_BLOCKS;
}
}
}
// If the CUDA setup failed then go ahead and free the plan if it
// was created
if(cuda_setup_failed){
cuda_plan_destroy(p);
return NULL;
} else return(p);
}
//}}}
//{{{ cuda_plan_destroy
void cuda_plan_destroy(CUDA_PLAN_T *p){
int i;
if(p != NULL){
cuda_free_buffers(p);
// Destroy any streams
if(p->use_streams) {
for(i=0;i<p->num_streams;i++) CUDA_ERROR(cudaStreamDestroy(p->stream[i]));
}
free(p);
p=NULL;
}
return;
}
//}}}
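//{{{ usage sketch (editorial)
// Hedged usage sketch added for illustration only; it is not part of the original
// library and is compiled out unless CUDAFUNC_USAGE_SKETCH is defined (both the
// macro and the function name example_cmplx_mult are hypothetical). It relies only
// on the plan fields and the flag bits parsed in cuda_plan_init above
// (bit0 = complex, bit1 = in-place, bit2 = zero-copy).
#ifdef CUDAFUNC_USAGE_SKETCH
static int example_cmplx_mult(long nelem){
  int status;
  // Complex, out-of-place; let the library choose the device, block and thread counts.
  CUDA_PLAN_T *p = cuda_plan_init(nelem, -1, 0, 0, 0x1, 0);
  if(p == NULL) return 1;
  // in1/in2 are pinned host buffers holding nelem interleaved (re,im) float pairs.
  for(long i=0;i<nelem;i++){
    p->in1[2*i] = 1.0f; p->in1[2*i+1] = 0.0f;
    p->in2[2*i] = 2.0f; p->in2[2*i+1] = 0.0f;
  }
  status = cuda_v_cmplx_mult(p);   // result is written to p->out
  cuda_plan_destroy(p);
  return status;
}
#endif
//}}}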
//{{{ HOST
//{{{ complex
//{{{ kernels
void cmplx_conj_mult_kernel_host(cuComplex *a, cuComplex *b, cuComplex *c, long N){
for(int i=0;i<N;i++) c[i] = cuCmulf(cuConjf(a[i]),b[i]);
}
void cmplx_conj_mult_kernel_host_ip(cuComplex *a, cuComplex *b, long N){
for(int i=0;i<N;i++) b[i] = cuCmulf(cuConjf(a[i]),b[i]);
}
void cmplx_mult_kernel_host(cuComplex *a, cuComplex *b, cuComplex *c, long N){
for(int i=0;i<N;i++) c[i] = cuCmulf(a[i],b[i]);
}
void cmplx_mult_kernel_host_ip(cuComplex *a, cuComplex *b, long N){
for(int i=0;i<N;i++) b[i] = cuCmulf(a[i],b[i]);
}
void cmplx_div_kernel_host(cuComplex *a, cuComplex *b, cuComplex *c, long N){
for(int i=0;i<N;i++) c[i] = cuCdivf(a[i],b[i]);
}
void cmplx_div_kernel_host_ip(cuComplex *a, cuComplex *b, long N){
for(int i=0;i<N;i++) b[i] = cuCdivf(a[i],b[i]);
}
void cmplx_add_kernel_host(cuComplex *a, cuComplex *b, cuComplex *c, long N){
for(int i=0;i<N;i++) c[i] = cuCaddf(a[i],b[i]);
}
void cmplx_add_kernel_host_ip(cuComplex *a, cuComplex *b, long N){
for(int i=0;i<N;i++) b[i] = cuCaddf(a[i],b[i]);
}
void cmplx_sub_kernel_host(cuComplex *a, cuComplex *b, cuComplex *c, long N){
for(int i=0;i<N;i++) c[i] = cuCsubf(a[i],b[i]);
}
void cmplx_sub_kernel_host_ip(cuComplex *a, cuComplex *b, long N){
for(int i=0;i<N;i++) b[i] = cuCsubf(a[i],b[i]);
}
//}}}
//{{{ host_v_cmplx_conj_mult
int host_v_cmplx_conj_mult(CUDA_PLAN_T *p){
int status=0;
if(p != NULL){
if(p->inplace){
// Run the cudaKernel
cmplx_conj_mult_kernel_host_ip((cuComplex *)(p->in1),
(cuComplex *)(p->in2),
p->nelem);
}
else{
// Run the cudaKernel
cmplx_conj_mult_kernel_host((cuComplex *)(p->in1),
(cuComplex *)(p->in2),
(cuComplex *)(p->out),
p->nelem);
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ host_v_cmplx_mult
int host_v_cmplx_mult(CUDA_PLAN_T *p){
int status=0;
if(p != NULL){
if(p->inplace){
// Run the cudaKernel
cmplx_mult_kernel_host_ip((cuComplex *)(p->in1),
(cuComplex *)(p->in2),
p->nelem);
}
else{
// Run the cudaKernel
cmplx_mult_kernel_host((cuComplex *)(p->in1),
(cuComplex *)(p->in2),
(cuComplex *)(p->out),
p->nelem);
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ host_v_cmplx_div
int host_v_cmplx_div(CUDA_PLAN_T *p){
int status=0;
if(p != NULL){
if(p->inplace){
// Run the cudaKernel
cmplx_div_kernel_host_ip((cuComplex *)(p->in1),
(cuComplex *)(p->in2),
p->nelem);
}
else{
// Run the cudaKernel
cmplx_div_kernel_host((cuComplex *)(p->in1),
(cuComplex *)(p->in2),
(cuComplex *)(p->out),
p->nelem);
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ host_v_cmplx_add
int host_v_cmplx_add(CUDA_PLAN_T *p){
int status=0;
if(p != NULL){
if(p->inplace){
// Run the cudaKernel
cmplx_add_kernel_host_ip((cuComplex *)(p->in1),
(cuComplex *)(p->in2),
p->nelem);
}
else{
// Run the cudaKernel
cmplx_add_kernel_host((cuComplex *)(p->in1),
(cuComplex *)(p->in2),
(cuComplex *)(p->out),
p->nelem);
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ host_v_cmplx_sub
int host_v_cmplx_sub(CUDA_PLAN_T *p){
int status=0;
if(p != NULL){
if(p->inplace){
// Run the cudaKernel
cmplx_sub_kernel_host_ip((cuComplex *)(p->in1),
(cuComplex *)(p->in2),
p->nelem);
}
else{
// Run the cudaKernel
cmplx_sub_kernel_host((cuComplex *)(p->in1),
(cuComplex *)(p->in2),
(cuComplex *)(p->out),
p->nelem);
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//}}}
//{{{ real
//{{{ kernels
void real_mult_kernel_host(float *a, float *b, float *c, long N){
for(int i=0;i<N;i++) c[i] = a[i]*b[i];
}
void real_mult_kernel_host_ip(float *a, float *b, long N){
for(int i=0;i<N;i++) b[i] = a[i]*b[i];
}
void real_div_kernel_host(float *a, float *b, float *c, long N){
for(int i=0;i<N;i++) c[i] = a[i]/b[i];
}
void real_div_kernel_host_ip(float *a, float *b, long N){
for(int i=0;i<N;i++) b[i] = a[i]/b[i];
}
void real_add_kernel_host(float *a, float *b, float *c, long N){
for(int i=0;i<N;i++) c[i] = a[i]+b[i];
}
void real_add_kernel_host_ip(float *a, float *b, long N){
for(int i=0;i<N;i++) b[i] = a[i]+b[i];
}
void real_sub_kernel_host(float *a, float *b, float *c, long N){
for(int i=0;i<N;i++) c[i] = a[i]-b[i];
}
void real_sub_kernel_host_ip(float *a, float *b, long N){
for(int i=0;i<N;i++) b[i] = a[i]-b[i];
}
//}}}
//{{{ host_v_real_mult
int host_v_real_mult(CUDA_PLAN_T *p){
int status=0;
if(p != NULL){
if(p->inplace){
// Run the cudaKernel
real_mult_kernel_host_ip(p->in1,
p->in2,
p->nelem);
}
else{
// Run the cudaKernel
real_mult_kernel_host(p->in1,
p->in2,
p->out,
p->nelem);
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ host_v_real_div
int host_v_real_div(CUDA_PLAN_T *p){
int status=0;
if(p != NULL){
if(p->inplace){
// Run the cudaKernel
real_div_kernel_host_ip(p->in1,
p->in2,
p->nelem);
}
else{
// Run the cudaKernel
real_div_kernel_host(p->in1,
p->in2,
p->out,
p->nelem);
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ host_v_real_add
int host_v_real_add(CUDA_PLAN_T *p){
int status=0;
if(p != NULL){
if(p->inplace){
// Run the cudaKernel
real_add_kernel_host_ip(p->in1,
p->in2,
p->nelem);
}
else{
// Run the cudaKernel
real_add_kernel_host(p->in1,
p->in2,
p->out,
p->nelem);
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ host_v_real_sub
int host_v_real_sub(CUDA_PLAN_T *p){
int status=0;
if(p != NULL){
if(p->inplace){
// Run the cudaKernel
real_sub_kernel_host_ip(p->in1,
p->in2,
p->nelem);
}
else{
// Run the cudaKernel
real_sub_kernel_host(p->in1,
p->in2,
p->out,
p->nelem);
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//}}}
//}}}
//{{{ CUDA
//{{{ complex
//{{{ kernels
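// All of the element-wise kernels in this CUDA section (complex and real) use a
// grid-stride loop: each thread starts at its global index and advances by
// blockDim.x * gridDim.x, so any nblocks/nthreads launch geometry covers all N elements.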
__global__ void cmplx_conj_mult_kernel(cuComplex *a, cuComplex *b, cuComplex *c, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = cuCmulf(cuConjf(a[tid]),b[tid]);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cmplx_conj_mult_kernel_ip(cuComplex *a, cuComplex *b, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
b[tid] = cuCmulf(cuConjf(a[tid]),b[tid]);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cmplx_mult_kernel(cuComplex *a, cuComplex *b, cuComplex *c, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = cuCmulf(a[tid],b[tid]);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cmplx_mult_kernel_ip(cuComplex *a, cuComplex *b, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
b[tid] = cuCmulf(a[tid],b[tid]);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cmplx_div_kernel(cuComplex *a, cuComplex *b, cuComplex *c, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = cuCdivf(a[tid],b[tid]);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cmplx_div_kernel_ip(cuComplex *a, cuComplex *b, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
b[tid] = cuCdivf(a[tid],b[tid]);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cmplx_add_kernel(cuComplex *a, cuComplex *b, cuComplex *c, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = cuCaddf(a[tid],b[tid]);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cmplx_add_kernel_ip(cuComplex *a, cuComplex *b, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
b[tid] = cuCaddf(a[tid],b[tid]);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cmplx_sub_kernel(cuComplex *a, cuComplex *b, cuComplex *c, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = cuCsubf(a[tid],b[tid]);
tid += blockDim.x * gridDim.x;
}
}
__global__ void cmplx_sub_kernel_ip(cuComplex *a, cuComplex *b, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
b[tid] = cuCsubf(a[tid],b[tid]);
tid += blockDim.x * gridDim.x;
}
}
//}}}
//{{{ cmplx_stream
void cmplx_stream(CUDA_PLAN_T *p,
void(*cmplx_kernel)(cuComplex *a, cuComplex *b, cuComplex *c, long N),
void(*cmplx_kernel_ip)(cuComplex *a, cuComplex *b, long N)
){
int i,j;
  // NOTE: p->nblocks and p->nthreads were sized in cuda_plan_init for a single chunk
  //       (elem_per_chunk elements), not for the full nelem.
if(p->inplace){
for(i=0;i<p->nchunks && !cuda_runtime_failed;i+=p->num_streams){
// Spread buffer one across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(cudaMemcpyAsync(p->in1_dev[j],
p->in1+((i+j)*p->elem_per_chunk*2),
p->elem_per_chunk*sizeof(cuComplex),
cudaMemcpyHostToDevice,
p->stream[j]));
}
// Spread buffer two across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(cudaMemcpyAsync(p->in2_dev[j],
p->in2+((i+j)*p->elem_per_chunk*2),
p->elem_per_chunk*sizeof(cuComplex),
cudaMemcpyHostToDevice,
p->stream[j]));
}
// Spread kernel execution across all streams
for(j=0;j<p->num_streams;j++){
// Run the cudaKernel
cmplx_kernel_ip<<<p->nblocks,p->nthreads,0,p->stream[j]>>>((cuComplex*)(p->in1_dev[j]),
(cuComplex*)(p->in2_dev[j]),
p->elem_per_chunk);
}
// Spread memcopy from device to host across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(cudaMemcpyAsync(p->in2+((i+j)*p->elem_per_chunk*2),
p->in2_dev[j],
p->elem_per_chunk*sizeof(cuComplex),
cudaMemcpyDeviceToHost,
p->stream[j]));
}
}
}
else {
for(i=0;i<p->nchunks && !cuda_runtime_failed;i+=p->num_streams){
// Spread buffer one across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(cudaMemcpyAsync(p->in1_dev[j],
p->in1+((i+j)*p->elem_per_chunk*2),
p->elem_per_chunk*sizeof(cuComplex),
cudaMemcpyHostToDevice,
p->stream[j]));
}
// Spread buffer two across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(cudaMemcpyAsync(p->in2_dev[j],
p->in2+((i+j)*p->elem_per_chunk*2),
p->elem_per_chunk*sizeof(cuComplex),
cudaMemcpyHostToDevice,
p->stream[j]));
}
// Spread kernel execution across all streams
for(j=0;j<p->num_streams;j++){
// Run the cudaKernel
cmplx_kernel<<<p->nblocks,p->nthreads,0,p->stream[j]>>>((cuComplex*)(p->in1_dev[j]),
(cuComplex*)(p->in2_dev[j]),
(cuComplex*)(p->out_dev[j]),
p->elem_per_chunk);
}
// Spread memcopy from device to host across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(cudaMemcpyAsync(p->out+((i+j)*p->elem_per_chunk*2),
p->out_dev[j],
p->elem_per_chunk*sizeof(cuComplex),
cudaMemcpyDeviceToHost,
p->stream[j]));
}
}
}
  for(i=0;i<p->num_streams;i++) CUDA_ERROR_RUNTIME(cudaStreamSynchronize(p->stream[i]));
}
//}}}
//{{{ cmplx_zcopy
void cmplx_zcopy(CUDA_PLAN_T *p,
void(*cmplx_kernel)(cuComplex *a, cuComplex *b, cuComplex *c, long N),
void(*cmplx_kernel_ip)(cuComplex *a, cuComplex *b, long N)
){
if(p->inplace){
// Run the cudaKernel
cmplx_kernel_ip<<<p->nblocks,p->nthreads>>>((cuComplex *)(p->in1_dev[0]),
(cuComplex *)(p->in2_dev[0]),
p->nelem);
}
else{
// Run the cudaKernel
cmplx_kernel<<<p->nblocks,p->nthreads>>>((cuComplex *)(p->in1_dev[0]),
(cuComplex *)(p->in2_dev[0]),
(cuComplex *)(p->out_dev[0]),
p->nelem);
}
// NOTE: This is a key piece in using zero-copy memory
  cudaDeviceSynchronize();
}
//}}}
//{{{ cuda_v_cmplx_conj_mult
int cuda_v_cmplx_conj_mult(CUDA_PLAN_T *p){
int status=0;
int i;
if(p != NULL){
#ifdef _SHOWTIME
cudaEvent_t start,stop;
float elapsedTime;
CUDA_ERROR_RUNTIME(cudaEventCreate(&start));
CUDA_ERROR_RUNTIME(cudaEventCreate(&stop));
#endif
if(p->use_zero_copy){
#ifdef _SHOWTIME
CUDA_ERROR_RUNTIME(cudaEventRecord(start,0));
#endif
cmplx_zcopy(p,cmplx_conj_mult_kernel,cmplx_conj_mult_kernel_ip);
#ifdef _SHOWTIME
CUDA_ERROR_RUNTIME(cudaEventRecord(stop,0));
CUDA_ERROR_RUNTIME(cudaEventSynchronize(stop));
CUDA_ERROR_RUNTIME(cudaEventElapsedTime(&elapsedTime,start,stop));
printf("elapsed time: %.12f\n",elapsedTime);
#endif
}
else if(p->use_streams){
cmplx_stream(p,cmplx_conj_mult_kernel,cmplx_conj_mult_kernel_ip);
// Handle leftover
if(p->inplace){
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
((cuComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i] = cuCmulf(cuConjf(((cuComplex*)(p->in1))[p->nchunks*p->elem_per_chunk+i]),
((cuComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i]);
}
}
}
else {
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
((cuComplex*)(p->out))[p->nchunks*p->elem_per_chunk+i] = cuCmulf(cuConjf(((cuComplex*)(p->in1))[p->nchunks*p->elem_per_chunk+i]),
((cuComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i]);
}
}
}
}
else {
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ cuda_v_cmplx_mult
int cuda_v_cmplx_mult(CUDA_PLAN_T *p){
int status=0;
int i;
if(p != NULL){
if(p->use_zero_copy){
cmplx_zcopy(p,cmplx_mult_kernel,cmplx_mult_kernel_ip);
}
else if(p->use_streams){
cmplx_stream(p,cmplx_mult_kernel,cmplx_mult_kernel_ip);
// Handle leftover
if(p->inplace){
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
((cuComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i] = cuCmulf(((cuComplex*)(p->in1))[p->nchunks*p->elem_per_chunk+i],
((cuComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i]);
}
}
}
else {
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
((cuComplex*)(p->out))[p->nchunks*p->elem_per_chunk+i] = cuCmulf(((cuComplex*)(p->in1))[p->nchunks*p->elem_per_chunk+i],
((cuComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i]);
}
}
}
}
else {
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ cuda_v_cmplx_div
int cuda_v_cmplx_div(CUDA_PLAN_T *p){
int status=0;
int i;
if(p != NULL){
if(p->use_zero_copy){
cmplx_zcopy(p,cmplx_div_kernel,cmplx_div_kernel_ip);
}
else if(p->use_streams){
cmplx_stream(p,cmplx_div_kernel,cmplx_div_kernel_ip);
// Handle leftover
if(p->inplace){
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
((cuComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i] = cuCdivf(((cuComplex*)(p->in1))[p->nchunks*p->elem_per_chunk+i],
((cuComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i]);
}
}
}
else {
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
((cuComplex*)(p->out))[p->nchunks*p->elem_per_chunk+i] = cuCdivf(((cuComplex*)(p->in1))[p->nchunks*p->elem_per_chunk+i],
((cuComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i]);
}
}
}
}
else {
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ cuda_v_cmplx_add
int cuda_v_cmplx_add(CUDA_PLAN_T *p){
int status=0;
int i;
if(p != NULL){
if(p->use_zero_copy){
cmplx_zcopy(p,cmplx_add_kernel,cmplx_add_kernel_ip);
}
else if(p->use_streams){
cmplx_stream(p,cmplx_add_kernel,cmplx_add_kernel_ip);
// Handle leftover
if(p->inplace){
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
((cuComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i] = cuCaddf(((cuComplex*)(p->in1))[p->nchunks*p->elem_per_chunk+i],
((cuComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i]);
}
}
}
else {
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
((cuComplex*)(p->out))[p->nchunks*p->elem_per_chunk+i] = cuCaddf(((cuComplex*)(p->in1))[p->nchunks*p->elem_per_chunk+i],
((cuComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i]);
}
}
}
}
else {
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ cuda_v_cmplx_sub
int cuda_v_cmplx_sub(CUDA_PLAN_T *p){
int status=0;
int i;
if(p != NULL){
if(p->use_zero_copy){
cmplx_zcopy(p,cmplx_sub_kernel,cmplx_sub_kernel_ip);
}
else if(p->use_streams){
cmplx_stream(p,cmplx_sub_kernel,cmplx_sub_kernel_ip);
// Handle leftover
if(p->inplace){
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
((cuComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i] = cuCsubf(((cuComplex*)(p->in1))[p->nchunks*p->elem_per_chunk+i],
((cuComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i]);
}
}
}
else {
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
((cuComplex*)(p->out))[p->nchunks*p->elem_per_chunk+i] = cuCsubf(((cuComplex*)(p->in1))[p->nchunks*p->elem_per_chunk+i],
((cuComplex*)(p->in2))[p->nchunks*p->elem_per_chunk+i]);
}
}
}
}
else {
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//}}}
//{{{ real
//{{{ kernels
__global__ void real_mult_kernel(float *a, float *b, float *c, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = a[tid]*b[tid];
tid += blockDim.x * gridDim.x;
}
}
__global__ void real_mult_kernel_ip(float *a, float *b, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
b[tid] = a[tid]*b[tid];
tid += blockDim.x * gridDim.x;
}
}
__global__ void real_div_kernel(float *a, float *b, float *c, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = a[tid]/b[tid];
tid += blockDim.x * gridDim.x;
}
}
__global__ void real_div_kernel_ip(float *a, float *b, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
b[tid] = a[tid]/b[tid];
tid += blockDim.x * gridDim.x;
}
}
__global__ void real_add_kernel(float *a, float *b, float *c, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = a[tid]+b[tid];
tid += blockDim.x * gridDim.x;
}
}
__global__ void real_add_kernel_ip(float *a, float *b, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
b[tid] = a[tid]+b[tid];
tid += blockDim.x * gridDim.x;
}
}
__global__ void real_sub_kernel(float *a, float *b, float *c, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
c[tid] = a[tid]-b[tid];
tid += blockDim.x * gridDim.x;
}
}
__global__ void real_sub_kernel_ip(float *a, float *b, long N){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while(tid < N){
b[tid] = a[tid]-b[tid];
tid += blockDim.x * gridDim.x;
}
}
//}}}
//{{{ real_stream
void real_stream(CUDA_PLAN_T *p,
void(*real_kernel)(float *a, float *b, float *c, long N),
void(*real_kernel_ip)(float *a, float *b, long N)
){
int i,j;
  // NOTE: p->nblocks and p->nthreads were sized in cuda_plan_init for a single chunk
  //       (elem_per_chunk elements), not for the full nelem.
if(p->inplace){
for(i=0;i<p->nchunks && !cuda_runtime_failed;i+=p->num_streams){
// Spread buffer one across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(cudaMemcpyAsync(p->in1_dev[j],
p->in1+((i+j)*p->elem_per_chunk),
p->elem_per_chunk*sizeof(float),
cudaMemcpyHostToDevice,
p->stream[j]));
}
// Spread buffer two across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(cudaMemcpyAsync(p->in2_dev[j],
p->in2+((i+j)*p->elem_per_chunk),
p->elem_per_chunk*sizeof(float),
cudaMemcpyHostToDevice,
p->stream[j]));
}
// Spread kernel execution across all streams
for(j=0;j<p->num_streams;j++){
// Run the cudaKernel
real_kernel_ip<<<p->nblocks,p->nthreads,0,p->stream[j]>>>(p->in1_dev[j],
p->in2_dev[j],
p->elem_per_chunk);
}
// Spread memcopy from device to host across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(cudaMemcpyAsync(p->in2+((i+j)*p->elem_per_chunk),
p->in2_dev[j],
p->elem_per_chunk*sizeof(float),
cudaMemcpyDeviceToHost,
p->stream[j]));
}
}
}
else {
for(i=0;i<p->nchunks && !cuda_runtime_failed;i+=p->num_streams){
// Spread buffer one across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(cudaMemcpyAsync(p->in1_dev[j],
p->in1+((i+j)*p->elem_per_chunk),
p->elem_per_chunk*sizeof(float),
cudaMemcpyHostToDevice,
p->stream[j]));
}
// Spread buffer two across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(cudaMemcpyAsync(p->in2_dev[j],
p->in2+((i+j)*p->elem_per_chunk),
p->elem_per_chunk*sizeof(float),
cudaMemcpyHostToDevice,
p->stream[j]));
}
// Spread kernel execution across all streams
for(j=0;j<p->num_streams;j++){
// Run the cudaKernel
real_kernel<<<p->nblocks,p->nthreads,0,p->stream[j]>>>(p->in1_dev[j],
p->in2_dev[j],
p->out_dev[j],
p->elem_per_chunk);
}
// Spread memcopy from device to host across all streams
for(j=0;j<p->num_streams;j++){
CUDA_ERROR_RUNTIME(cudaMemcpyAsync(p->out+((i+j)*p->elem_per_chunk),
p->out_dev[j],
p->elem_per_chunk*sizeof(float),
cudaMemcpyDeviceToHost,
p->stream[j]));
}
}
}
for(i=0;i<p->num_streams;i++) CUDA_ERROR_RUNTIME(cudaStreamSynchronize(p->stream[i]));
}
//}}}
//{{{ real_zcopy
void real_zcopy(CUDA_PLAN_T *p,
void(*real_kernel)(float *a, float *b, float *c, long N),
void(*real_kernel_ip)(float *a, float *b, long N)
){
if(p->inplace){
// Run the cudaKernel
real_kernel_ip<<<p->nblocks,p->nthreads>>>(p->in1_dev[0],
p->in2_dev[0],
p->nelem);
}
else{
// Run the cudaKernel
real_kernel<<<p->nblocks,p->nthreads>>>(p->in1_dev[0],
p->in2_dev[0],
p->out_dev[0],
p->nelem);
}
// NOTE: This is a key piece in using zero-copy memory
  cudaDeviceSynchronize();
}
//}}}
//{{{ cuda_v_real_mult
int cuda_v_real_mult(CUDA_PLAN_T *p){
int status=0;
int i;
if(p != NULL){
if(p->use_zero_copy){
real_zcopy(p,real_mult_kernel,real_mult_kernel_ip);
}
else if(p->use_streams){
real_stream(p,real_mult_kernel,real_mult_kernel_ip);
// Handle leftover
if(p->inplace){
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
p->in2[p->nchunks*p->elem_per_chunk+i] = p->in1[p->nchunks*p->elem_per_chunk+i]*p->in2[p->nchunks*p->elem_per_chunk+i];
}
}
}
else {
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
p->out[p->nchunks*p->elem_per_chunk+i] = p->in1[p->nchunks*p->elem_per_chunk+i]*p->in2[p->nchunks*p->elem_per_chunk+i];
}
}
}
}
else {
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ cuda_v_real_div
int cuda_v_real_div(CUDA_PLAN_T *p){
int status=0;
int i;
if(p != NULL){
if(p->use_zero_copy){
real_zcopy(p,real_div_kernel,real_div_kernel_ip);
}
else if(p->use_streams){
real_stream(p,real_div_kernel,real_div_kernel_ip);
// Handle leftover
if(p->inplace){
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
p->in2[p->nchunks*p->elem_per_chunk+i] = p->in1[p->nchunks*p->elem_per_chunk+i]/p->in2[p->nchunks*p->elem_per_chunk+i];
}
}
}
else {
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
p->out[p->nchunks*p->elem_per_chunk+i] = p->in1[p->nchunks*p->elem_per_chunk+i]/p->in2[p->nchunks*p->elem_per_chunk+i];
}
}
}
}
else {
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ cuda_v_real_add
int cuda_v_real_add(CUDA_PLAN_T *p){
int status=0;
int i;
if(p != NULL){
if(p->use_zero_copy){
real_zcopy(p,real_add_kernel,real_add_kernel_ip);
}
else if(p->use_streams){
real_stream(p,real_add_kernel,real_add_kernel_ip);
// Handle leftover
if(p->inplace){
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
p->in2[p->nchunks*p->elem_per_chunk+i] = p->in1[p->nchunks*p->elem_per_chunk+i]+p->in2[p->nchunks*p->elem_per_chunk+i];
}
}
}
else {
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
p->out[p->nchunks*p->elem_per_chunk+i] = p->in1[p->nchunks*p->elem_per_chunk+i]+p->in2[p->nchunks*p->elem_per_chunk+i];
}
}
}
}
else {
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//{{{ cuda_v_real_sub
int cuda_v_real_sub(CUDA_PLAN_T *p){
int status=0;
int i;
if(p != NULL){
if(p->use_zero_copy){
real_zcopy(p,real_sub_kernel,real_sub_kernel_ip);
}
else if(p->use_streams){
real_stream(p,real_sub_kernel,real_sub_kernel_ip);
// Handle leftover
if(p->inplace){
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
p->in2[p->nchunks*p->elem_per_chunk+i] = p->in1[p->nchunks*p->elem_per_chunk+i]-p->in2[p->nchunks*p->elem_per_chunk+i];
}
}
}
else {
if(p->elem_leftover > 0){
for(i=0;i<p->elem_leftover;i++){
p->out[p->nchunks*p->elem_per_chunk+i] = p->in1[p->nchunks*p->elem_per_chunk+i]-p->in2[p->nchunks*p->elem_per_chunk+i];
}
}
}
}
else {
}
}
else {
ERROR("Invalid plan.\n");
status=1;
}
return status;
}
//}}}
//}}}
//}}}
|
4da755c0c80b1d960e24a2ec0667bc461016b719.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "../gpu_lib/header.h"
#include "../utils/header.h"
#include <cstdio>
namespace ftxj {
__device__ inline float __ReLU(float x){
        return x < 0.0f ? 0.0f : (x > 32.0f ? 32.0f : x);
};
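    // __ReLU clamps activations to [0, 32]: for example __ReLU(-3.0f) == 0.0f,
    // __ReLU(7.0f) == 7.0f and __ReLU(50.0f) == 32.0f.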
#define WARPSIZE 32
#define GROUPSIZE 32
#define MINIBATCH 32
#define ROW_SUCC_LEN 32
#define NNZ_PRE_COL 32
#define BATCH_BLOCK 32
#define BATCH_SIZE 1792
#define UNROLL 8
__global__ void shared_memory_mm(float* A, float* B, float* C, int* index, float bias){
__shared__ float A_tile[BATCH_BLOCK][NNZ_PRE_COL];
// __shared__ float B_tile[ROW_SUCC_LEN][NNZ_PRE_COL];
//load A
int group_idx = threadIdx.x / GROUPSIZE;
int batch_start = blockIdx.y * BATCH_BLOCK;
int row_succ_start = blockIdx.x;
for(int i = threadIdx.x; i < BATCH_BLOCK * NNZ_PRE_COL; i += blockDim.x) {
A_tile[i % BATCH_BLOCK][i / BATCH_BLOCK] = A[index[row_succ_start * ROW_SUCC_LEN + i / BATCH_BLOCK] * BATCH_SIZE + batch_start + i % BATCH_BLOCK];
}
//load B
// for(int i = threadIdx.x; i < ROW_SUCC_LEN * NNZ_PRE_COL; i += blockDim.x) {
// B_tile[i / NNZ_PRE_COL][i % NNZ_PRE_COL] = B[row_succ_start * ROW_SUCC_LEN * NNZ_PRE_COL + i];
// }
__syncthreads();
// if(threadIdx.x == 0 && blockIdx.x == 0 && blockIdx.y == 0) {
// for(int i = 0; i < ROW_SUCC_LEN; ++i) {
// for(int j = 0; j < NNZ_PRE_COL; ++j) {
// printf("%f\n", B_tile[i][j]);
// }
// }
// }
register float BB[32] = {
0.0625, 0.0625, 0.0625, 0.0625,
0.0625, 0.0625, 0.0625, 0.0625,
0.0625, 0.0625, 0.0625, 0.0625,
0.0625, 0.0625, 0.0625, 0.0625,
0.0625, 0.0625, 0.0625, 0.0625,
0.0625, 0.0625, 0.0625, 0.0625,
0.0625, 0.0625, 0.0625, 0.0625,
0.0625, 0.0625, 0.0625, 0.0625
};
int B_col = threadIdx.x % ROW_SUCC_LEN;
int A_batch = (threadIdx.x / ROW_SUCC_LEN) * BATCH_BLOCK / 4;
for(int r = 0; r < BATCH_BLOCK / 4; ++r) {
register float res = bias;
for(int i = 0; i < NNZ_PRE_COL; i += UNROLL) {
                // Each lane of a warp reads the same A_tile word here, so this shared-memory
                // access is a broadcast rather than a bank conflict.
                res += A_tile[A_batch + r][i + 0] * BB[i + 0];
                res += A_tile[A_batch + r][i + 1] * BB[i + 1];
                res += A_tile[A_batch + r][i + 2] * BB[i + 2];
                res += A_tile[A_batch + r][i + 3] * BB[i + 3];
                res += A_tile[A_batch + r][i + 4] * BB[i + 4];
                res += A_tile[A_batch + r][i + 5] * BB[i + 5];
                res += A_tile[A_batch + r][i + 6] * BB[i + 6];
                res += A_tile[A_batch + r][i + 7] * BB[i + 7];
}
int res_col_idx = B_col >= 16 ? (row_succ_start * 16 + 512 + B_col - 16) : (row_succ_start * 16 + B_col);
// if(res_col_idx == 528 && A_batch == 0) {
// printf("(%d, %d), (%d), %f\n", blockIdx.x, blockIdx.y, threadIdx.x, res);
// }
C[res_col_idx * BATCH_SIZE + blockIdx.y * BATCH_BLOCK + A_batch + r] = __ReLU(res);
}
};
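    // Hedged launch sketch (editorial, not from the original host code): the indexing in
    // shared_memory_mm implies one block per (32-output-column group, BATCH_BLOCK-sample
    // tile) with four 32-thread groups per block, assuming 1024 columns as in the other
    // kernels. The guard macro FTXJ_LAUNCH_SKETCH and the helper name are hypothetical.
#ifdef FTXJ_LAUNCH_SKETCH
    inline void launch_shared_memory_mm(float* A, float* B, float* C, int* index, float bias){
        dim3 grid(1024 / ROW_SUCC_LEN, BATCH_SIZE / BATCH_BLOCK); // 32 x 56 blocks (inferred)
        dim3 block(4 * GROUPSIZE);                                // 128 threads (inferred)
        hipLaunchKernelGGL(shared_memory_mm, grid, block, 0, 0, A, B, C, index, bias);
    }
#endif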
__global__ void batch_parallel(float* __restrict__ A, float* __restrict__ B, float* __restrict__ C, int* __restrict__ index, float bias){
register float res[8] = {0.0};
// register float BB[32] = { // different thread run on same weight
// 0.0625, 0.0625, 0.0625, 0.0625,
// 0.0625, 0.0625, 0.0625, 0.0625,
// 0.0625, 0.0625, 0.0625, 0.0625,
// 0.0625, 0.0625, 0.0625, 0.0625,
// 0.0625, 0.0625, 0.0625, 0.0625,
// 0.0625, 0.0625, 0.0625, 0.0625,
// 0.0625, 0.0625, 0.0625, 0.0625,
// 0.0625, 0.0625, 0.0625, 0.0625
// };
int batch_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * 8;
int index_idx = (col_idx / 32) * 32;
for(int i = 0; i < 32; ++i) {
register float a_tmp = A[index[index_idx + i] * 1024 + batch_idx];
for(int j = 0; j < 8; ++j) {
res[j] += a_tmp * 0.0625;
}
}
        if(col_idx == 516 && batch_idx == 0) {
printf("%f\n", res[0]);
}
C[(col_idx + 0) * BATCH_SIZE + batch_idx] = __ReLU(res[0]);
C[(col_idx + 1) * BATCH_SIZE + batch_idx] = __ReLU(res[1]);
C[(col_idx + 2) * BATCH_SIZE + batch_idx] = __ReLU(res[2]);
C[(col_idx + 3) * BATCH_SIZE + batch_idx] = __ReLU(res[3]);
C[(col_idx + 4) * BATCH_SIZE + batch_idx] = __ReLU(res[4]);
C[(col_idx + 5) * BATCH_SIZE + batch_idx] = __ReLU(res[5]);
C[(col_idx + 6) * BATCH_SIZE + batch_idx] = __ReLU(res[6]);
C[(col_idx + 7) * BATCH_SIZE + batch_idx] = __ReLU(res[7]);
};
#define BLOCK_LOAD_A_LINE 32
#define BLOCK_LOAD_B_LINE 32
#define BLOCK_REDUCE_LINE 32
#define THREAD_LOAD_A_LINE 4
#define THREAD_LOAD_B_LINE 4
#define THREAD_A_BLOCKS (BLOCK_LOAD_A_LINE / THREAD_LOAD_A_LINE)
#define THREAD_B_BLOCKS (BLOCK_LOAD_B_LINE / THREAD_LOAD_B_LINE)
#define BATCH BATCH_SIZE
__global__ void outer_product_based(float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, int* __restrict__ index, float bias) {
__shared__ float A_shared_tile[BLOCK_REDUCE_LINE][BLOCK_LOAD_A_LINE];
__shared__ float B_shared_tile[BLOCK_LOAD_B_LINE][BLOCK_REDUCE_LINE];
float C_reg_tile[THREAD_LOAD_A_LINE][THREAD_LOAD_B_LINE] = {0.0};
float A_reg_tile[THREAD_LOAD_A_LINE];
float B_reg_tile[THREAD_LOAD_B_LINE];
const int A_tile_idx = blockIdx.x;
const int B_tile_idx = blockIdx.y;
for(int reduce_axis = 0; reduce_axis < 32; reduce_axis += BLOCK_REDUCE_LINE) {
// Load A, no bank conflict
for(int i = threadIdx.x; i < BLOCK_LOAD_A_LINE * BLOCK_REDUCE_LINE; i += blockDim.x) {
A_shared_tile[i / BLOCK_LOAD_A_LINE][i % BLOCK_LOAD_A_LINE] = A[index[B_tile_idx * 32 + i / BLOCK_LOAD_A_LINE] * BATCH + A_tile_idx * BLOCK_LOAD_A_LINE + i % BLOCK_LOAD_A_LINE];
}
// Load B, no bank conflict
for(int i = threadIdx.x; i < BLOCK_LOAD_B_LINE * BLOCK_REDUCE_LINE; i += blockDim.x) {
B_shared_tile[i / BLOCK_REDUCE_LINE][i % BLOCK_REDUCE_LINE] = B[B_tile_idx * 32 + i];
}
__syncthreads();
//Compute C
for(int r = 0; r < BLOCK_REDUCE_LINE; ++r) {
//Load A to reg
for(int i = 0; i < THREAD_LOAD_A_LINE; ++i) {
A_reg_tile[i] = A_shared_tile[r][(threadIdx.x / THREAD_A_BLOCKS) * THREAD_LOAD_A_LINE + i];
}
//Load B to reg
for(int i = 0; i < THREAD_LOAD_B_LINE; ++i) {
B_reg_tile[i] = B_shared_tile[(threadIdx.x % THREAD_A_BLOCKS) * THREAD_LOAD_B_LINE + i][r];
}
for(int a_idx = 0; a_idx < THREAD_LOAD_A_LINE; ++a_idx) {
for(int b_idx = 0; b_idx < THREAD_LOAD_B_LINE; ++b_idx) {
C_reg_tile[a_idx][b_idx] += A_reg_tile[a_idx] * B_reg_tile[b_idx];
}
}
}
__syncthreads();
}
const int B_write_begin = B_tile_idx * BLOCK_LOAD_B_LINE + (threadIdx.x % THREAD_A_BLOCKS) * THREAD_LOAD_B_LINE;
const int A_write_begin = A_tile_idx * BLOCK_LOAD_A_LINE + (threadIdx.x / THREAD_A_BLOCKS) * THREAD_LOAD_A_LINE;
// write back C
for (int b_idx = 0; b_idx < THREAD_LOAD_B_LINE; ++b_idx) {
for (int a_idx = 0; a_idx < THREAD_LOAD_A_LINE; ++a_idx) {
C[(B_write_begin + b_idx) * BATCH + A_write_begin + a_idx] = C_reg_tile[a_idx][b_idx];
}
}
};
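// uiuc_transfer: shared-memory SpMM, presumably modeled on the UIUC Graph Challenge kernels (hence the name).
// Each block stages a MINIBATCH x 256 tile of A in dynamic shared memory (input rows gathered through index),
// then every thread combines its column of the tile with 32 rows of the dense weight block B and writes
// MINIBATCH results straight to C (no bias or ReLU applied).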
__global__ void uiuc_transfer(float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, int* __restrict__ index, float bias) {
extern __shared__ float shared[];
float reduce[MINIBATCH] = {0.0};
for(int n = threadIdx.x; n < 256; n += blockDim.x){
int idx = index[blockIdx.y * 256 + n];
for(unsigned int f = 0; f < MINIBATCH; f++) {
shared[f * 256 + n] = A[(blockIdx.x * MINIBATCH + f) * 1024 + idx];
}
}
__syncthreads();
for(int r = 0; r < 32; ++r){
float val = B[blockIdx.y * 256 * 32 + r * 256 + threadIdx.x];
for(int f = 0; f < MINIBATCH; f++) {
reduce[f] += shared[f * 256 + threadIdx.x] * val;
}
}
for(int f = 0; f < MINIBATCH; f++) {
C[(blockIdx.x * MINIBATCH + f) * 1024 + blockIdx.y * 256 + threadIdx.x] = reduce[f];
}
}
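// uiuc_transfer_opt: experimental variant of uiuc_transfer. All of the shared-memory load schemes are commented
// out, so as written the compute loop reads uninitialized shared memory; only the transposed write-back through
// shared memory at the end is actually exercised.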
__global__ void uiuc_transfer_opt(float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, int* __restrict__ index, float bias) {
extern __shared__ float shared[];
float reduce[MINIBATCH] = {0.0};
int groupIdx = threadIdx.x / MINIBATCH;
int lane = threadIdx.x % MINIBATCH;
// int idx = index[blockIdx.y * 256 * 32 + threadIdx.x / MINIBATCH];
// int idx[32];
// for(int i = 0; i < 32; ++i) {
// idx[i] = index[blockIdx.y * 256 * 32 + threadIdx.x / MINIBATCH + i];
// }
// for(int n = threadIdx.x; n < 256 * MINIBATCH; n += blockDim.x){
// shared[n] = A[idx[n / 256] * BATCH_SIZE + blockIdx.x * MINIBATCH + lane];
// }
// for(int n = threadIdx.x; n < 256 * MINIBATCH; n += blockDim.x){
// shared[n] = A[(blockIdx.y * 256 + n / 32) * BATCH_SIZE + blockIdx.x * 32 + lane];
// }
__syncthreads();
for(int r = 0; r < 32; ++r){
float val = B[blockIdx.y * 256 * 32 + r * 256 + threadIdx.x];
for(int f = 0; f < MINIBATCH; f++) {
reduce[f] += shared[f * 256 + threadIdx.x] * val;
}
}
__syncthreads();
// for(int f = 0; f < MINIBATCH; f++) {
// C[(blockIdx.x * MINIBATCH + f) * 1024 + blockIdx.y * 256 + threadIdx.x] = reduce[f];
// }
// __shfl(0xffffffff, );
// for(int f = 0; f < MINIBATCH; f++) {
// C[(blockIdx.y * 256 + threadIdx.x) * 1024 + blockIdx.x * MINIBATCH + f] = reduce[f];
// }
for(int f = 0; f < MINIBATCH; ++f){
shared[threadIdx.x * MINIBATCH + f] = reduce[f];
}
__syncthreads();
for(int n = threadIdx.x; n < 256 * MINIBATCH; n += blockDim.x){
C[(blockIdx.y * 256 + n / MINIBATCH) * BATCH + blockIdx.x * MINIBATCH + n % MINIBATCH] = shared[(threadIdx.x / MINIBATCH) * MINIBATCH + (n % MINIBATCH)];
}
}
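// uiuc_transfer_oo: like uiuc_transfer, but the gathered A tile is loaded with a warp-lane layout and the per-thread
// results are transposed through shared memory before the final stores, so consecutive threads write consecutive
// elements of C. This is the kernel actually launched by test_shared_memory_mm below.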
__global__ void uiuc_transfer_oo(float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, int* __restrict__ index, float bias) {
extern __shared__ float shared[];
float reduce[32] = {0.0};
int groupIdx = threadIdx.x / 32;
int groupNum = blockDim.x / 32;
int lane = threadIdx.x % 32;
for(int n = threadIdx.x; n < 256 * MINIBATCH; n += blockDim.x){
int idx = index[blockIdx.y * 256 + n / 32];
shared[n] = A[idx * BATCH_SIZE + blockIdx.x * MINIBATCH + (n / 32) * 32 + lane];
}
__syncthreads();
for(int r = 0; r < 32; ++r){
float val = B[blockIdx.y * 256 * 32 + r * 256 + threadIdx.x];
for(int f = 0; f < MINIBATCH; f++) {
reduce[f] += shared[f * 256 + threadIdx.x] * val;
}
}
__syncthreads();
for(int f = 0; f < MINIBATCH; ++f){
shared[threadIdx.x * MINIBATCH + f] = reduce[f];
}
__syncthreads();
for(int n = threadIdx.x; n < 256 * MINIBATCH; n += blockDim.x){
C[(blockIdx.y * 256 + n / MINIBATCH) * BATCH + blockIdx.x * MINIBATCH + n % MINIBATCH] = shared[(threadIdx.x / MINIBATCH) * MINIBATCH + (n % MINIBATCH)];
}
}
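// Host-side driver: fills the input with ones, uploads the weights (val) and gather indices (row_access),
// launches uiuc_transfer_oo with MINIBATCH * 256 floats of dynamic shared memory, times it with events,
// reports time and TFLOP/s, and validates the output against the CPU reference in CpuSpmm::run_and_cmp.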
void test_shared_memory_mm(COOMatrix& coo, std::vector<float> &val, std::vector<int> &row_access, GpuEnv &env) {
float *A;
float *B;
float *C;
int *index;
int mybatch = BATCH_SIZE;
int neuron = 1024;
int bias = 0;
float * input = (float*)malloc(sizeof(float) * neuron * mybatch);
memset(input, 0, sizeof(float) * neuron * mybatch);
float * output = (float*)malloc(sizeof(float) * neuron * mybatch);
memset(output, 0, sizeof(float) * neuron * mybatch);
for(int i = 0; i < mybatch; ++i) {
for(int j = 0; j < neuron; ++j) {
input[i * neuron + j] = 1.0;
}
}
float* W = (float*)malloc(sizeof(float) * val.size());
for(int i = 0; i < val.size(); ++i) {
W[i] = val[i];
}
int* access = (int*)malloc(sizeof(int) * row_access.size());
for(int i = 0; i < row_access.size(); ++i) {
access[i] = row_access[i];
}
Safe_Call(hipMalloc((void**)&A, sizeof(float) * neuron * mybatch));
Safe_Call(hipMemcpy(A, input, sizeof(float) * neuron * mybatch, hipMemcpyHostToDevice));
Safe_Call(hipMalloc((void**)&B, sizeof(float) * val.size()));
Safe_Call(hipMemcpy(B, W, sizeof(float) * val.size(), hipMemcpyHostToDevice));
Safe_Call(hipMalloc((void**)&C, sizeof(float) * neuron * mybatch));
Safe_Call(hipMemset(C, 0, sizeof(float) * neuron * mybatch));
Safe_Call(hipMalloc((void**)&index, sizeof(int) * row_access.size()));
Safe_Call(hipMemcpy(index, access, sizeof(int) * row_access.size(), hipMemcpyHostToDevice));
env.add_event("naive_mm");
env.event_start_record("naive_mm");
// dim3 block(4 * ROW_SUCC_LEN);
// dim3 grid(neuron / 32, mybatch / BATCH_BLOCK);
// shared_memory_mm<<<grid, block, sizeof(float) * (BATCH_BLOCK + ROW_SUCC_LEN) * NNZ_PRE_COL, env.get_stream("kernel_timer")>>>(
// A, B, C, index, bias
// );
dim3 block(256);
dim3 grid(mybatch / (MINIBATCH), neuron / 256);
// uiuc_transfer_opt<<<grid, block, sizeof(float) * (MINIBATCH * 256), env.get_stream("naive_mm")>>>(
// A, B, C, index, bias
// );
hipLaunchKernelGGL(( uiuc_transfer_oo), dim3(grid), dim3(block), sizeof(float) * (MINIBATCH * 256), env.get_stream("naive_mm"),
A, B, C, index, bias
);
env.event_stop_record("naive_mm");
float time = env.get_event_time("naive_mm");
Safe_Call(hipMemcpy(output, C, sizeof(float) * neuron * mybatch, hipMemcpyDeviceToHost));
std::cout << "shared mm timer = " << time << std::endl;
std::cout << "shared mm Flops = " << (neuron * BATCH_SIZE * 32 * 2.0) / (time / 1000.0) / 1000 / 1000 / 1000 /1000 << std::endl;
CpuSpmm::run_and_cmp(coo, input, neuron, mybatch, output);
}
}; | 4da755c0c80b1d960e24a2ec0667bc461016b719.cu | #include <cuda.h>
#include "../gpu_lib/header.h"
#include "../utils/header.h"
#include <cstdio>
namespace ftxj {
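// Clamped ReLU used by every kernel below: negative inputs map to 0 and outputs saturate at 32
// (presumably the cap used by the Sparse DNN Graph Challenge setup).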
__device__ inline float __ReLU(float x){
return x<0.0?0.0:x>32.0?32.0:x;
};
#define WARPSIZE 32
#define GROUPSIZE 32
#define MINIBATCH 32
#define ROW_SUCC_LEN 32
#define NNZ_PRE_COL 32
#define BATCH_BLOCK 32
#define BATCH_SIZE 1792
#define UNROLL 8
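// shared_memory_mm: each block loads a BATCH_BLOCK x NNZ_PRE_COL tile of A into shared memory (rows gathered
// through index) and keeps the 32 weights in registers, hard-coded to 0.0625 since the shared-memory load of B is
// commented out. Each thread then accumulates one output column for a quarter of the batch block, adds the bias,
// and writes the result through the clamped ReLU.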
__global__ void shared_memory_mm(float* A, float* B, float* C, int* index, float bias){
__shared__ float A_tile[BATCH_BLOCK][NNZ_PRE_COL];
// __shared__ float B_tile[ROW_SUCC_LEN][NNZ_PRE_COL];
//load A
int group_idx = threadIdx.x / GROUPSIZE;
int batch_start = blockIdx.y * BATCH_BLOCK;
int row_succ_start = blockIdx.x;
for(int i = threadIdx.x; i < BATCH_BLOCK * NNZ_PRE_COL; i += blockDim.x) {
A_tile[i % BATCH_BLOCK][i / BATCH_BLOCK] = A[index[row_succ_start * ROW_SUCC_LEN + i / BATCH_BLOCK] * BATCH_SIZE + batch_start + i % BATCH_BLOCK];
}
//load B
// for(int i = threadIdx.x; i < ROW_SUCC_LEN * NNZ_PRE_COL; i += blockDim.x) {
// B_tile[i / NNZ_PRE_COL][i % NNZ_PRE_COL] = B[row_succ_start * ROW_SUCC_LEN * NNZ_PRE_COL + i];
// }
__syncthreads();
// if(threadIdx.x == 0 && blockIdx.x == 0 && blockIdx.y == 0) {
// for(int i = 0; i < ROW_SUCC_LEN; ++i) {
// for(int j = 0; j < NNZ_PRE_COL; ++j) {
// printf("%f\n", B_tile[i][j]);
// }
// }
// }
register float BB[32] = {
0.0625, 0.0625, 0.0625, 0.0625,
0.0625, 0.0625, 0.0625, 0.0625,
0.0625, 0.0625, 0.0625, 0.0625,
0.0625, 0.0625, 0.0625, 0.0625,
0.0625, 0.0625, 0.0625, 0.0625,
0.0625, 0.0625, 0.0625, 0.0625,
0.0625, 0.0625, 0.0625, 0.0625,
0.0625, 0.0625, 0.0625, 0.0625
};
int B_col = threadIdx.x % ROW_SUCC_LEN;
int A_batch = (threadIdx.x / ROW_SUCC_LEN) * BATCH_BLOCK / 4;
for(int r = 0; r < BATCH_BLOCK / 4; ++r) {
register float res = bias;
for(int i = 0; i < NNZ_PRE_COL; i += UNROLL) {
res += A_tile[A_batch + r][i + 0] * BB[i + 0]; // bank conflict
res += A_tile[A_batch + r][i + 1] * BB[i + 1]; // bank conflict
res += A_tile[A_batch + r][i + 2] * BB[i + 2]; // bank conflict
res += A_tile[A_batch + r][i + 3] * BB[i + 3]; // bank conflict
res += A_tile[A_batch + r][i + 4] * BB[i + 4]; // bank conflict
res += A_tile[A_batch + r][i + 5] * BB[i + 5]; // bank conflict
res += A_tile[A_batch + r][i + 6] * BB[i + 6]; // bank conflict
res += A_tile[A_batch + r][i + 7] * BB[i + 7]; // bank conflict
}
int res_col_idx = B_col >= 16 ? (row_succ_start * 16 + 512 + B_col - 16) : (row_succ_start * 16 + B_col);
// if(res_col_idx == 528 && A_batch == 0) {
// printf("(%d, %d), (%d), %f\n", blockIdx.x, blockIdx.y, threadIdx.x, res);
// }
C[res_col_idx * BATCH_SIZE + blockIdx.y * BATCH_BLOCK + A_batch + r] = __ReLU(res);
}
};
__global__ void batch_parallel(float* __restrict__ A, float* __restrict__ B, float* __restrict__ C, int* __restrict__ index, float bias){
register float res[8] = {0.0};
// register float BB[32] = { // different thread run on same weight
// 0.0625, 0.0625, 0.0625, 0.0625,
// 0.0625, 0.0625, 0.0625, 0.0625,
// 0.0625, 0.0625, 0.0625, 0.0625,
// 0.0625, 0.0625, 0.0625, 0.0625,
// 0.0625, 0.0625, 0.0625, 0.0625,
// 0.0625, 0.0625, 0.0625, 0.0625,
// 0.0625, 0.0625, 0.0625, 0.0625,
// 0.0625, 0.0625, 0.0625, 0.0625
// };
int batch_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * 8;
int index_idx = (col_idx / 32) * 32;
for(int i = 0; i < 32; ++i) {
register float a_tmp = A[index[index_idx + i] * 1024 + batch_idx];
for(int j = 0; j < 8; ++j) {
res[j] += a_tmp * 0.0625;
}
}
if(col_idx == 516 && batch_idx == 0) {
printf("%f\n", res[0]);
}
C[(col_idx + 0) * BATCH_SIZE + batch_idx] = __ReLU(res[0]);
C[(col_idx + 1) * BATCH_SIZE + batch_idx] = __ReLU(res[1]);
C[(col_idx + 2) * BATCH_SIZE + batch_idx] = __ReLU(res[2]);
C[(col_idx + 3) * BATCH_SIZE + batch_idx] = __ReLU(res[3]);
C[(col_idx + 4) * BATCH_SIZE + batch_idx] = __ReLU(res[4]);
C[(col_idx + 5) * BATCH_SIZE + batch_idx] = __ReLU(res[5]);
C[(col_idx + 6) * BATCH_SIZE + batch_idx] = __ReLU(res[6]);
C[(col_idx + 7) * BATCH_SIZE + batch_idx] = __ReLU(res[7]);
};
#define BLOCK_LOAD_A_LINE 32
#define BLOCK_LOAD_B_LINE 32
#define BLOCK_REDUCE_LINE 32
#define THREAD_LOAD_A_LINE 4
#define THREAD_LOAD_B_LINE 4
#define THREAD_A_BLOCKS (BLOCK_LOAD_A_LINE / THREAD_LOAD_A_LINE)
#define THREAD_B_BLOCKS (BLOCK_LOAD_B_LINE / THREAD_LOAD_B_LINE)
#define BATCH BATCH_SIZE
__global__ void outer_product_based(float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, int* __restrict__ index, float bias) {
__shared__ float A_shared_tile[BLOCK_REDUCE_LINE][BLOCK_LOAD_A_LINE];
__shared__ float B_shared_tile[BLOCK_LOAD_B_LINE][BLOCK_REDUCE_LINE];
float C_reg_tile[THREAD_LOAD_A_LINE][THREAD_LOAD_B_LINE] = {0.0};
float A_reg_tile[THREAD_LOAD_A_LINE];
float B_reg_tile[THREAD_LOAD_B_LINE];
const int A_tile_idx = blockIdx.x;
const int B_tile_idx = blockIdx.y;
for(int reduce_axis = 0; reduce_axis < 32; reduce_axis += BLOCK_REDUCE_LINE) {
// Load A, no bank conflict
for(int i = threadIdx.x; i < BLOCK_LOAD_A_LINE * BLOCK_REDUCE_LINE; i += blockDim.x) {
A_shared_tile[i / BLOCK_LOAD_A_LINE][i % BLOCK_LOAD_A_LINE] = A[index[B_tile_idx * 32 + i / BLOCK_LOAD_A_LINE] * BATCH + A_tile_idx * BLOCK_LOAD_A_LINE + i % BLOCK_LOAD_A_LINE];
}
// Load B, no bank conflict
for(int i = threadIdx.x; i < BLOCK_LOAD_B_LINE * BLOCK_REDUCE_LINE; i += blockDim.x) {
B_shared_tile[i / BLOCK_REDUCE_LINE][i % BLOCK_REDUCE_LINE] = B[B_tile_idx * 32 + i];
}
__syncthreads();
//Compute C
for(int r = 0; r < BLOCK_REDUCE_LINE; ++r) {
//Load A to reg
for(int i = 0; i < THREAD_LOAD_A_LINE; ++i) {
A_reg_tile[i] = A_shared_tile[r][(threadIdx.x / THREAD_A_BLOCKS) * THREAD_LOAD_A_LINE + i];
}
//Load B to reg
for(int i = 0; i < THREAD_LOAD_B_LINE; ++i) {
B_reg_tile[i] = B_shared_tile[(threadIdx.x % THREAD_A_BLOCKS) * THREAD_LOAD_B_LINE + i][r];
}
for(int a_idx = 0; a_idx < THREAD_LOAD_A_LINE; ++a_idx) {
for(int b_idx = 0; b_idx < THREAD_LOAD_B_LINE; ++b_idx) {
C_reg_tile[a_idx][b_idx] += A_reg_tile[a_idx] * B_reg_tile[b_idx];
}
}
}
__syncthreads();
}
const int B_write_begin = B_tile_idx * BLOCK_LOAD_B_LINE + (threadIdx.x % THREAD_A_BLOCKS) * THREAD_LOAD_B_LINE;
const int A_write_begin = A_tile_idx * BLOCK_LOAD_A_LINE + (threadIdx.x / THREAD_A_BLOCKS) * THREAD_LOAD_A_LINE;
// write back C
for (int b_idx = 0; b_idx < THREAD_LOAD_B_LINE; ++b_idx) {
for (int a_idx = 0; a_idx < THREAD_LOAD_A_LINE; ++a_idx) {
C[(B_write_begin + b_idx) * BATCH + A_write_begin + a_idx] = C_reg_tile[a_idx][b_idx];
}
}
};
__global__ void uiuc_transfer(float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, int* __restrict__ index, float bias) {
extern __shared__ float shared[];
float reduce[MINIBATCH] = {0.0};
for(int n = threadIdx.x; n < 256; n += blockDim.x){
int idx = index[blockIdx.y * 256 + n];
for(unsigned int f = 0; f < MINIBATCH; f++) {
shared[f * 256 + n] = A[(blockIdx.x * MINIBATCH + f) * 1024 + idx];
}
}
__syncthreads();
for(int r = 0; r < 32; ++r){
float val = B[blockIdx.y * 256 * 32 + r * 256 + threadIdx.x];
for(int f = 0; f < MINIBATCH; f++) {
reduce[f] += shared[f * 256 + threadIdx.x] * val;
}
}
for(int f = 0; f < MINIBATCH; f++) {
C[(blockIdx.x * MINIBATCH + f) * 1024 + blockIdx.y * 256 + threadIdx.x] = reduce[f];
}
}
__global__ void uiuc_transfer_opt(float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, int* __restrict__ index, float bias) {
extern __shared__ float shared[];
float reduce[MINIBATCH] = {0.0};
int groupIdx = threadIdx.x / MINIBATCH;
int lane = threadIdx.x % MINIBATCH;
// int idx = index[blockIdx.y * 256 * 32 + threadIdx.x / MINIBATCH];
// int idx[32];
// for(int i = 0; i < 32; ++i) {
// idx[i] = index[blockIdx.y * 256 * 32 + threadIdx.x / MINIBATCH + i];
// }
// for(int n = threadIdx.x; n < 256 * MINIBATCH; n += blockDim.x){
// shared[n] = A[idx[n / 256] * BATCH_SIZE + blockIdx.x * MINIBATCH + lane];
// }
// for(int n = threadIdx.x; n < 256 * MINIBATCH; n += blockDim.x){
// shared[n] = A[(blockIdx.y * 256 + n / 32) * BATCH_SIZE + blockIdx.x * 32 + lane];
// }
__syncthreads();
for(int r = 0; r < 32; ++r){
float val = B[blockIdx.y * 256 * 32 + r * 256 + threadIdx.x];
for(int f = 0; f < MINIBATCH; f++) {
reduce[f] += shared[f * 256 + threadIdx.x] * val;
}
}
__syncthreads();
// for(int f = 0; f < MINIBATCH; f++) {
// C[(blockIdx.x * MINIBATCH + f) * 1024 + blockIdx.y * 256 + threadIdx.x] = reduce[f];
// }
// __shfl(0xffffffff, );
// for(int f = 0; f < MINIBATCH; f++) {
// C[(blockIdx.y * 256 + threadIdx.x) * 1024 + blockIdx.x * MINIBATCH + f] = reduce[f];
// }
for(int f = 0; f < MINIBATCH; ++f){
shared[threadIdx.x * MINIBATCH + f] = reduce[f];
}
__syncthreads();
for(int n = threadIdx.x; n < 256 * MINIBATCH; n += blockDim.x){
C[(blockIdx.y * 256 + n / MINIBATCH) * BATCH + blockIdx.x * MINIBATCH + n % MINIBATCH] = shared[(threadIdx.x / MINIBATCH) * MINIBATCH + (n % MINIBATCH)];
}
}
__global__ void uiuc_transfer_oo(float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, int* __restrict__ index, float bias) {
extern __shared__ float shared[];
float reduce[32] = {0.0};
int groupIdx = threadIdx.x / 32;
int groupNum = blockDim.x / 32;
int lane = threadIdx.x % 32;
for(int n = threadIdx.x; n < 256 * MINIBATCH; n += blockDim.x){
int idx = index[blockIdx.y * 256 + n / 32];
shared[n] = A[idx * BATCH_SIZE + blockIdx.x * MINIBATCH + (n / 32) * 32 + lane];
}
__syncthreads();
for(int r = 0; r < 32; ++r){
float val = B[blockIdx.y * 256 * 32 + r * 256 + threadIdx.x];
for(int f = 0; f < MINIBATCH; f++) {
reduce[f] += shared[f * 256 + threadIdx.x] * val;
}
}
__syncthreads();
for(int f = 0; f < MINIBATCH; ++f){
shared[threadIdx.x * MINIBATCH + f] = reduce[f];
}
__syncthreads();
for(int n = threadIdx.x; n < 256 * MINIBATCH; n += blockDim.x){
C[(blockIdx.y * 256 + n / MINIBATCH) * BATCH + blockIdx.x * MINIBATCH + n % MINIBATCH] = shared[(threadIdx.x / MINIBATCH) * MINIBATCH + (n % MINIBATCH)];
}
}
void test_shared_memory_mm(COOMatrix& coo, std::vector<float> &val, std::vector<int> &row_access, GpuEnv &env) {
float *A;
float *B;
float *C;
int *index;
int mybatch = BATCH_SIZE;
int neuron = 1024;
int bias = 0;
float * input = (float*)malloc(sizeof(float) * neuron * mybatch);
memset(input, 0, sizeof(float) * neuron * mybatch);
float * output = (float*)malloc(sizeof(float) * neuron * mybatch);
memset(output, 0, sizeof(float) * neuron * mybatch);
for(int i = 0; i < mybatch; ++i) {
for(int j = 0; j < neuron; ++j) {
input[i * neuron + j] = 1.0;
}
}
float* W = (float*)malloc(sizeof(float) * val.size());
for(int i = 0; i < val.size(); ++i) {
W[i] = val[i];
}
int* access = (int*)malloc(sizeof(int) * row_access.size());
for(int i = 0; i < row_access.size(); ++i) {
access[i] = row_access[i];
}
Safe_Call(cudaMalloc((void**)&A, sizeof(float) * neuron * mybatch));
Safe_Call(cudaMemcpy(A, input, sizeof(float) * neuron * mybatch, cudaMemcpyHostToDevice));
Safe_Call(cudaMalloc((void**)&B, sizeof(float) * val.size()));
Safe_Call(cudaMemcpy(B, W, sizeof(float) * val.size(), cudaMemcpyHostToDevice));
Safe_Call(cudaMalloc((void**)&C, sizeof(float) * neuron * mybatch));
Safe_Call(cudaMemset(C, 0, sizeof(float) * neuron * mybatch));
Safe_Call(cudaMalloc((void**)&index, sizeof(int) * row_access.size()));
Safe_Call(cudaMemcpy(index, access, sizeof(int) * row_access.size(), cudaMemcpyHostToDevice));
env.add_event("naive_mm");
env.event_start_record("naive_mm");
// dim3 block(4 * ROW_SUCC_LEN);
// dim3 grid(neuron / 32, mybatch / BATCH_BLOCK);
// shared_memory_mm<<<grid, block, sizeof(float) * (BATCH_BLOCK + ROW_SUCC_LEN) * NNZ_PRE_COL, env.get_stream("kernel_timer")>>>(
// A, B, C, index, bias
// );
dim3 block(256);
dim3 grid(mybatch / (MINIBATCH), neuron / 256);
// uiuc_transfer_opt<<<grid, block, sizeof(float) * (MINIBATCH * 256), env.get_stream("naive_mm")>>>(
// A, B, C, index, bias
// );
uiuc_transfer_oo<<<grid, block, sizeof(float) * (MINIBATCH * 256), env.get_stream("naive_mm")>>>(
A, B, C, index, bias
);
env.event_stop_record("naive_mm");
float time = env.get_event_time("naive_mm");
Safe_Call(cudaMemcpy(output, C, sizeof(float) * neuron * mybatch, cudaMemcpyDeviceToHost));
std::cout << "shared mm timer = " << time << std::endl;
std::cout << "shared mm Flops = " << (neuron * BATCH_SIZE * 32 * 2.0) / (time / 1000.0) / 1000 / 1000 / 1000 /1000 << std::endl;
CpuSpmm::run_and_cmp(coo, input, neuron, mybatch, output);
}
}; |
2fc0903ad5c42f66a6607b6aa0018af40b88cff0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void mykernel(void){
}
int main(void){
int deviceCount;
hipLaunchKernelGGL(( mykernel), dim3(1),dim3(1), 0, 0, );
hipGetDeviceCount(&deviceCount);
printf("Hello World! Total Device: %d\n", deviceCount);
return 0;
}
| 2fc0903ad5c42f66a6607b6aa0018af40b88cff0.cu | #include <stdio.h>
__global__ void mykernel(void){
}
int main(void){
int deviceCount;
mykernel<<<1,1>>>();
cudaGetDeviceCount(&deviceCount);
printf("Hello World! Total Device: %d\n", deviceCount);
return 0;
}
|
7962a07ee9737ff14120f6036f8924395b36ff29.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2017, Miroslav Stoyanov
*
* This file is part of
* Toolkit for Adaptive Stochastic Modeling And Non-Intrusive ApproximatioN: TASMANIAN
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
* and the following disclaimer in the documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
* or promote products derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* UT-BATTELLE, LLC AND THE UNITED STATES GOVERNMENT MAKE NO REPRESENTATIONS AND DISCLAIM ALL WARRANTIES, BOTH EXPRESSED AND IMPLIED.
* THERE ARE NO EXPRESS OR IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, OR THAT THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY PATENT,
* COPYRIGHT, TRADEMARK, OR OTHER PROPRIETARY RIGHTS, OR THAT THE SOFTWARE WILL ACCOMPLISH THE INTENDED RESULTS OR THAT THE SOFTWARE OR ITS USE WILL NOT RESULT IN INJURY OR DAMAGE.
* THE USER ASSUMES RESPONSIBILITY FOR ALL LIABILITIES, PENALTIES, FINES, CLAIMS, CAUSES OF ACTION, AND COSTS AND EXPENSES, CAUSED BY, RESULTING FROM OR ARISING OUT OF,
* IN WHOLE OR IN PART THE USE, STORAGE OR DISPOSAL OF THE SOFTWARE.
*/
#ifndef __TASMANIAN_SPARSE_GRID_CUDA_KERNELS_CU
#define __TASMANIAN_SPARSE_GRID_CUDA_KERNELS_CU
#include "tsgAcceleratedDataStructures.hpp"
#include "tsgCudaLinearAlgebra.hpp"
#include "tsgCudaBasisEvaluations.hpp"
// several kernels assume a linear distribution of the threads and can be executed with "practically unlimited" number of threads
// thus we can set this to the CUDA max number of threads, based on the current cuda version
constexpr int _MAX_CUDA_THREADS = 1024;
// max number of blocks per grid direction
constexpr int _MAX_CUDA_BLOCKS = 65535;
/*
* Create a 1-D CUDA thread grid using the total_threads and number of threads per block.
* Basically, computes the number of blocks but no more than _MAX_CUDA_BLOCKS.
*/
struct ThreadGrid1d{
// Compute the threads and blocks.
ThreadGrid1d(long long total_threads, long long num_per_block) :
threads(static_cast<int>(num_per_block)),
blocks(static_cast<int>(::min(total_threads / threads + ((total_threads % threads == 0) ? 0 : 1), static_cast<long long>(_MAX_CUDA_BLOCKS))))
{}
// number of threads
int const threads;
// number of blocks
int const blocks;
};
namespace TasGrid{
template<typename T>
void TasGpu::dtrans2can(AccelerationContext const*, bool use01, int dims, int num_x, int pad_size, double const *gpu_trans_a, double const *gpu_trans_b, T const *gpu_x_transformed, T *gpu_x_canonical){
int num_blocks = (num_x * dims) / _MAX_CUDA_THREADS + (((num_x * dims) % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks >= _MAX_CUDA_BLOCKS) num_blocks = _MAX_CUDA_BLOCKS;
hipLaunchKernelGGL(( tasgpu_transformed_to_canonical<T, double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), (2*pad_size) * sizeof(double), 0, dims, num_x, pad_size, gpu_trans_a, gpu_trans_b, gpu_x_transformed, gpu_x_canonical);
if (use01)hipLaunchKernelGGL(( tasgpu_m11_to_01<T, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, dims * num_x, gpu_x_canonical);
}
template void TasGpu::dtrans2can<double>(AccelerationContext const*, bool, int, int, int, double const*, double const*, double const*, double*);
template void TasGpu::dtrans2can<float>(AccelerationContext const*, bool, int, int, int, double const*, double const*, float const*, float*);
// local polynomial basis functions, DENSE algorithm
template<typename T>
void TasGpu::devalpwpoly(AccelerationContext const*, int order, TypeOneDRule rule, int dims, int num_x, int num_points, const T *gpu_x, const T *gpu_nodes, const T *gpu_support, T *gpu_y){
// each block thread runs 1024 threads and processes 32 points (or basis functions)
int num_blocks = (num_points / 32) + ((num_points % 32 == 0) ? 0 : 1);
// order == 1 is considered "default" so that the compiler doesn't complain about missing default statement
// semilocalp cannot have order less than 2, only rule_localp can have order 0 (this gets overwritten in makeLocalPolynomialGrid())
if (rule == rule_localp){
switch(order){
case 0:
hipLaunchKernelGGL(( tasgpu_devalpwpoly<T, 0, rule_localp, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
case 2:hipLaunchKernelGGL(( tasgpu_devalpwpoly<T, 2, rule_localp, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly<T, 1, rule_localp, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else if (rule == rule_localp0){
switch(order){
case 2:hipLaunchKernelGGL(( tasgpu_devalpwpoly<T, 2, rule_localp0, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly<T, 1, rule_localp0, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else if (rule == rule_localpb){
switch(order){
case 2:hipLaunchKernelGGL(( tasgpu_devalpwpoly<T, 2, rule_localpb, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly<T, 1, rule_localpb, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else if (rule == rule_semilocalp){
hipLaunchKernelGGL(( tasgpu_devalpwpoly<T, 2, rule_semilocalp, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}else{ // rule == wavelet
hipLaunchKernelGGL(( tasgpu_devalpwpoly<T, 1, rule_wavelet, 32, 64>), dim3(num_blocks), dim3(1024), 0, 0, dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}
template void TasGpu::devalpwpoly<double>(AccelerationContext const*, int, TypeOneDRule, int, int, int, const double*, const double*, const double*, double*);
template void TasGpu::devalpwpoly<float>(AccelerationContext const*, int, TypeOneDRule, int, int, int, const float*, const float*, const float*, float*);
// there is a switch statement that realizes templates for each combination of rule/order
// make one function that covers that switch, the rest is passed from devalpwpoly_sparse
template<typename T, int THREADS, int TOPLEVEL, bool fill>
inline void devalpwpoly_sparse_realize_rule_order(AccelerationContext const*, int order, TypeOneDRule rule, int dims, int num_x,
const T *x, const T *nodes, const T *support,
const int *hpntr, const int *hindx, int num_roots, const int *roots,
int *spntr, int *sindx, T *svals){
int num_blocks = num_x / THREADS + ((num_x % THREADS == 0) ? 0 : 1);
if (num_blocks >= _MAX_CUDA_BLOCKS) num_blocks = _MAX_CUDA_BLOCKS;
if (rule == rule_localp){
switch(order){
case 0:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 0, rule_localp, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
case 2:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localp, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localp, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else if (rule == rule_localp0){
switch(order){
case 2:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localp0, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localp0, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else if (rule == rule_localpb){
switch(order){
case 2:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localpb, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localpb, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else{ // rule == rule_semilocalp
hipLaunchKernelGGL(( tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_semilocalp, fill>), dim3(num_blocks), dim3(THREADS), 0, 0,
dims, num_x, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}
// local polynomial basis functions, SPARSE algorithm (2 passes, one pass to compute the non-zeros and one pass to evaluate)
template<typename T>
void TasGpu::devalpwpoly_sparse(AccelerationContext const *acc, int order, TypeOneDRule rule, int dims, int num_x, const T *gpu_x,
const GpuVector<T> &gpu_nodes, const GpuVector<T> &gpu_support,
const GpuVector<int> &gpu_hpntr, const GpuVector<int> &gpu_hindx, const GpuVector<int> &gpu_hroots,
GpuVector<int> &gpu_spntr, GpuVector<int> &gpu_sindx, GpuVector<T> &gpu_svals){
gpu_spntr.resize(acc, num_x + 1);
// call with fill == false to count the non-zeros per row of the matrix
devalpwpoly_sparse_realize_rule_order<T, 64, 46, false>
(acc, order, rule, dims, num_x, gpu_x, gpu_nodes.data(), gpu_support.data(),
gpu_hpntr.data(), gpu_hindx.data(), (int) gpu_hroots.size(), gpu_hroots.data(), gpu_spntr.data(), 0, 0);
std::vector<int> cpu_spntr;
gpu_spntr.unload(acc, cpu_spntr);
cpu_spntr[0] = 0;
int nz = 0;
for(auto &i : cpu_spntr){
i += nz;
nz = i;
}
gpu_spntr.load(acc, cpu_spntr);
gpu_sindx.resize(acc, nz);
gpu_svals.resize(acc, nz);
// call with fill == true to load the non-zeros
devalpwpoly_sparse_realize_rule_order<T, 64, 46, true>
(acc, order, rule, dims, num_x, gpu_x, gpu_nodes.data(), gpu_support.data(),
gpu_hpntr.data(), gpu_hindx.data(), (int) gpu_hroots.size(), gpu_hroots.data(), gpu_spntr.data(), gpu_sindx.data(), gpu_svals.data());
}
template void TasGpu::devalpwpoly_sparse<double>(AccelerationContext const*, int, TypeOneDRule, int, int, const double*, const GpuVector<double>&, const GpuVector<double>&,
const GpuVector<int>&, const GpuVector<int>&, const GpuVector<int>&,
GpuVector<int>&, GpuVector<int>&, GpuVector<double>&);
template void TasGpu::devalpwpoly_sparse<float>(AccelerationContext const*, int, TypeOneDRule, int, int, const float*, const GpuVector<float>&, const GpuVector<float>&,
const GpuVector<int>&, const GpuVector<int>&, const GpuVector<int>&,
GpuVector<int>&, GpuVector<int>&, GpuVector<float>&);
// Sequence Grid basis evaluations
template<typename T>
void TasGpu::devalseq(AccelerationContext const *acc, int dims, int num_x, const std::vector<int> &max_levels, const T *gpu_x, const GpuVector<int> &num_nodes,
const GpuVector<int> &points, const GpuVector<T> &nodes, const GpuVector<T> &coeffs, T *gpu_result){
std::vector<int> offsets(dims);
offsets[0] = 0;
for(int d=1; d<dims; d++) offsets[d] = offsets[d-1] + num_x * (max_levels[d-1] + 1);
size_t num_total = offsets[dims-1] + num_x * (max_levels[dims-1] + 1);
int maxl = max_levels[0]; for(auto l : max_levels) if (maxl < l) maxl = l;
GpuVector<int> gpu_offsets(acc, offsets);
GpuVector<T> cache1D(acc, num_total);
int num_blocks = num_x / _MAX_CUDA_THREADS + ((num_x % _MAX_CUDA_THREADS == 0) ? 0 : 1);
hipLaunchKernelGGL(( tasgpu_dseq_build_cache<T, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0,
dims, num_x, gpu_x, nodes.data(), coeffs.data(), maxl+1, gpu_offsets.data(), num_nodes.data(), cache1D.data());
num_blocks = num_x / 32 + ((num_x % 32 == 0) ? 0 : 1);
hipLaunchKernelGGL(( tasgpu_dseq_eval_sharedpoints<T, 32>), dim3(num_blocks), dim3(1024), 0, 0,
dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_result);
}
template void TasGpu::devalseq<double>(AccelerationContext const*, int dims, int num_x, const std::vector<int> &max_levels,
const double *gpu_x, const GpuVector<int> &num_nodes,
const GpuVector<int> &points, const GpuVector<double> &nodes, const GpuVector<double> &coeffs, double *gpu_result);
template void TasGpu::devalseq<float>(AccelerationContext const*, int dims, int num_x, const std::vector<int> &max_levels,
const float *gpu_x, const GpuVector<int> &num_nodes,
const GpuVector<int> &points, const GpuVector<float> &nodes, const GpuVector<float> &coeffs, float *gpu_result);
// Fourier Grid basis evaluations
template<typename T>
void TasGpu::devalfor(AccelerationContext const *acc, int dims, int num_x, const std::vector<int> &max_levels, const T *gpu_x,
const GpuVector<int> &num_nodes, const GpuVector<int> &points, T *gpu_wreal, typename GpuVector<T>::value_type *gpu_wimag){
std::vector<int> max_nodes(dims);
for(int j=0; j<dims; j++){
int n = 1;
for(int i=0; i<max_levels[j]; i++) n *= 3;
max_nodes[j] = n;
}
std::vector<int> offsets(dims);
offsets[0] = 0;
for(int d=1; d<dims; d++) offsets[d] = offsets[d-1] + 2 * num_x * (max_nodes[d-1] + 1);
size_t num_total = offsets[dims-1] + 2 * num_x * (max_nodes[dims-1] + 1);
GpuVector<int> gpu_offsets(acc, offsets);
GpuVector<T> cache1D(acc, num_total);
int num_blocks = num_x / _MAX_CUDA_THREADS + ((num_x % _MAX_CUDA_THREADS == 0) ? 0 : 1);
hipLaunchKernelGGL(( tasgpu_dfor_build_cache<T, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0,
dims, num_x, gpu_x, gpu_offsets.data(), num_nodes.data(), cache1D.data());
num_blocks = num_x / 32 + ((num_x % 32 == 0) ? 0 : 1);
if (gpu_wimag == 0){
hipLaunchKernelGGL(( tasgpu_dfor_eval_sharedpoints<T, 32, true>), dim3(num_blocks), dim3(1024), 0, 0,
dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_wreal, 0);
}else{
hipLaunchKernelGGL(( tasgpu_dfor_eval_sharedpoints<T, 32, false>), dim3(num_blocks), dim3(1024), 0, 0,
dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_wreal, gpu_wimag);
}
}
template void TasGpu::devalfor<double>(AccelerationContext const*, int, int, const std::vector<int>&, const double*, const GpuVector<int>&, const GpuVector<int>&, double*, double*);
template void TasGpu::devalfor<float>(AccelerationContext const*, int, int, const std::vector<int>&, const float*, const GpuVector<int>&, const GpuVector<int>&, float*, float*);
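// Global Grid basis evaluations: build the per-dimension 1D cache (nested / non-nested, with a special case for
// clenshaw-curtis-zero), zero the result, then accumulate the contribution of every active tensor.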
template<typename T>
void TasGpu::devalglo(AccelerationContext const *acc, bool is_nested, bool is_clenshawcurtis0,
int dims, int num_x, int num_p, int num_basis,
T const *gpu_x, GpuVector<T> const &nodes, GpuVector<T> const &coeff, GpuVector<T> const &tensor_weights,
GpuVector<int> const &nodes_per_level, GpuVector<int> const &offset_per_level, GpuVector<int> const &map_dimension, GpuVector<int> const &map_level,
GpuVector<int> const &active_tensors, GpuVector<int> const &active_num_points, GpuVector<int> const &dim_offsets,
GpuVector<int> const &map_tensor, GpuVector<int> const &map_index, GpuVector<int> const &map_reference, T *gpu_result){
GpuVector<T> cache(acc, num_x, num_basis);
int num_blocks = (int) map_dimension.size();
if (num_blocks >= _MAX_CUDA_BLOCKS) num_blocks = _MAX_CUDA_BLOCKS;
if (is_nested){
if (is_clenshawcurtis0){
hipLaunchKernelGGL(( tasgpu_dglo_build_cache<T, _MAX_CUDA_THREADS, true, true>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0,
dims, num_x, (int) map_dimension.size(), gpu_x, nodes.data(), coeff.data(),
nodes_per_level.data(), offset_per_level.data(), dim_offsets.data(),
map_dimension.data(), map_level.data(), cache.data());
}else{
hipLaunchKernelGGL(( tasgpu_dglo_build_cache<T, _MAX_CUDA_THREADS, true, false>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0,
dims, num_x, (int) map_dimension.size(), gpu_x, nodes.data(), coeff.data(),
nodes_per_level.data(), offset_per_level.data(), dim_offsets.data(),
map_dimension.data(), map_level.data(), cache.data());
}
}else{
hipLaunchKernelGGL(( tasgpu_dglo_build_cache<T, _MAX_CUDA_THREADS, false, false>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0,
dims, num_x, (int) map_dimension.size(), gpu_x, nodes.data(), coeff.data(),
nodes_per_level.data(), offset_per_level.data(), dim_offsets.data(),
map_dimension.data(), map_level.data(), cache.data());
}
int mat_size = num_x * num_p;
num_blocks = num_x / _MAX_CUDA_THREADS + ((mat_size % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks >= _MAX_CUDA_BLOCKS) num_blocks = _MAX_CUDA_BLOCKS;
hipLaunchKernelGGL(( tasgpu_dglo_eval_zero<T, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, mat_size, gpu_result);
num_blocks = (int) map_tensor.size();
if (num_blocks >= _MAX_CUDA_BLOCKS) num_blocks = _MAX_CUDA_BLOCKS;
hipLaunchKernelGGL(( tasgpu_dglo_eval_sharedpoints<T, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0,
dims, num_x, (int) map_tensor.size(), num_p, cache.data(),
tensor_weights.data(), offset_per_level.data(), dim_offsets.data(), active_tensors.data(), active_num_points.data(),
map_tensor.data(), map_index.data(), map_reference.data(), gpu_result);
}
template void TasGpu::devalglo<double>(AccelerationContext const*, bool, bool, int, int, int, int,
double const*, GpuVector<double> const&, GpuVector<double> const&, GpuVector<double> const&,
GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&,
GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&,
GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, double*);
template void TasGpu::devalglo<float>(AccelerationContext const*, bool, bool, int, int, int, int,
float const*, GpuVector<float> const&, GpuVector<float> const&, GpuVector<float> const&,
GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&,
GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&,
GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, float*);
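// Fill helper: writes the constant value into n entries of data, either contiguously (stride == 1) or,
// judging by the kernel name, at strided positions otherwise.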
void TasGpu::fillDataGPU(AccelerationContext const*, double value, long long n, long long stride, double data[]){
if (stride == 1){
ThreadGrid1d tgrid(n, _MAX_CUDA_THREADS);
hipLaunchKernelGGL(( tascuda_vfill<double, _MAX_CUDA_THREADS>), dim3(tgrid.blocks), dim3(tgrid.threads), 0, 0, n, data, value);
}else{
ThreadGrid1d tgrid(n, 32);
hipLaunchKernelGGL(( tascuda_sfill<double, 32>), dim3(tgrid.blocks), dim3(tgrid.threads), 0, 0, n, stride, data, value);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Linear Algebra
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef __TASMANIAN_COMPILE_FALLBACK_CUDA_KERNELS__
void TasCUDA::cudaDgemm(int M, int N, int K, const double *gpu_a, const double *gpu_b, double *gpu_c){ // gpu_c = gpu_a * gpu_b, gpu_c is M by N
int blocks = (N / 96) + (((N % 96) == 0) ? 0 : 1);
blocks *= (M / 96) + (((M % 96) == 0) ? 0 : 1);
while(blocks > _MAX_CUDA_BLOCKS) blocks = _MAX_CUDA_BLOCKS;
hipLaunchKernelGGL(( tasgpu_cudaTgemm<double, 32, 96>), dim3(blocks), dim3(1024), 0, 0, M, N, K, gpu_a, gpu_b, gpu_c);
}
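// gpu_C = S * gpu_B with gpu_C being M by N; S is presumably sparse in compressed-row form (gpu_spntr / gpu_sindx / gpu_svals)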
void TasCUDA::cudaSparseMatmul(int M, int N, int num_nz, const int* gpu_spntr, const int* gpu_sindx, const double* gpu_svals, const double *gpu_B, double *gpu_C){
int blocks = M / 64 + ((M % 64 == 0) ? 0 : 1);
hipLaunchKernelGGL(( tasgpu_sparse_matmul<double, 64>), dim3(blocks), dim3(64), 0, 0, M, N, num_nz, gpu_spntr, gpu_sindx, gpu_svals, gpu_B, gpu_C);
}
void TasCUDA::cudaSparseVecDenseMat(int M, int N, int num_nz, const double *A, const int *indx, const double *vals, double *C){
int num_blocks = N / _MAX_CUDA_THREADS + ((N % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks< _MAX_CUDA_BLOCKS){
hipLaunchKernelGGL(( tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 1>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, M, N, num_nz, A, indx, vals, C);
}else{
num_blocks = N / (2 * _MAX_CUDA_THREADS) + ((N % (2 * _MAX_CUDA_THREADS) == 0) ? 0 : 1);
if (num_blocks< _MAX_CUDA_BLOCKS){
hipLaunchKernelGGL(( tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 2>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, M, N, num_nz, A, indx, vals, C);
}else{
num_blocks = N / (3 * _MAX_CUDA_THREADS) + ((N % (3 * _MAX_CUDA_THREADS) == 0) ? 0 : 1);
if (num_blocks< _MAX_CUDA_BLOCKS){
hipLaunchKernelGGL(( tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 3>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, M, N, num_nz, A, indx, vals, C);
}
}
}
}
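// Expand a compressed-row sparse matrix into the dense destination: zero-fill the full num_rows x num_columns
// block, then scatter the non-zeros row by row.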
void TasCUDA::convert_sparse_to_dense(int num_rows, int num_columns, const int *pntr, const int *indx, const double *vals, double *destination){
int n = num_rows * num_columns;
int num_blocks = n / _MAX_CUDA_THREADS + ((n % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks >= _MAX_CUDA_BLOCKS) num_blocks = _MAX_CUDA_BLOCKS;
hipLaunchKernelGGL(( tascuda_fill<double, _MAX_CUDA_THREADS>), dim3(num_blocks), dim3(_MAX_CUDA_THREADS), 0, 0, n, 0.0, destination);
num_blocks = num_rows;
if (num_blocks >= _MAX_CUDA_BLOCKS) num_blocks = _MAX_CUDA_BLOCKS;
hipLaunchKernelGGL(( tascuda_sparse_to_dense<double, 64>), dim3(num_blocks), dim3(64), 0, 0, num_rows, num_columns, pntr, indx, vals, destination);
}
#endif
}
#endif
| 7962a07ee9737ff14120f6036f8924395b36ff29.cu | /*
* Copyright (c) 2017, Miroslav Stoyanov
*
* This file is part of
* Toolkit for Adaptive Stochastic Modeling And Non-Intrusive ApproximatioN: TASMANIAN
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
* and the following disclaimer in the documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
* or promote products derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* UT-BATTELLE, LLC AND THE UNITED STATES GOVERNMENT MAKE NO REPRESENTATIONS AND DISCLAIM ALL WARRANTIES, BOTH EXPRESSED AND IMPLIED.
* THERE ARE NO EXPRESS OR IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, OR THAT THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY PATENT,
* COPYRIGHT, TRADEMARK, OR OTHER PROPRIETARY RIGHTS, OR THAT THE SOFTWARE WILL ACCOMPLISH THE INTENDED RESULTS OR THAT THE SOFTWARE OR ITS USE WILL NOT RESULT IN INJURY OR DAMAGE.
* THE USER ASSUMES RESPONSIBILITY FOR ALL LIABILITIES, PENALTIES, FINES, CLAIMS, CAUSES OF ACTION, AND COSTS AND EXPENSES, CAUSED BY, RESULTING FROM OR ARISING OUT OF,
* IN WHOLE OR IN PART THE USE, STORAGE OR DISPOSAL OF THE SOFTWARE.
*/
#ifndef __TASMANIAN_SPARSE_GRID_CUDA_KERNELS_CU
#define __TASMANIAN_SPARSE_GRID_CUDA_KERNELS_CU
#include "tsgAcceleratedDataStructures.hpp"
#include "tsgCudaLinearAlgebra.hpp"
#include "tsgCudaBasisEvaluations.hpp"
// several kernels assume a linear distribution of the threads and can be executed with "practically unlimited" number of threads
// thus we can set this to the CUDA max number of threads, based on the current cuda version
constexpr int _MAX_CUDA_THREADS = 1024;
// max number of blocks per grid direction
constexpr int _MAX_CUDA_BLOCKS = 65535;
/*
* Create a 1-D CUDA thread grid using the total_threads and number of threads per block.
* Basically, computes the number of blocks but no more than _MAX_CUDA_BLOCKS.
*/
struct ThreadGrid1d{
// Compute the threads and blocks.
ThreadGrid1d(long long total_threads, long long num_per_block) :
threads(static_cast<int>(num_per_block)),
blocks(static_cast<int>(std::min(total_threads / threads + ((total_threads % threads == 0) ? 0 : 1), static_cast<long long>(_MAX_CUDA_BLOCKS))))
{}
// number of threads
int const threads;
// number of blocks
int const blocks;
};
namespace TasGrid{
template<typename T>
void TasGpu::dtrans2can(AccelerationContext const*, bool use01, int dims, int num_x, int pad_size, double const *gpu_trans_a, double const *gpu_trans_b, T const *gpu_x_transformed, T *gpu_x_canonical){
int num_blocks = (num_x * dims) / _MAX_CUDA_THREADS + (((num_x * dims) % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks >= _MAX_CUDA_BLOCKS) num_blocks = _MAX_CUDA_BLOCKS;
tasgpu_transformed_to_canonical<T, double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS, (2*pad_size) * sizeof(double)>>>(dims, num_x, pad_size, gpu_trans_a, gpu_trans_b, gpu_x_transformed, gpu_x_canonical);
if (use01) tasgpu_m11_to_01<T, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>(dims * num_x, gpu_x_canonical);
}
template void TasGpu::dtrans2can<double>(AccelerationContext const*, bool, int, int, int, double const*, double const*, double const*, double*);
template void TasGpu::dtrans2can<float>(AccelerationContext const*, bool, int, int, int, double const*, double const*, float const*, float*);
// local polynomial basis functions, DENSE algorithm
template<typename T>
void TasGpu::devalpwpoly(AccelerationContext const*, int order, TypeOneDRule rule, int dims, int num_x, int num_points, const T *gpu_x, const T *gpu_nodes, const T *gpu_support, T *gpu_y){
// each block thread runs 1024 threads and processes 32 points (or basis functions)
int num_blocks = (num_points / 32) + ((num_points % 32 == 0) ? 0 : 1);
// order == 1 is considered "default" so that the compiler doesn't complain about missing default statement
// semilocalp cannot have order less than 2, only rule_localp can have order 0 (this gets overwritten in makeLocalPolynomialGrid())
if (rule == rule_localp){
switch(order){
case 0:
tasgpu_devalpwpoly<T, 0, rule_localp, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
case 2: tasgpu_devalpwpoly<T, 2, rule_localp, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
tasgpu_devalpwpoly<T, 1, rule_localp, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else if (rule == rule_localp0){
switch(order){
case 2: tasgpu_devalpwpoly<T, 2, rule_localp0, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
tasgpu_devalpwpoly<T, 1, rule_localp0, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else if (rule == rule_localpb){
switch(order){
case 2: tasgpu_devalpwpoly<T, 2, rule_localpb, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
break;
default:
tasgpu_devalpwpoly<T, 1, rule_localpb, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}else if (rule == rule_semilocalp){
tasgpu_devalpwpoly<T, 2, rule_semilocalp, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}else{ // rule == wavelet
tasgpu_devalpwpoly<T, 1, rule_wavelet, 32, 64><<<num_blocks, 1024>>>(dims, num_x, num_points, gpu_x, gpu_nodes, gpu_support, gpu_y);
}
}
template void TasGpu::devalpwpoly<double>(AccelerationContext const*, int, TypeOneDRule, int, int, int, const double*, const double*, const double*, double*);
template void TasGpu::devalpwpoly<float>(AccelerationContext const*, int, TypeOneDRule, int, int, int, const float*, const float*, const float*, float*);
// there is a switch statement that realizes templates for each combination of rule/order
// make one function that covers that switch, the rest is passed from devalpwpoly_sparse
template<typename T, int THREADS, int TOPLEVEL, bool fill>
inline void devalpwpoly_sparse_realize_rule_order(AccelerationContext const*, int order, TypeOneDRule rule, int dims, int num_x,
const T *x, const T *nodes, const T *support,
const int *hpntr, const int *hindx, int num_roots, const int *roots,
int *spntr, int *sindx, T *svals){
int num_blocks = num_x / THREADS + ((num_x % THREADS == 0) ? 0 : 1);
if (num_blocks >= _MAX_CUDA_BLOCKS) num_blocks = _MAX_CUDA_BLOCKS;
if (rule == rule_localp){
switch(order){
case 0:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 0, rule_localp, fill><<<num_blocks, THREADS>>>
(dims, num_x, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
case 2:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localp, fill><<<num_blocks, THREADS>>>
(dims, num_x, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localp, fill><<<num_blocks, THREADS>>>
(dims, num_x, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else if (rule == rule_localp0){
switch(order){
case 2:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localp0, fill><<<num_blocks, THREADS>>>
(dims, num_x, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localp0, fill><<<num_blocks, THREADS>>>
(dims, num_x, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else if (rule == rule_localpb){
switch(order){
case 2:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_localpb, fill><<<num_blocks, THREADS>>>
(dims, num_x, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
break;
default:
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 1, rule_localpb, fill><<<num_blocks, THREADS>>>
(dims, num_x, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}else{ // rule == rule_semilocalp
tasgpu_devalpwpoly_sparse<T, THREADS, TOPLEVEL, 2, rule_semilocalp, fill><<<num_blocks, THREADS>>>
(dims, num_x, x, nodes, support, hpntr, hindx, num_roots, roots, spntr, sindx, svals);
}
}
// local polynomial basis functions, SPARSE algorithm (2 passes, one pass to compute the non-zeros and one pass to evaluate)
template<typename T>
void TasGpu::devalpwpoly_sparse(AccelerationContext const *acc, int order, TypeOneDRule rule, int dims, int num_x, const T *gpu_x,
const GpuVector<T> &gpu_nodes, const GpuVector<T> &gpu_support,
const GpuVector<int> &gpu_hpntr, const GpuVector<int> &gpu_hindx, const GpuVector<int> &gpu_hroots,
GpuVector<int> &gpu_spntr, GpuVector<int> &gpu_sindx, GpuVector<T> &gpu_svals){
gpu_spntr.resize(acc, num_x + 1);
// call with fill == false to count the non-zeros per row of the matrix
devalpwpoly_sparse_realize_rule_order<T, 64, 46, false>
(acc, order, rule, dims, num_x, gpu_x, gpu_nodes.data(), gpu_support.data(),
gpu_hpntr.data(), gpu_hindx.data(), (int) gpu_hroots.size(), gpu_hroots.data(), gpu_spntr.data(), 0, 0);
std::vector<int> cpu_spntr;
gpu_spntr.unload(acc, cpu_spntr);
cpu_spntr[0] = 0;
int nz = 0;
for(auto &i : cpu_spntr){
i += nz;
nz = i;
}
gpu_spntr.load(acc, cpu_spntr);
gpu_sindx.resize(acc, nz);
gpu_svals.resize(acc, nz);
// call with fill == true to load the non-zeros
devalpwpoly_sparse_realize_rule_order<T, 64, 46, true>
(acc, order, rule, dims, num_x, gpu_x, gpu_nodes.data(), gpu_support.data(),
gpu_hpntr.data(), gpu_hindx.data(), (int) gpu_hroots.size(), gpu_hroots.data(), gpu_spntr.data(), gpu_sindx.data(), gpu_svals.data());
}
template void TasGpu::devalpwpoly_sparse<double>(AccelerationContext const*, int, TypeOneDRule, int, int, const double*, const GpuVector<double>&, const GpuVector<double>&,
const GpuVector<int>&, const GpuVector<int>&, const GpuVector<int>&,
GpuVector<int>&, GpuVector<int>&, GpuVector<double>&);
template void TasGpu::devalpwpoly_sparse<float>(AccelerationContext const*, int, TypeOneDRule, int, int, const float*, const GpuVector<float>&, const GpuVector<float>&,
const GpuVector<int>&, const GpuVector<int>&, const GpuVector<int>&,
GpuVector<int>&, GpuVector<int>&, GpuVector<float>&);
// Sequence Grid basis evaluations
template<typename T>
void TasGpu::devalseq(AccelerationContext const *acc, int dims, int num_x, const std::vector<int> &max_levels, const T *gpu_x, const GpuVector<int> &num_nodes,
const GpuVector<int> &points, const GpuVector<T> &nodes, const GpuVector<T> &coeffs, T *gpu_result){
std::vector<int> offsets(dims);
offsets[0] = 0;
for(int d=1; d<dims; d++) offsets[d] = offsets[d-1] + num_x * (max_levels[d-1] + 1);
size_t num_total = offsets[dims-1] + num_x * (max_levels[dims-1] + 1);
int maxl = max_levels[0]; for(auto l : max_levels) if (maxl < l) maxl = l;
GpuVector<int> gpu_offsets(acc, offsets);
GpuVector<T> cache1D(acc, num_total);
int num_blocks = num_x / _MAX_CUDA_THREADS + ((num_x % _MAX_CUDA_THREADS == 0) ? 0 : 1);
tasgpu_dseq_build_cache<T, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>
(dims, num_x, gpu_x, nodes.data(), coeffs.data(), maxl+1, gpu_offsets.data(), num_nodes.data(), cache1D.data());
num_blocks = num_x / 32 + ((num_x % 32 == 0) ? 0 : 1);
tasgpu_dseq_eval_sharedpoints<T, 32><<<num_blocks, 1024>>>
(dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_result);
}
template void TasGpu::devalseq<double>(AccelerationContext const*, int dims, int num_x, const std::vector<int> &max_levels,
const double *gpu_x, const GpuVector<int> &num_nodes,
const GpuVector<int> &points, const GpuVector<double> &nodes, const GpuVector<double> &coeffs, double *gpu_result);
template void TasGpu::devalseq<float>(AccelerationContext const*, int dims, int num_x, const std::vector<int> &max_levels,
const float *gpu_x, const GpuVector<int> &num_nodes,
const GpuVector<int> &points, const GpuVector<float> &nodes, const GpuVector<float> &coeffs, float *gpu_result);
// Fourier Grid basis evaluations
template<typename T>
void TasGpu::devalfor(AccelerationContext const *acc, int dims, int num_x, const std::vector<int> &max_levels, const T *gpu_x,
const GpuVector<int> &num_nodes, const GpuVector<int> &points, T *gpu_wreal, typename GpuVector<T>::value_type *gpu_wimag){
std::vector<int> max_nodes(dims);
for(int j=0; j<dims; j++){
int n = 1;
for(int i=0; i<max_levels[j]; i++) n *= 3;
max_nodes[j] = n;
}
std::vector<int> offsets(dims);
offsets[0] = 0;
for(int d=1; d<dims; d++) offsets[d] = offsets[d-1] + 2 * num_x * (max_nodes[d-1] + 1);
size_t num_total = offsets[dims-1] + 2 * num_x * (max_nodes[dims-1] + 1);
GpuVector<int> gpu_offsets(acc, offsets);
GpuVector<T> cache1D(acc, num_total);
int num_blocks = num_x / _MAX_CUDA_THREADS + ((num_x % _MAX_CUDA_THREADS == 0) ? 0 : 1);
tasgpu_dfor_build_cache<T, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>
(dims, num_x, gpu_x, gpu_offsets.data(), num_nodes.data(), cache1D.data());
num_blocks = num_x / 32 + ((num_x % 32 == 0) ? 0 : 1);
if (gpu_wimag == 0){
tasgpu_dfor_eval_sharedpoints<T, 32, true><<<num_blocks, 1024>>>
(dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_wreal, 0);
}else{
tasgpu_dfor_eval_sharedpoints<T, 32, false><<<num_blocks, 1024>>>
(dims, num_x, (int) points.size() / dims, points.data(), gpu_offsets.data(), cache1D.data(), gpu_wreal, gpu_wimag);
}
}
template void TasGpu::devalfor<double>(AccelerationContext const*, int, int, const std::vector<int>&, const double*, const GpuVector<int>&, const GpuVector<int>&, double*, double*);
template void TasGpu::devalfor<float>(AccelerationContext const*, int, int, const std::vector<int>&, const float*, const GpuVector<int>&, const GpuVector<int>&, float*, float*);
template<typename T>
void TasGpu::devalglo(AccelerationContext const *acc, bool is_nested, bool is_clenshawcurtis0,
int dims, int num_x, int num_p, int num_basis,
T const *gpu_x, GpuVector<T> const &nodes, GpuVector<T> const &coeff, GpuVector<T> const &tensor_weights,
GpuVector<int> const &nodes_per_level, GpuVector<int> const &offset_per_level, GpuVector<int> const &map_dimension, GpuVector<int> const &map_level,
GpuVector<int> const &active_tensors, GpuVector<int> const &active_num_points, GpuVector<int> const &dim_offsets,
GpuVector<int> const &map_tensor, GpuVector<int> const &map_index, GpuVector<int> const &map_reference, T *gpu_result){
GpuVector<T> cache(acc, num_x, num_basis);
int num_blocks = (int) map_dimension.size();
if (num_blocks >= _MAX_CUDA_BLOCKS) num_blocks = _MAX_CUDA_BLOCKS;
if (is_nested){
if (is_clenshawcurtis0){
tasgpu_dglo_build_cache<T, _MAX_CUDA_THREADS, true, true><<<num_blocks, _MAX_CUDA_THREADS>>>
(dims, num_x, (int) map_dimension.size(), gpu_x, nodes.data(), coeff.data(),
nodes_per_level.data(), offset_per_level.data(), dim_offsets.data(),
map_dimension.data(), map_level.data(), cache.data());
}else{
tasgpu_dglo_build_cache<T, _MAX_CUDA_THREADS, true, false><<<num_blocks, _MAX_CUDA_THREADS>>>
(dims, num_x, (int) map_dimension.size(), gpu_x, nodes.data(), coeff.data(),
nodes_per_level.data(), offset_per_level.data(), dim_offsets.data(),
map_dimension.data(), map_level.data(), cache.data());
}
}else{
tasgpu_dglo_build_cache<T, _MAX_CUDA_THREADS, false, false><<<num_blocks, _MAX_CUDA_THREADS>>>
(dims, num_x, (int) map_dimension.size(), gpu_x, nodes.data(), coeff.data(),
nodes_per_level.data(), offset_per_level.data(), dim_offsets.data(),
map_dimension.data(), map_level.data(), cache.data());
}
int mat_size = num_x * num_p;
num_blocks = num_x / _MAX_CUDA_THREADS + ((mat_size % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks >= _MAX_CUDA_BLOCKS) num_blocks = _MAX_CUDA_BLOCKS;
tasgpu_dglo_eval_zero<T, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>(mat_size, gpu_result);
num_blocks = (int) map_tensor.size();
if (num_blocks >= _MAX_CUDA_BLOCKS) num_blocks = _MAX_CUDA_BLOCKS;
tasgpu_dglo_eval_sharedpoints<T, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>
(dims, num_x, (int) map_tensor.size(), num_p, cache.data(),
tensor_weights.data(), offset_per_level.data(), dim_offsets.data(), active_tensors.data(), active_num_points.data(),
map_tensor.data(), map_index.data(), map_reference.data(), gpu_result);
}
template void TasGpu::devalglo<double>(AccelerationContext const*, bool, bool, int, int, int, int,
double const*, GpuVector<double> const&, GpuVector<double> const&, GpuVector<double> const&,
GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&,
GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&,
GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, double*);
template void TasGpu::devalglo<float>(AccelerationContext const*, bool, bool, int, int, int, int,
float const*, GpuVector<float> const&, GpuVector<float> const&, GpuVector<float> const&,
GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&,
GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&,
GpuVector<int> const&, GpuVector<int> const&, GpuVector<int> const&, float*);
void TasGpu::fillDataGPU(AccelerationContext const*, double value, long long n, long long stride, double data[]){
if (stride == 1){
ThreadGrid1d tgrid(n, _MAX_CUDA_THREADS);
tascuda_vfill<double, _MAX_CUDA_THREADS><<<tgrid.blocks, tgrid.threads>>>(n, data, value);
}else{
ThreadGrid1d tgrid(n, 32);
tascuda_sfill<double, 32><<<tgrid.blocks, tgrid.threads>>>(n, stride, data, value);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Linear Algebra
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef __TASMANIAN_COMPILE_FALLBACK_CUDA_KERNELS__
void TasCUDA::cudaDgemm(int M, int N, int K, const double *gpu_a, const double *gpu_b, double *gpu_c){ // gpu_c = gpu_a * gpu_b, gpu_c is M by N
int blocks = (N / 96) + (((N % 96) == 0) ? 0 : 1);
blocks *= (M / 96) + (((M % 96) == 0) ? 0 : 1);
while(blocks > _MAX_CUDA_BLOCKS) blocks = _MAX_CUDA_BLOCKS;
tasgpu_cudaTgemm<double, 32, 96><<<blocks, 1024>>>(M, N, K, gpu_a, gpu_b, gpu_c);
}
void TasCUDA::cudaSparseMatmul(int M, int N, int num_nz, const int* gpu_spntr, const int* gpu_sindx, const double* gpu_svals, const double *gpu_B, double *gpu_C){
int blocks = M / 64 + ((M % 64 == 0) ? 0 : 1);
tasgpu_sparse_matmul<double, 64><<<blocks, 64>>>(M, N, num_nz, gpu_spntr, gpu_sindx, gpu_svals, gpu_B, gpu_C);
}
void TasCUDA::cudaSparseVecDenseMat(int M, int N, int num_nz, const double *A, const int *indx, const double *vals, double *C){
    // try progressively more work per thread (1x, 2x, 3x chunks) so that the block count stays under _MAX_CUDA_BLOCKS
    int num_blocks = N / _MAX_CUDA_THREADS + ((N % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks< _MAX_CUDA_BLOCKS){
tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 1><<<num_blocks, _MAX_CUDA_THREADS>>>(M, N, num_nz, A, indx, vals, C);
}else{
num_blocks = N / (2 * _MAX_CUDA_THREADS) + ((N % (2 * _MAX_CUDA_THREADS) == 0) ? 0 : 1);
if (num_blocks< _MAX_CUDA_BLOCKS){
tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 2><<<num_blocks, _MAX_CUDA_THREADS>>>(M, N, num_nz, A, indx, vals, C);
}else{
num_blocks = N / (3 * _MAX_CUDA_THREADS) + ((N % (3 * _MAX_CUDA_THREADS) == 0) ? 0 : 1);
if (num_blocks< _MAX_CUDA_BLOCKS){
tasgpu_sparse_matveci<double, _MAX_CUDA_THREADS, 3><<<num_blocks, _MAX_CUDA_THREADS>>>(M, N, num_nz, A, indx, vals, C);
}
}
}
}
void TasCUDA::convert_sparse_to_dense(int num_rows, int num_columns, const int *pntr, const int *indx, const double *vals, double *destination){
int n = num_rows * num_columns;
int num_blocks = n / _MAX_CUDA_THREADS + ((n % _MAX_CUDA_THREADS == 0) ? 0 : 1);
if (num_blocks >= _MAX_CUDA_BLOCKS) num_blocks = _MAX_CUDA_BLOCKS;
tascuda_fill<double, _MAX_CUDA_THREADS><<<num_blocks, _MAX_CUDA_THREADS>>>(n, 0.0, destination);
num_blocks = num_rows;
if (num_blocks >= _MAX_CUDA_BLOCKS) num_blocks = _MAX_CUDA_BLOCKS;
tascuda_sparse_to_dense<double, 64><<<num_blocks, 64>>>(num_rows, num_columns, pntr, indx, vals, destination);
}
#endif
}
#endif
|
a71c9c99c7f03693e0c62fd015cc35f346bfada5.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <cusparse_v2.h>
#include "rocblas.h"
#include <hiprand/hiprand.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "mex.h"
#include "kcDefs.h" //see for info on anything starting with KC_
#include "kcArrayFunctions.h"
//rough summing kernel (does not need to be efficient)
__global__ void kcSumLangevinVars(KC_FP_TYPE * der, KC_FP_TYPE * der_sum, KC_FP_TYPE * G, KC_FP_TYPE * G_sum, KC_FP_TYPE * ll, KC_FP_TYPE * ll_sum, int * mBlkIdx, int NT, KC_FP_TYPE gPrior, KC_FP_TYPE lPrior) {
int nsum = blockIdx.x*blockDim.x+threadIdx.x;
if(nsum == 0) {
der_sum[0] = lPrior;
for(int idx = 0; idx < NT; idx++) {
der_sum[0] += der[idx];
}
}
else if(nsum == 1) {
G_sum[0] = gPrior;
for(int idx = 0; idx < NT; idx++) {
G_sum[0] -= G[idx];
}
}
else if(nsum == 2) {
ll_sum[0] = 0;
for(int idx = 0; idx < NT; idx++) {
ll_sum[0] += ll[idx];
}
}
}
//derivatives of the firing rate function w.r.t. gamma (assuming fixed latent variables)
__device__ KC_FP_TYPE h(KC_FP_TYPE lambda, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE modelInd) {
if(modelInd > 0.001) {
KC_FP_TYPE logex = KC_MAX(KC_MINN,(gamma*lambda>100)?(gamma*lambda):KC_MIN(log1p(exp(lambda*gamma)),KC_MAXN));
return KC_MIN(KC_POW(logex*1.00000,modelInd)*dt,KC_MAXN);
}
else {
return KC_MIN(exp(lambda*gamma)*dt,KC_MAXN);
}
}
__device__ KC_FP_TYPE dh(KC_FP_TYPE lambda, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE modelInd) {
if( modelInd > 0.001) {
KC_FP_TYPE logex = KC_MAX(KC_MINN,(gamma*lambda>100)?(gamma*lambda):KC_MIN(log1p(exp(lambda*gamma)),KC_MAXN));
KC_FP_TYPE log_der = KC_MIN(lambda/(1+KC_MIN(KC_MAXN,KC_MAX(KC_MINN,exp(-lambda*gamma)))),KC_MAXN);
KC_FP_TYPE der = modelInd*KC_POW(logex*1.00000,modelInd-1.00)*log_der;
return der*dt;
}
else {
return KC_MIN(dt*lambda*KC_EXP(gamma*lambda),KC_MAXN);
}
}
// computes log p(single trial | gamma, fixed lambdas)
__global__ void kcBoundaryLikelihoodTrial(KC_FP_TYPE * y, KC_FP_TYPE * lambdas, int * crossingTimes, int * mBlkIdx, KC_FP_TYPE g, KC_FP_TYPE dt, int NT, KC_FP_TYPE * llSum, KC_FP_TYPE * trialSum, KC_FP_TYPE * trialSumRiemann, KC_FP_TYPE modelInd) {
int idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx < NT) {
trialSum[idx] = 0;
trialSumRiemann[idx] = 0;
llSum[idx] = 0;
for(int ii = mBlkIdx[idx]; ii < mBlkIdx[idx+1]; ii++) {
KC_FP_TYPE trueLambda = fmin(1, ((ii-mBlkIdx[idx]) < crossingTimes[idx])?lambdas[ii]:1);
KC_FP_TYPE fr = KC_MAX(KC_MINN,h(trueLambda,g,1,modelInd));
llSum[idx] += y[ii]*(KC_LOG(fr)+KC_LOG(dt)) - dt*fr -lgamma(y[ii]+1);
KC_FP_TYPE dr = dh(trueLambda,g,1,modelInd);
trialSum[idx] += (y[ii]/fr-dt)*dr;
trialSumRiemann[idx] += -dt*dr*dr/fr;
}
}
}
//Computes the log probability of a set of spike trains under the ramping model given a fixed set of latent variables,
// as a function of \gamma (the bound height), along with first/second derivatives w.r.t. \gamma
//args
// 0 = lambda (latent variables, on GPU. Same size as y)
// 1 = auxiliary variable - threshold crossing time (latent variable boundary crossing time, on GPU. vector length number of trials: NT)
// 2 = y (observations, on GPU)
// 3 = trIdx (array that accesses the beta value used at each timepoint, y being indexed at 0. Includes final value that should be length of y)
// 4 = g (absorbing boundary effective height)
// 5 = dt (bin size in seconds)
// 6 = gPrior (contains Fisher information of gamma)
// 7 = lPrior (contains log prior probability of gamma)
// 8 = modelInd (power if using the log1p transfer function, 0 if using exp)
//
//outputs (left-hand side)
// 0 = log p(y|lambdas,gamma)
// 1 = d/dg log p(y|lambdas,gamma)
// 2 = d^2/d^2g log p(y|lambdas,gamma)
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
hipError_t ce;
//loads up trial information
unsigned int TT = kcGetArrayNumEl(prhs[0]);
int * crossingTimes = kcGetArrayDataInt(prhs[1]);
KC_FP_TYPE * y = kcGetArrayData(prhs[2],TT);
int * trIdx = kcGetArrayDataInt(prhs[3]);
unsigned int NT = kcGetArrayNumEl(prhs[3])-1;
KC_FP_TYPE dt = mxGetScalar(prhs[5]);
//loads gamma and latent variables
KC_FP_TYPE g = mxGetScalar(prhs[4]);
KC_FP_TYPE * lambda = kcGetArrayData(prhs[0]);
//loads log prior probability of the gamma value
if(mxGetClassID(prhs[6]) != KC_FP_TYPE_MATLAB) {
mexErrMsgTxt("Prior matrix input wrong floating point type (kcLangevinStep)!");
}
KC_FP_TYPE gPrior = mxGetScalar(prhs[6]);
KC_FP_TYPE lPrior = mxGetScalar(prhs[7]);
KC_FP_TYPE modelInd = mxGetScalar(prhs[8]);
//sets up space for computations on GPU
KC_FP_TYPE * der_log_p_y;
checkCudaErrors(hipMalloc((void**)&der_log_p_y,sizeof(KC_FP_TYPE)*(NT)));
KC_FP_TYPE * der_log_p_y_sum;
checkCudaErrors(hipMalloc((void**)&der_log_p_y_sum,sizeof(KC_FP_TYPE)*(1)));
KC_FP_TYPE * log_p_y;
checkCudaErrors(hipMalloc((void**)&log_p_y,sizeof(KC_FP_TYPE)*NT));
KC_FP_TYPE * log_p_y_sum;
checkCudaErrors(hipMalloc((void**)&log_p_y_sum,sizeof(KC_FP_TYPE)*1));
KC_FP_TYPE * G_log_p_y1;
checkCudaErrors(hipMalloc((void**)&G_log_p_y1,sizeof(KC_FP_TYPE)*(NT)));
KC_FP_TYPE * G_log_p_y_sum;
checkCudaErrors(hipMalloc((void**)&G_log_p_y_sum,sizeof(KC_FP_TYPE)*(1)*(1)));
//sets up CUDA variables
int blockSize = 2;
int numBlocks = (int)NT/(int)blockSize + ((NT%blockSize==0)?0:1);
//gets each trial's likelihood + derivatives w.r.t. gamma
hipLaunchKernelGGL(( kcBoundaryLikelihoodTrial), dim3(numBlocks),dim3(blockSize) , 0, 0, y,lambda,crossingTimes,trIdx,g,dt, NT,log_p_y,der_log_p_y,G_log_p_y1,modelInd);
checkCudaErrors(hipDeviceSynchronize());
//sums up all the trials' likelihoods and derivatives with respect to gamma
int nBlocksC = 3;
int blockSizeC = 1;
hipLaunchKernelGGL(( kcSumLangevinVars) , dim3(nBlocksC),dim3(blockSizeC) , 0, 0, der_log_p_y, der_log_p_y_sum, G_log_p_y1, G_log_p_y_sum, log_p_y, log_p_y_sum, trIdx, NT, gPrior, lPrior);
checkCudaErrors(hipDeviceSynchronize());
//pushes answers back to MATLAB
if(nlhs > 0) {
plhs[0] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL);
checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(plhs[0]),log_p_y_sum,sizeof(KC_FP_TYPE)*1,hipMemcpyDeviceToHost));
}
if(nlhs > 1) {
plhs[1] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL);
checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(plhs[1]),der_log_p_y_sum,sizeof(KC_FP_TYPE)*(1),hipMemcpyDeviceToHost));
}
if(nlhs > 2) {
plhs[2] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL);
checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(plhs[2]),G_log_p_y_sum,sizeof(KC_FP_TYPE)*(1)*(1),hipMemcpyDeviceToHost));
}
//clears up GPU variables
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipFree(log_p_y));
checkCudaErrors(hipFree(log_p_y_sum));
checkCudaErrors(hipFree(der_log_p_y));
checkCudaErrors(hipFree(der_log_p_y_sum));
checkCudaErrors(hipFree(G_log_p_y1));
checkCudaErrors(hipFree(G_log_p_y_sum));
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error at the end of kcLangevinStep.cu ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA errors");
}
}
| a71c9c99c7f03693e0c62fd015cc35f346bfada5.cu |
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cuda_runtime.h>
#include <cusparse_v2.h>
#include "cublas_v2.h"
#include <curand.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "mex.h"
#include "kcDefs.h" //see for info on anything starting with KC_
#include "kcArrayFunctions.h"
//rough summing kernel (does not need to be efficient)
__global__ void kcSumLangevinVars(KC_FP_TYPE * der, KC_FP_TYPE * der_sum, KC_FP_TYPE * G, KC_FP_TYPE * G_sum, KC_FP_TYPE * ll, KC_FP_TYPE * ll_sum, int * mBlkIdx, int NT, KC_FP_TYPE gPrior, KC_FP_TYPE lPrior) {
int nsum = blockIdx.x*blockDim.x+threadIdx.x;
if(nsum == 0) {
der_sum[0] = lPrior;
for(int idx = 0; idx < NT; idx++) {
der_sum[0] += der[idx];
}
}
else if(nsum == 1) {
G_sum[0] = gPrior;
for(int idx = 0; idx < NT; idx++) {
G_sum[0] -= G[idx];
}
}
else if(nsum == 2) {
ll_sum[0] = 0;
for(int idx = 0; idx < NT; idx++) {
ll_sum[0] += ll[idx];
}
}
}
//derivatives of the firing rate function w.r.t. gamma (assuming fixed latent variables)
__device__ KC_FP_TYPE h(KC_FP_TYPE lambda, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE modelInd) {
if(modelInd > 0.001) {
KC_FP_TYPE logex = KC_MAX(KC_MINN,(gamma*lambda>100)?(gamma*lambda):KC_MIN(log1p(exp(lambda*gamma)),KC_MAXN));
return KC_MIN(KC_POW(logex*1.00000,modelInd)*dt,KC_MAXN);
}
else {
return KC_MIN(exp(lambda*gamma)*dt,KC_MAXN);
}
}
__device__ KC_FP_TYPE dh(KC_FP_TYPE lambda, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE modelInd) {
if( modelInd > 0.001) {
KC_FP_TYPE logex = KC_MAX(KC_MINN,(gamma*lambda>100)?(gamma*lambda):KC_MIN(log1p(exp(lambda*gamma)),KC_MAXN));
KC_FP_TYPE log_der = KC_MIN(lambda/(1+KC_MIN(KC_MAXN,KC_MAX(KC_MINN,exp(-lambda*gamma)))),KC_MAXN);
KC_FP_TYPE der = modelInd*KC_POW(logex*1.00000,modelInd-1.00)*log_der;
return der*dt;
}
else {
return KC_MIN(dt*lambda*KC_EXP(gamma*lambda),KC_MAXN);
}
}
// computes log p(single trial | gamma, fixed lambdas)
__global__ void kcBoundaryLikelihoodTrial(KC_FP_TYPE * y, KC_FP_TYPE * lambdas, int * crossingTimes, int * mBlkIdx, KC_FP_TYPE g, KC_FP_TYPE dt, int NT, KC_FP_TYPE * llSum, KC_FP_TYPE * trialSum, KC_FP_TYPE * trialSumRiemann, KC_FP_TYPE modelInd) {
int idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx < NT) {
trialSum[idx] = 0;
trialSumRiemann[idx] = 0;
llSum[idx] = 0;
for(int ii = mBlkIdx[idx]; ii < mBlkIdx[idx+1]; ii++) {
KC_FP_TYPE trueLambda = fmin(1, ((ii-mBlkIdx[idx]) < crossingTimes[idx])?lambdas[ii]:1);
KC_FP_TYPE fr = KC_MAX(KC_MINN,h(trueLambda,g,1,modelInd));
llSum[idx] += y[ii]*(KC_LOG(fr)+KC_LOG(dt)) - dt*fr -lgamma(y[ii]+1);
KC_FP_TYPE dr = dh(trueLambda,g,1,modelInd);
trialSum[idx] += (y[ii]/fr-dt)*dr;
trialSumRiemann[idx] += -dt*dr*dr/fr;
}
}
}
//Computes the log probability of a set of spike trains under the ramping model given a fixed set of latent variables,
// as a function of \gamma (the bound height), along with first/second derivatives w.r.t. \gamma
//args
// 0 = lambda (latent variables, on GPU. Same size as y)
// 1 = auxiliary variable - threshold crossing time (latent variable boundary crossing time, on GPU. vector length number of trials: NT)
// 2 = y (observations, on GPU)
// 3 = trIdx (array that accesses the beta value used at each timepoint, y being indexed at 0. Includes final value that should be length of y)
// 4 = g (absorbing boundary effective height)
// 5 = dt (bin size in seconds)
// 6 = gPrior (contains Fisher information of gamma)
// 7 = lPrior (contains log prior probability of gamma)
// 8 = modelInd (power if using the log1p transfer function, 0 if using exp)
//
//outputs (left-hand side)
// 0 = log p(y|lambdas,gamma)
// 1 = d/dg log p(y|lambdas,gamma)
// 2 = d^2/d^2g log p(y|lambdas,gamma)
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
cudaError_t ce;
//loads up trial information
unsigned int TT = kcGetArrayNumEl(prhs[0]);
int * crossingTimes = kcGetArrayDataInt(prhs[1]);
KC_FP_TYPE * y = kcGetArrayData(prhs[2],TT);
int * trIdx = kcGetArrayDataInt(prhs[3]);
unsigned int NT = kcGetArrayNumEl(prhs[3])-1;
KC_FP_TYPE dt = mxGetScalar(prhs[5]);
//loads gamma and latent variables
KC_FP_TYPE g = mxGetScalar(prhs[4]);
KC_FP_TYPE * lambda = kcGetArrayData(prhs[0]);
//loads log prior probability of the gamma value
if(mxGetClassID(prhs[6]) != KC_FP_TYPE_MATLAB) {
mexErrMsgTxt("Prior matrix input wrong floating point type (kcLangevinStep)!");
}
KC_FP_TYPE gPrior = mxGetScalar(prhs[6]);
KC_FP_TYPE lPrior = mxGetScalar(prhs[7]);
KC_FP_TYPE modelInd = mxGetScalar(prhs[8]);
//sets up space for computations on GPU
KC_FP_TYPE * der_log_p_y;
checkCudaErrors(cudaMalloc((void**)&der_log_p_y,sizeof(KC_FP_TYPE)*(NT)));
KC_FP_TYPE * der_log_p_y_sum;
checkCudaErrors(cudaMalloc((void**)&der_log_p_y_sum,sizeof(KC_FP_TYPE)*(1)));
KC_FP_TYPE * log_p_y;
checkCudaErrors(cudaMalloc((void**)&log_p_y,sizeof(KC_FP_TYPE)*NT));
KC_FP_TYPE * log_p_y_sum;
checkCudaErrors(cudaMalloc((void**)&log_p_y_sum,sizeof(KC_FP_TYPE)*1));
KC_FP_TYPE * G_log_p_y1;
checkCudaErrors(cudaMalloc((void**)&G_log_p_y1,sizeof(KC_FP_TYPE)*(NT)));
KC_FP_TYPE * G_log_p_y_sum;
checkCudaErrors(cudaMalloc((void**)&G_log_p_y_sum,sizeof(KC_FP_TYPE)*(1)*(1)));
//sets up CUDA variables
int blockSize = 2;
int numBlocks = (int)NT/(int)blockSize + ((NT%blockSize==0)?0:1);
//gets each trial's likelihood + derivatives w.r.t. gamma
kcBoundaryLikelihoodTrial<<< numBlocks,blockSize >>>(y,lambda,crossingTimes,trIdx,g,dt, NT,log_p_y,der_log_p_y,G_log_p_y1,modelInd);
checkCudaErrors(cudaDeviceSynchronize());
//sums up all the trials' likelihoods and derivatives with respect to gamma
int nBlocksC = 3;
int blockSizeC = 1;
kcSumLangevinVars <<< nBlocksC,blockSizeC >>> (der_log_p_y, der_log_p_y_sum, G_log_p_y1, G_log_p_y_sum, log_p_y, log_p_y_sum, trIdx, NT, gPrior, lPrior);
checkCudaErrors(cudaDeviceSynchronize());
//pushes answers back to MATLAB
if(nlhs > 0) {
plhs[0] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL);
checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(plhs[0]),log_p_y_sum,sizeof(KC_FP_TYPE)*1,cudaMemcpyDeviceToHost));
}
if(nlhs > 1) {
plhs[1] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL);
checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(plhs[1]),der_log_p_y_sum,sizeof(KC_FP_TYPE)*(1),cudaMemcpyDeviceToHost));
}
if(nlhs > 2) {
plhs[2] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL);
checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(plhs[2]),G_log_p_y_sum,sizeof(KC_FP_TYPE)*(1)*(1),cudaMemcpyDeviceToHost));
}
//clears up GPU variables
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaFree(log_p_y));
checkCudaErrors(cudaFree(log_p_y_sum));
checkCudaErrors(cudaFree(der_log_p_y));
checkCudaErrors(cudaFree(der_log_p_y_sum));
checkCudaErrors(cudaFree(G_log_p_y1));
checkCudaErrors(cudaFree(G_log_p_y_sum));
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error at the end of kcLangevinStep.cu ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA errors");
}
}
|
d28a8a1240f956a069f4ec39a380383f6df5dd92.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
* This code implements the interleaved and neighbor-paired approaches to
* parallel reduction in CUDA. For this example, the sum operation is used. A
* variety of optimizations on parallel reduction aimed at reducing divergence
* are also demonstrated, such as unrolling.
*/
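/*
 * The kernels reduceNeighbored, reduceInterleaved and reduceUnrolling8 launched by
 * main() below are assumed to be provided elsewhere (e.g. through common.h, along
 * with recursiveReduce and seconds). The kernel that follows is only an illustrative
 * sketch of the neighbored-pair approach, not necessarily the exact implementation
 * used by this program.
 */
__global__ void reduceNeighboredSketch(int *g_idata, int *g_odata, unsigned int n)
{
    // set thread ID and global index
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // convert global data pointer to the local pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x;
    // boundary check
    if (idx >= n) return;
    // neighbored-pair reduction: the stride doubles every step, so the active threads
    // become scattered across warps and divergence grows
    for (int stride = 1; stride < blockDim.x; stride *= 2)
    {
        if ((tid % (2 * stride)) == 0)
        {
            idata[tid] += idata[tid + stride];
        }
        // synchronize within threadblock
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = idata[0];
}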
/*
// Neighbored Pair Implementation with less divergence
__global__ void reduceNeighboredLess (int *g_idata, int *g_odata,
unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if(idx >= n) return;
// in-place reduction in global memory
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
// convert tid into local array index
int index = 2 * stride * tid;
if (index < blockDim.x)
{
idata[index] += idata[index + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}*/
/*
__global__ void reduceUnrolling2 (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 2;
// unrolling 2
if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x];
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrolling4 (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 4;
// unrolling 4
if (idx + 3 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4;
}
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
*/
/*
__global__ void reduceUnrollWarps8 (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 32; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// unrolling warp
if (tid < 32)
{
volatile int *vmem = idata;
vmem[tid] += vmem[tid + 32];
vmem[tid] += vmem[tid + 16];
vmem[tid] += vmem[tid + 8];
vmem[tid] += vmem[tid + 4];
vmem[tid] += vmem[tid + 2];
vmem[tid] += vmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceCompleteUnrollWarps8 (int *g_idata, int *g_odata,
unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads();
// in-place reduction and complete unroll
if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
template <unsigned int iBlockSize>
__global__ void reduceCompleteUnroll(int *g_idata, int *g_odata,
unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads();
// in-place reduction and complete unroll
if (iBlockSize >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
__syncthreads();
if (iBlockSize >= 512 && tid < 256) idata[tid] += idata[tid + 256];
__syncthreads();
if (iBlockSize >= 256 && tid < 128) idata[tid] += idata[tid + 128];
__syncthreads();
if (iBlockSize >= 128 && tid < 64) idata[tid] += idata[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrollWarps (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 2;
// unrolling 2
if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x];
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 32; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// unrolling last warp
if (tid < 32)
{
volatile int *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
*/
int main(int argc, char **argv)
{
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s starting reduction at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
bool bResult = false;
// initialization
int size = 1 << 24; // total number of elements to reduce
printf(" with array size %d ", size);
// execution configuration
int blocksize = 512; // initial block size
if(argc > 1)
{
blocksize = atoi(argv[1]); // block size from command line argument
}
dim3 block (blocksize, 1);
dim3 grid ((size + block.x - 1) / block.x, 1);
printf("grid %d block %d\n", grid.x, block.x);
// allocate host memory
size_t bytes = size * sizeof(int);
int *h_idata = (int *) malloc(bytes);
int *h_odata = (int *) malloc(grid.x * sizeof(int));
int *tmp = (int *) malloc(bytes);
// initialize the array
for (int i = 0; i < size; i++)
{
// mask off high 2 bytes to force max number to 255
h_idata[i] = (int)( rand() & 0xFF );
}
memcpy (tmp, h_idata, bytes);
double iStart, iElaps;
int gpu_sum = 0;
// allocate device memory
int *d_idata = NULL;
int *d_odata = NULL;
CHECK(hipMalloc((void **) &d_idata, bytes));
CHECK(hipMalloc((void **) &d_odata, grid.x * sizeof(int)));
// cpu reduction
iStart = seconds();
int cpu_sum = recursiveReduce (tmp, size);
iElaps = seconds() - iStart;
printf("cpu reduce elapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum);
// kernel 1: reduceNeighbored
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( reduceNeighbored), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
    gpu_sum = 0;
    for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu Neighbored elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x, block.x);
// kernel 2: reduceNeighbored with less divergence
/*CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
reduceNeighboredLess<<<grid, block>>>(d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu Neighbored2 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x, block.x);*/
// kernel 3: reduceInterleaved
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( reduceInterleaved), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
    gpu_sum = 0;
    for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu Interleaved elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x, block.x);
// kernel 4: reduceUnrolling2
/* CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
reduceUnrolling2<<<grid.x / 2, block>>>(d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 2 * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 2; i++) gpu_sum += h_odata[i];
printf("gpu Unrolling2 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x / 2, block.x);
// kernel 5: reduceUnrolling4
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
reduceUnrolling4<<<grid.x / 4, block>>>(d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i];
printf("gpu Unrolling4 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x / 4, block.x);*/
// kernel 6: reduceUnrolling8
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
hipLaunchKernelGGL(( reduceUnrolling8), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int),
hipMemcpyDeviceToHost));
    gpu_sum = 0;
    for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
printf("gpu Unrolling8 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x);
// for (int i = 0; i < grid.x / 16; i++) gpu_sum += h_odata[i];
// kernel 8: reduceUnrollWarps8
/*CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
reduceUnrollWarps8<<<grid.x / 8, block>>>(d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
printf("gpu UnrollWarp8 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x);
    // kernel 9: reduceCompleteUnrollWarps8
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
reduceCompleteUnrollWarps8<<<grid.x / 8, block>>>(d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
printf("gpu Cmptnroll8 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x);
// kernel 9: reduceCompleteUnroll
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
switch (blocksize)
{
case 1024:
reduceCompleteUnroll<1024><<<grid.x / 8, block>>>(d_idata, d_odata,
size);
break;
case 512:
reduceCompleteUnroll<512><<<grid.x / 8, block>>>(d_idata, d_odata,
size);
break;
case 256:
reduceCompleteUnroll<256><<<grid.x / 8, block>>>(d_idata, d_odata,
size);
break;
case 128:
reduceCompleteUnroll<128><<<grid.x / 8, block>>>(d_idata, d_odata,
size);
break;
case 64:
reduceCompleteUnroll<64><<<grid.x / 8, block>>>(d_idata, d_odata, size);
break;
}
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
printf("gpu Cmptnroll elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x);*/
// free host memory
free(h_idata);
free(h_odata);
// free device memory
CHECK(hipFree(d_idata));
CHECK(hipFree(d_odata));
// reset device
CHECK(hipDeviceReset());
// check the results
bResult = (gpu_sum == cpu_sum);
if(!bResult) printf("Test failed!\n");
return EXIT_SUCCESS;
}
| d28a8a1240f956a069f4ec39a380383f6df5dd92.cu | #include "common.h"
#include <cuda_runtime.h>
#include <stdio.h>
/*
* This code implements the interleaved and neighbor-paired approaches to
* parallel reduction in CUDA. For this example, the sum operation is used. A
* variety of optimizations on parallel reduction aimed at reducing divergence
* are also demonstrated, such as unrolling.
*/
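/*
 * The kernels reduceNeighbored, reduceInterleaved and reduceUnrolling8 launched by
 * main() below are assumed to come from common.h or another translation unit. The
 * kernel that follows is only an illustrative sketch of the interleaved-pair
 * approach, not necessarily the exact implementation used by this program.
 */
__global__ void reduceInterleavedSketch(int *g_idata, int *g_odata, unsigned int n)
{
    // set thread ID and global index
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // convert global data pointer to the local pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x;
    // boundary check
    if (idx >= n) return;
    // interleaved-pair reduction: the stride halves each step, keeping the active
    // threads packed in the low thread IDs and reducing warp divergence
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (tid < stride)
        {
            idata[tid] += idata[tid + stride];
        }
        // synchronize within threadblock
        __syncthreads();
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = idata[0];
}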
/*
// Neighbored Pair Implementation with less divergence
__global__ void reduceNeighboredLess (int *g_idata, int *g_odata,
unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if(idx >= n) return;
// in-place reduction in global memory
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
// convert tid into local array index
int index = 2 * stride * tid;
if (index < blockDim.x)
{
idata[index] += idata[index + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}*/
/*
__global__ void reduceUnrolling2 (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 2;
// unrolling 2
if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x];
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrolling4 (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 4;
// unrolling 4
if (idx + 3 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4;
}
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
*/
/*
__global__ void reduceUnrollWarps8 (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 32; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// unrolling warp
if (tid < 32)
{
volatile int *vmem = idata;
vmem[tid] += vmem[tid + 32];
vmem[tid] += vmem[tid + 16];
vmem[tid] += vmem[tid + 8];
vmem[tid] += vmem[tid + 4];
vmem[tid] += vmem[tid + 2];
vmem[tid] += vmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceCompleteUnrollWarps8 (int *g_idata, int *g_odata,
unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads();
// in-place reduction and complete unroll
if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
template <unsigned int iBlockSize>
__global__ void reduceCompleteUnroll(int *g_idata, int *g_odata,
unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads();
// in-place reduction and complete unroll
if (iBlockSize >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
__syncthreads();
if (iBlockSize >= 512 && tid < 256) idata[tid] += idata[tid + 256];
__syncthreads();
if (iBlockSize >= 256 && tid < 128) idata[tid] += idata[tid + 128];
__syncthreads();
if (iBlockSize >= 128 && tid < 64) idata[tid] += idata[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrollWarps (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 2;
// unrolling 2
if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x];
__syncthreads();
// in-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 32; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// unrolling last warp
if (tid < 32)
{
volatile int *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
*/
int main(int argc, char **argv)
{
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s starting reduction at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
bool bResult = false;
// initialization
int size = 1 << 24; // total number of elements to reduce
printf(" with array size %d ", size);
// execution configuration
int blocksize = 512; // initial block size
if(argc > 1)
{
blocksize = atoi(argv[1]); // block size from command line argument
}
dim3 block (blocksize, 1);
dim3 grid ((size + block.x - 1) / block.x, 1);
printf("grid %d block %d\n", grid.x, block.x);
// allocate host memory
size_t bytes = size * sizeof(int);
int *h_idata = (int *) malloc(bytes);
int *h_odata = (int *) malloc(grid.x * sizeof(int));
int *tmp = (int *) malloc(bytes);
// initialize the array
for (int i = 0; i < size; i++)
{
// mask off high 2 bytes to force max number to 255
h_idata[i] = (int)( rand() & 0xFF );
}
memcpy (tmp, h_idata, bytes);
double iStart, iElaps;
int gpu_sum = 0;
// allocate device memory
int *d_idata = NULL;
int *d_odata = NULL;
CHECK(cudaMalloc((void **) &d_idata, bytes));
CHECK(cudaMalloc((void **) &d_odata, grid.x * sizeof(int)));
// cpu reduction
iStart = seconds();
int cpu_sum = recursiveReduce (tmp, size);
iElaps = seconds() - iStart;
printf("cpu reduce elapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum);
// kernel 1: reduceNeighbored
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceNeighbored<<<grid, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
    gpu_sum = 0;
    for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu Neighbored elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x, block.x);
// kernel 2: reduceNeighbored with less divergence
/*CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceNeighboredLess<<<grid, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu Neighbored2 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x, block.x);*/
// kernel 3: reduceInterleaved
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceInterleaved<<<grid, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
    gpu_sum = 0;
    for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf("gpu Interleaved elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x, block.x);
// kernel 4: reduceUnrolling2
/* CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceUnrolling2<<<grid.x / 2, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 2 * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 2; i++) gpu_sum += h_odata[i];
printf("gpu Unrolling2 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x / 2, block.x);
// kernel 5: reduceUnrolling4
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceUnrolling4<<<grid.x / 4, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i];
printf("gpu Unrolling4 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x / 4, block.x);*/
// kernel 6: reduceUnrolling8
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceUnrolling8<<<grid.x / 8, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int),
cudaMemcpyDeviceToHost));
    gpu_sum = 0;
    for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
printf("gpu Unrolling8 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x);
// for (int i = 0; i < grid.x / 16; i++) gpu_sum += h_odata[i];
// kernel 8: reduceUnrollWarps8
/*CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceUnrollWarps8<<<grid.x / 8, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
printf("gpu UnrollWarp8 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x);
    // kernel 9: reduceCompleteUnrollWarps8
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceCompleteUnrollWarps8<<<grid.x / 8, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
printf("gpu Cmptnroll8 elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x);
// kernel 9: reduceCompleteUnroll
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
switch (blocksize)
{
case 1024:
reduceCompleteUnroll<1024><<<grid.x / 8, block>>>(d_idata, d_odata,
size);
break;
case 512:
reduceCompleteUnroll<512><<<grid.x / 8, block>>>(d_idata, d_odata,
size);
break;
case 256:
reduceCompleteUnroll<256><<<grid.x / 8, block>>>(d_idata, d_odata,
size);
break;
case 128:
reduceCompleteUnroll<128><<<grid.x / 8, block>>>(d_idata, d_odata,
size);
break;
case 64:
reduceCompleteUnroll<64><<<grid.x / 8, block>>>(d_idata, d_odata, size);
break;
}
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
printf("gpu Cmptnroll elapsed %f sec gpu_sum: %d <<<grid %d block "
"%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x);*/
// free host memory
free(h_idata);
free(h_odata);
// free device memory
CHECK(cudaFree(d_idata));
CHECK(cudaFree(d_odata));
// reset device
CHECK(cudaDeviceReset());
// check the results
bResult = (gpu_sum == cpu_sum);
if(!bResult) printf("Test failed!\n");
return EXIT_SUCCESS;
}
|
27bcead1a4dc3d8c59263189d5148e84685f3648.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" {
#include "head.h"
}
#include <stdio.h>
#define MAX_BLOCK_DIM_SIZE 65535
#ifndef MIN
#define MIN(x,y) ((x < y) ? x : y)
#endif
//#ifndef _REDUCE_KERNEL_H_
//#define _REDUCE_KERNEL_H_
__device__ float reduction_sum_rad_xsec = 0.0; // Used for the deldop xsec all frames fx
int isPow2(unsigned int x)
{
return ((x&(x-1))==0);
}
unsigned int nextPow2(unsigned int x)
{
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
__inline__ __device__
float warpReduceSum(float val) {
for (int offset = warpSize/2; offset > 0; offset /= 2)
val += __shfl_down(val, offset);
return val;
}
__inline__ __device__
float warpReduceMax(float val) {
for (int offset = warpSize/2; offset > 0; offset /= 2)
val = fmaxf((__shfl_down(val, offset)),val);
return val;
}
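// Block-level reductions built from the warp primitives above: every warp first
// reduces its own values with shuffles, lane 0 of each warp parks the partial
// result in shared memory (32 slots cover the 1024-thread maximum), and the
// first warp then reduces those per-warp partials to a single value.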
__inline__ __device__
float blockReduceSum(float val) {
static __shared__ float shared[32];
int lane=threadIdx.x%warpSize;
int wid=threadIdx.x/warpSize;
val=warpReduceSum(val);
//write reduced value to shared memory
if(lane==0) shared[wid]=val;
__syncthreads();
//ensure we only grab a value from shared memory if that warp existed
val = (threadIdx.x<blockDim.x/warpSize) ? shared[lane] : float(0.0);
if(wid==0) val=warpReduceSum(val);
return val;
}
__inline__ __device__
float blockReduceMax(float val) {
static __shared__ float shared[32];
int lane=threadIdx.x%warpSize;
int wid=threadIdx.x/warpSize;
val=warpReduceMax(val);
//write reduced value to shared memory
if(lane==0) shared[wid]=val;
__syncthreads();
//ensure we only grab a value from shared memory if that warp existed
val = (threadIdx.x<blockDim.x/warpSize) ? shared[lane] : float(0.0);
if(wid==0) val=warpReduceMax(val);
return val;
}
__global__ void deviceReduceWarpAtomicKernel(float *in, float* out, int N) {
float sum = float(0.0);
for(int i = blockIdx.x * blockDim.x + threadIdx.x;
i < N;
i += blockDim.x * gridDim.x) {
sum += in[i];
}
sum = warpReduceSum(sum);
  if ((threadIdx.x & (warpSize - 1)) == 0)
    atomicAdd(out, sum);
}
__global__ void device_reduce_block_atomic_kernel(float *in, float* out, int N) {
float sum=float(0.0);
for(int i=blockIdx.x*blockDim.x+threadIdx.x;i<N;i+=blockDim.x*gridDim.x) {
sum+=in[i];
}
sum=blockReduceSum(sum);
if(threadIdx.x==0)
atomicAdd(out,sum);
}
__global__ void device_sum_block_atomic_kernel(float *in, float* out, int N) {
float maxz=float(0.0);
for(int i=blockIdx.x*blockDim.x+threadIdx.x;i<N;i+=blockDim.x*gridDim.x) {
maxz=fmaxf(in[i],maxz);
}
maxz=blockReduceMax(maxz);
if(threadIdx.x==0)
atomicExch(out,maxz);
}
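/* Illustrative host-side driver (an assumption added for documentation; it is not part
 * of the original file). The atomic kernels above only accumulate into *out, so the
 * accumulator must be zeroed before launch, and their grid-stride loops let a capped
 * grid cover an arbitrary N. */
void deviceReduceBlockAtomicSketch(float *d_in, float *d_out, int N)
{
  gpuErrchk(hipMemset(d_out, 0, sizeof(float)));
  int threads = 256;
  int blocks = MIN(1024, (N + threads - 1) / threads);
  hipLaunchKernelGGL((device_reduce_block_atomic_kernel), dim3(blocks), dim3(threads), 0, 0,
      d_in, d_out, N);
  gpuErrchk(hipDeviceSynchronize());
}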
/* Compute the number of threads and blocks to use for reduction kernel 6
* For kernel 6, we observe the maximum specified number of blocks, because
* each thread in that kernel can process a variable number of elements. */
float2 getNumBlocksAndThreads(int n, int maxBlocks, int maxThreads)
{
//get device capability, to avoid block/grid size exceed the upper bound
hipDeviceProp_t prop;
int device, threads, blocks;
float2 xb_yt;
gpuErrchk(hipGetDevice(&device));
gpuErrchk(hipGetDeviceProperties(&prop, device));
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
if ((float)threads*blocks > (float)prop.maxGridSize[0] * prop.maxThreadsPerBlock)
printf("Array size for parallel reduction is too large!\n");
if (blocks > prop.maxGridSize[0])
{
printf("Grid size <%d> exceeds the device capability <%d>, set block size as %d (original %d)\n",
blocks, prop.maxGridSize[0], threads*2, threads);
blocks /= 2;
threads *= 2;
}
blocks = MIN(maxBlocks, blocks);
xb_yt.x = blocks;
xb_yt.y = threads;
return xb_yt;
}
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
template<class T>
struct SharedMemory
{
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
template<>
struct SharedMemory<double>
{
__device__ inline operator double *()
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
__device__ inline operator const double *() const
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
};
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n threads
- only works for power-of-2 arrays
*/
/*
This version adds multiple elements per thread sequentially. This reduces the overall
cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
(Brent's Theorem optimization)
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
template <class T, unsigned int blockSize, int nIsPow2>
__global__ void
reduce6(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
T mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
mySum += g_idata[i];
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n)
mySum += g_idata[i+blockSize];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
__syncthreads();
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300 )
if ( tid < 32 )
{
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
mySum += __shfl_down(mySum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
#endif
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
/*
    Max-reduction variant of reduce6: the same multiple-elements-per-thread
    scheme (Brent's Theorem optimization), but elements are combined with
    fmaxf instead of being summed.
    Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
    In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
    If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
template <class T, unsigned int blockSize, int nIsPow2>
__global__ void
maxz6(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
    T myMax = 0;   /* a zero seed assumes the true maximum is non-negative */
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
myMax = fmaxf(g_idata[i], myMax);
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n)
myMax = fmaxf(g_idata[i+blockSize], myMax);
i += gridSize;
}
    // each thread puts its local max into shared memory
sdata[tid] = myMax;
__syncthreads();
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid] = myMax = fmaxf(myMax, sdata[tid + 256]);
}
__syncthreads();
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid] = myMax = fmaxf(myMax, sdata[tid + 128]);
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid] = myMax = fmaxf(myMax, sdata[tid + 64]);
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300 )
if ( tid < 32 )
{
        // Fetch final intermediate max from 2nd warp
if (blockSize >= 64) myMax = fmaxf(sdata[tid + 32], myMax);
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
myMax = fmaxf(myMax, __shfl_down(myMax, offset));
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = myMax = fmaxf(myMax, sdata[tid + 32]);
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = myMax = fmaxf(myMax, sdata[tid + 16]);
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = myMax = fmaxf(myMax, sdata[tid + 8]);
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = myMax = fmaxf(myMax, sdata[tid + 4]);
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = myMax = fmaxf(myMax, sdata[tid + 2]);
}
__syncthreads();
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = myMax = fmaxf(myMax, sdata[tid + 1]);
}
__syncthreads();
#endif
// write result for this block to global mem
if (tid == 0)
g_odata[blockIdx.x] = myMax;
}
/*
    Min-reduction counterpart of maxz6: identical structure, but elements are
    combined with fminf. The shared-memory requirements are the same.
*/
template <class T, unsigned int blockSize, int nIsPow2>
__global__ void
minz6(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
    T myMin = 0;   /* a zero seed assumes the true minimum is non-positive; all-positive data would be clamped to 0 */
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
myMin = fminf(g_idata[i], myMin);
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n)
myMin = fminf(g_idata[i+blockSize], myMin);
i += gridSize;
}
    // each thread puts its local min into shared memory
sdata[tid] = myMin;
__syncthreads();
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid] = myMin = fminf(myMin, sdata[tid + 256]);
}
__syncthreads();
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid] = myMin = fminf(myMin, sdata[tid + 128]);
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid] = myMin = fminf(myMin, sdata[tid + 64]);
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300 )
if ( tid < 32 )
{
        // Fetch final intermediate min from 2nd warp
if (blockSize >= 64) myMin = fminf(sdata[tid + 32], myMin);
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
myMin = fminf(myMin, __shfl_down(myMin, offset));
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = myMin = fminf(myMin, sdata[tid + 32]);
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = myMin = fminf(myMin, sdata[tid + 16]);
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = myMin = fminf(myMin, sdata[tid + 8]);
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = myMin = fminf(myMin, sdata[tid + 4]);
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = myMin = fminf(myMin, sdata[tid + 2]);
}
__syncthreads();
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = myMin = fminf(myMin, sdata[tid + 1]);
}
__syncthreads();
#endif
// write result for this block to global mem
if (tid == 0)
g_odata[blockIdx.x] = myMin;
}
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////
template <class T>
void
reduce(int size, int threads, int blocks,
int whichKernel, T *d_idata, T *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
// choose which of the optimized versions of reduction to launch
switch (whichKernel)
{
case 6:
default:
if (isPow2(size))
{
switch (threads)
{
case 512:
hipLaunchKernelGGL(( reduce6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 256:
hipLaunchKernelGGL(( reduce6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 128:
hipLaunchKernelGGL(( reduce6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 64:
hipLaunchKernelGGL(( reduce6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 32:
hipLaunchKernelGGL(( reduce6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 16:
hipLaunchKernelGGL(( reduce6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 8:
hipLaunchKernelGGL(( reduce6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 4:
hipLaunchKernelGGL(( reduce6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 2:
hipLaunchKernelGGL(( reduce6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 1:
hipLaunchKernelGGL(( reduce6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
}
}
else
{
switch (threads)
{
case 512:
hipLaunchKernelGGL(( reduce6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 256:
hipLaunchKernelGGL(( reduce6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 128:
hipLaunchKernelGGL(( reduce6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 64:
hipLaunchKernelGGL(( reduce6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 32:
hipLaunchKernelGGL(( reduce6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 16:
hipLaunchKernelGGL(( reduce6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 8:
hipLaunchKernelGGL(( reduce6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 4:
hipLaunchKernelGGL(( reduce6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 2:
hipLaunchKernelGGL(( reduce6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 1:
hipLaunchKernelGGL(( reduce6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
}
}
break;
}
}
// Instantiate the reduction function for 3 types
template void
reduce<int>(int size, int threads, int blocks,
int whichKernel, int *d_idata, int *d_odata);
template void
reduce<float>(int size, int threads, int blocks,
int whichKernel, float *d_idata, float *d_odata);
template void
reduce<double>(int size, int threads, int blocks,
int whichKernel, double *d_idata, double *d_odata);
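/* Illustrative host-side usage (a sketch; the helper name sumOnDeviceExample
 * and the maxBlocks/maxThreads values are our own choices, not required by
 * anything else in this file): pick a launch configuration with
 * getNumBlocksAndThreads(), run one pass of reduce6 so each block writes a
 * partial sum, then copy the small partial-sum array back and finish on the
 * host. */
static inline float sumOnDeviceExample(float *d_in, int n)
{
    float2 bt = getNumBlocksAndThreads(n, /*maxBlocks=*/64, /*maxThreads=*/256);
    int blocks  = (int)bt.x;                 /* .x carries the block count  */
    int threads = (int)bt.y;                 /* .y carries the thread count */

    float *d_partial;
    gpuErrchk(hipMalloc((void**)&d_partial, blocks * sizeof(float)));

    /* one pass: block b writes its partial sum into d_partial[b] */
    reduce<float>(n, threads, blocks, /*whichKernel=*/6, d_in, d_partial);

    /* the blocking device-to-host copy also synchronizes with the kernel */
    float *h_partial = new float[blocks];
    gpuErrchk(hipMemcpy(h_partial, d_partial, blocks * sizeof(float),
                        hipMemcpyDeviceToHost));
    float total = 0.0f;
    for (int b = 0; b < blocks; b++)
        total += h_partial[b];

    delete[] h_partial;
    gpuErrchk(hipFree(d_partial));
    return total;
}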
template <class T>
void
reducedI(int size, int threads, int blocks, int whichKernel, T *dv,
T *dcom0, T *dcom1, T *dcom2, T *dI00, T *dI01, T *dI02, T *dI10,
T *dI11, T *dI12, T *dI20, T *dI21, T *dI22, T *d_odata_dv,
T *d_odata_dcom0, T *d_odata_dcom1, T *d_odata_dcom2, T *d_odata_dI00,
T *d_odata_dI01, T *d_odata_dI02, T *d_odata_dI10, T *d_odata_dI11,
T *d_odata_dI12, T *d_odata_dI20, T *d_odata_dI21, T *d_odata_dI22)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
/* Create 13 streams */
hipStream_t stream01, stream02, stream03, stream04, stream05, stream06,
stream07, stream08, stream09, stream10, stream11, stream12, stream13;
hipStreamCreate(&stream01); hipStreamCreate(&stream02);
hipStreamCreate(&stream03); hipStreamCreate(&stream04);
hipStreamCreate(&stream05); hipStreamCreate(&stream06);
hipStreamCreate(&stream07); hipStreamCreate(&stream08);
hipStreamCreate(&stream09); hipStreamCreate(&stream10);
hipStreamCreate(&stream11); hipStreamCreate(&stream12);
hipStreamCreate(&stream13);
// choose which of the optimized versions of reduction to launch
switch (whichKernel)
{
case 6:
default:
if (isPow2(size))
{
switch (threads)
{
case 512:
hipLaunchKernelGGL(( reduce6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream01 , dv, d_odata_dv, size);
hipLaunchKernelGGL(( reduce6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream02 , dcom0, d_odata_dcom0, size);
hipLaunchKernelGGL(( reduce6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream03 , dcom1, d_odata_dcom1, size);
hipLaunchKernelGGL(( reduce6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream04 , dcom2, d_odata_dcom2, size);
hipLaunchKernelGGL(( reduce6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream05 , dI00, d_odata_dI00, size);
hipLaunchKernelGGL(( reduce6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream06 , dI01, d_odata_dI01, size);
hipLaunchKernelGGL(( reduce6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream07 , dI02, d_odata_dI02, size);
hipLaunchKernelGGL(( reduce6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream08 , dI10, d_odata_dI10, size);
hipLaunchKernelGGL(( reduce6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream09 , dI11, d_odata_dI11, size);
hipLaunchKernelGGL(( reduce6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream10 , dI12, d_odata_dI12, size);
hipLaunchKernelGGL(( reduce6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream11 , dI20, d_odata_dI20, size);
hipLaunchKernelGGL(( reduce6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream12 , dI21, d_odata_dI21, size);
hipLaunchKernelGGL(( reduce6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream13 , dI22, d_odata_dI22, size);
break;
case 256:
hipLaunchKernelGGL(( reduce6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream01 , dv, d_odata_dv, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dv (pow2) and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream02 , dcom0, d_odata_dcom0, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dcom0 (pow2) and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream03 , dcom1, d_odata_dcom1, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dcom1 (pow2) and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream04 , dcom2, d_odata_dcom2, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dcom2 (pow2) and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream05 , dI00, d_odata_dI00, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI00 (pow2) and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream06 , dI01, d_odata_dI01, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI01 (pow2) and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream07 , dI02, d_odata_dI02, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI02 (pow2) and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream08 , dI10, d_odata_dI10, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI10 (pow2) and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream09 , dI11, d_odata_dI11, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI11 (pow2) and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream10 , dI12, d_odata_dI12, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI12 (pow2) and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream11 , dI20, d_odata_dI20, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI20 (pow2) and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream12 , dI21, d_odata_dI21, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI21 (pow2) and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream13 , dI22, d_odata_dI22, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI22 (pow2) and 256 threads");
break;
case 128:
hipLaunchKernelGGL(( reduce6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream01 , dv, d_odata_dv, size);
hipLaunchKernelGGL(( reduce6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream02 , dcom0, d_odata_dcom0, size);
hipLaunchKernelGGL(( reduce6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream03 , dcom1, d_odata_dcom1, size);
hipLaunchKernelGGL(( reduce6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream04 , dcom2, d_odata_dcom2, size);
hipLaunchKernelGGL(( reduce6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream05 , dI00, d_odata_dI00, size);
hipLaunchKernelGGL(( reduce6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream06 , dI01, d_odata_dI01, size);
hipLaunchKernelGGL(( reduce6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream07 , dI02, d_odata_dI02, size);
hipLaunchKernelGGL(( reduce6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream08 , dI10, d_odata_dI10, size);
hipLaunchKernelGGL(( reduce6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream09 , dI11, d_odata_dI11, size);
hipLaunchKernelGGL(( reduce6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream10 , dI12, d_odata_dI12, size);
hipLaunchKernelGGL(( reduce6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream11 , dI20, d_odata_dI20, size);
hipLaunchKernelGGL(( reduce6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream12 , dI21, d_odata_dI21, size);
hipLaunchKernelGGL(( reduce6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream13 , dI22, d_odata_dI22, size);
break;
case 64:
hipLaunchKernelGGL(( reduce6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream01 , dv, d_odata_dv, size);
hipLaunchKernelGGL(( reduce6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream02 , dcom0, d_odata_dcom0, size);
hipLaunchKernelGGL(( reduce6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream03 , dcom1, d_odata_dcom1, size);
hipLaunchKernelGGL(( reduce6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream04 , dcom2, d_odata_dcom2, size);
hipLaunchKernelGGL(( reduce6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream05 , dI00, d_odata_dI00, size);
hipLaunchKernelGGL(( reduce6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream06 , dI01, d_odata_dI01, size);
hipLaunchKernelGGL(( reduce6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream07 , dI02, d_odata_dI02, size);
hipLaunchKernelGGL(( reduce6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream08 , dI10, d_odata_dI10, size);
hipLaunchKernelGGL(( reduce6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream09 , dI11, d_odata_dI11, size);
hipLaunchKernelGGL(( reduce6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream10 , dI12, d_odata_dI12, size);
hipLaunchKernelGGL(( reduce6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream11 , dI20, d_odata_dI20, size);
hipLaunchKernelGGL(( reduce6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream12 , dI21, d_odata_dI21, size);
hipLaunchKernelGGL(( reduce6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream13 , dI22, d_odata_dI22, size);
break;
case 32:
hipLaunchKernelGGL(( reduce6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream01 , dv, d_odata_dv, size);
hipLaunchKernelGGL(( reduce6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream02 , dcom0, d_odata_dcom0, size);
hipLaunchKernelGGL(( reduce6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream03 , dcom1, d_odata_dcom1, size);
hipLaunchKernelGGL(( reduce6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream04 , dcom2, d_odata_dcom2, size);
hipLaunchKernelGGL(( reduce6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream05 , dI00, d_odata_dI00, size);
hipLaunchKernelGGL(( reduce6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream06 , dI01, d_odata_dI01, size);
hipLaunchKernelGGL(( reduce6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream07 , dI02, d_odata_dI02, size);
hipLaunchKernelGGL(( reduce6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream08 , dI10, d_odata_dI10, size);
hipLaunchKernelGGL(( reduce6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream09 , dI11, d_odata_dI11, size);
hipLaunchKernelGGL(( reduce6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream10 , dI12, d_odata_dI12, size);
hipLaunchKernelGGL(( reduce6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream11 , dI20, d_odata_dI20, size);
hipLaunchKernelGGL(( reduce6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream12 , dI21, d_odata_dI21, size);
hipLaunchKernelGGL(( reduce6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream13 , dI22, d_odata_dI22, size);
break;
case 16:
hipLaunchKernelGGL(( reduce6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream01 , dv, d_odata_dv, size);
hipLaunchKernelGGL(( reduce6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream02 , dcom0, d_odata_dcom0, size);
hipLaunchKernelGGL(( reduce6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream03 , dcom1, d_odata_dcom1, size);
hipLaunchKernelGGL(( reduce6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream04 , dcom2, d_odata_dcom2, size);
hipLaunchKernelGGL(( reduce6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream05 , dI00, d_odata_dI00, size);
hipLaunchKernelGGL(( reduce6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream06 , dI01, d_odata_dI01, size);
hipLaunchKernelGGL(( reduce6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream07 , dI02, d_odata_dI02, size);
hipLaunchKernelGGL(( reduce6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream08 , dI10, d_odata_dI10, size);
hipLaunchKernelGGL(( reduce6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream09 , dI11, d_odata_dI11, size);
hipLaunchKernelGGL(( reduce6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream10 , dI12, d_odata_dI12, size);
hipLaunchKernelGGL(( reduce6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream11 , dI20, d_odata_dI20, size);
hipLaunchKernelGGL(( reduce6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream12 , dI21, d_odata_dI21, size);
hipLaunchKernelGGL(( reduce6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream13 , dI22, d_odata_dI22, size);
break;
case 8:
hipLaunchKernelGGL(( reduce6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream01 , dv, d_odata_dv, size);
hipLaunchKernelGGL(( reduce6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream02 , dcom0, d_odata_dcom0, size);
hipLaunchKernelGGL(( reduce6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream03 , dcom1, d_odata_dcom1, size);
hipLaunchKernelGGL(( reduce6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream04 , dcom2, d_odata_dcom2, size);
hipLaunchKernelGGL(( reduce6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream05 , dI00, d_odata_dI00, size);
hipLaunchKernelGGL(( reduce6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream06 , dI01, d_odata_dI01, size);
hipLaunchKernelGGL(( reduce6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream07 , dI02, d_odata_dI02, size);
hipLaunchKernelGGL(( reduce6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream08 , dI10, d_odata_dI10, size);
hipLaunchKernelGGL(( reduce6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream09 , dI11, d_odata_dI11, size);
hipLaunchKernelGGL(( reduce6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream10 , dI12, d_odata_dI12, size);
hipLaunchKernelGGL(( reduce6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream11 , dI20, d_odata_dI20, size);
hipLaunchKernelGGL(( reduce6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream12 , dI21, d_odata_dI21, size);
hipLaunchKernelGGL(( reduce6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream13 , dI22, d_odata_dI22, size);
break;
case 4:
hipLaunchKernelGGL(( reduce6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream01 , dv, d_odata_dv, size);
hipLaunchKernelGGL(( reduce6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream02 , dcom0, d_odata_dcom0, size);
hipLaunchKernelGGL(( reduce6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream03 , dcom1, d_odata_dcom1, size);
hipLaunchKernelGGL(( reduce6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream04 , dcom2, d_odata_dcom2, size);
hipLaunchKernelGGL(( reduce6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream05 , dI00, d_odata_dI00, size);
hipLaunchKernelGGL(( reduce6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream06 , dI01, d_odata_dI01, size);
hipLaunchKernelGGL(( reduce6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream07 , dI02, d_odata_dI02, size);
hipLaunchKernelGGL(( reduce6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream08 , dI10, d_odata_dI10, size);
hipLaunchKernelGGL(( reduce6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream09 , dI11, d_odata_dI11, size);
hipLaunchKernelGGL(( reduce6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream10 , dI12, d_odata_dI12, size);
hipLaunchKernelGGL(( reduce6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream11 , dI20, d_odata_dI20, size);
hipLaunchKernelGGL(( reduce6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream12 , dI21, d_odata_dI21, size);
hipLaunchKernelGGL(( reduce6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream13 , dI22, d_odata_dI22, size);
break;
case 2:
hipLaunchKernelGGL(( reduce6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream01 , dv, d_odata_dv, size);
hipLaunchKernelGGL(( reduce6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream02 , dcom0, d_odata_dcom0, size);
hipLaunchKernelGGL(( reduce6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream03 , dcom1, d_odata_dcom1, size);
hipLaunchKernelGGL(( reduce6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream04 , dcom2, d_odata_dcom2, size);
hipLaunchKernelGGL(( reduce6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream05 , dI00, d_odata_dI00, size);
hipLaunchKernelGGL(( reduce6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream06 , dI01, d_odata_dI01, size);
hipLaunchKernelGGL(( reduce6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream07 , dI02, d_odata_dI02, size);
hipLaunchKernelGGL(( reduce6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream08 , dI10, d_odata_dI10, size);
hipLaunchKernelGGL(( reduce6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream09 , dI11, d_odata_dI11, size);
hipLaunchKernelGGL(( reduce6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream10 , dI12, d_odata_dI12, size);
hipLaunchKernelGGL(( reduce6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream11 , dI20, d_odata_dI20, size);
hipLaunchKernelGGL(( reduce6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream12 , dI21, d_odata_dI21, size);
hipLaunchKernelGGL(( reduce6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream13 , dI22, d_odata_dI22, size);
break;
case 1:
hipLaunchKernelGGL(( reduce6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream01 , dv, d_odata_dv, size);
hipLaunchKernelGGL(( reduce6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream02 , dcom0, d_odata_dcom0, size);
hipLaunchKernelGGL(( reduce6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream03 , dcom1, d_odata_dcom1, size);
hipLaunchKernelGGL(( reduce6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream04 , dcom2, d_odata_dcom2, size);
hipLaunchKernelGGL(( reduce6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream05 , dI00, d_odata_dI00, size);
hipLaunchKernelGGL(( reduce6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream06 , dI01, d_odata_dI01, size);
hipLaunchKernelGGL(( reduce6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream07 , dI02, d_odata_dI02, size);
hipLaunchKernelGGL(( reduce6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream08 , dI10, d_odata_dI10, size);
hipLaunchKernelGGL(( reduce6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream09 , dI11, d_odata_dI11, size);
hipLaunchKernelGGL(( reduce6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream10 , dI12, d_odata_dI12, size);
hipLaunchKernelGGL(( reduce6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream11 , dI20, d_odata_dI20, size);
hipLaunchKernelGGL(( reduce6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream12 , dI21, d_odata_dI21, size);
hipLaunchKernelGGL(( reduce6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, stream13 , dI22, d_odata_dI22, size);
break;
}
}
else
{
switch (threads)
{
case 512:
hipLaunchKernelGGL(( reduce6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream01 , dv, d_odata_dv, size);
hipLaunchKernelGGL(( reduce6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream02 , dcom0, d_odata_dcom0, size);
hipLaunchKernelGGL(( reduce6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream03 , dcom1, d_odata_dcom1, size);
hipLaunchKernelGGL(( reduce6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream04 , dcom2, d_odata_dcom2, size);
hipLaunchKernelGGL(( reduce6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream05 , dI00, d_odata_dI00, size);
hipLaunchKernelGGL(( reduce6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream06 , dI01, d_odata_dI01, size);
hipLaunchKernelGGL(( reduce6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream07 , dI02, d_odata_dI02, size);
hipLaunchKernelGGL(( reduce6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream08 , dI10, d_odata_dI10, size);
hipLaunchKernelGGL(( reduce6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream09 , dI11, d_odata_dI11, size);
hipLaunchKernelGGL(( reduce6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream10 , dI12, d_odata_dI12, size);
hipLaunchKernelGGL(( reduce6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream11 , dI20, d_odata_dI20, size);
hipLaunchKernelGGL(( reduce6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream12 , dI21, d_odata_dI21, size);
hipLaunchKernelGGL(( reduce6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream13 , dI22, d_odata_dI22, size);
break;
case 256:
hipLaunchKernelGGL(( reduce6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream01 , dv, d_odata_dv, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dv and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream02 , dcom0, d_odata_dcom0, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dcom0 and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream03 , dcom1, d_odata_dcom1, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dcom1 and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream04 , dcom2, d_odata_dcom2, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dcom2 and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream05 , dI00, d_odata_dI00, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI00 and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream06 , dI01, d_odata_dI01, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI01 and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream07 , dI02, d_odata_dI02, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI02 and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream08 , dI10, d_odata_dI10, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI10 and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream09 , dI11, d_odata_dI11, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI11 and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream10 , dI12, d_odata_dI12, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI12 and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream11 , dI20, d_odata_dI20, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI20 and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream12 , dI21, d_odata_dI21, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI21 and 256 threads");
hipLaunchKernelGGL(( reduce6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream13 , dI22, d_odata_dI22, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI22 and 256 threads");
break;
case 128:
hipLaunchKernelGGL(( reduce6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream01 , dv, d_odata_dv, size);
hipLaunchKernelGGL(( reduce6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream02 , dcom0, d_odata_dcom0, size);
hipLaunchKernelGGL(( reduce6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream03 , dcom1, d_odata_dcom1, size);
hipLaunchKernelGGL(( reduce6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream04 , dcom2, d_odata_dcom2, size);
hipLaunchKernelGGL(( reduce6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream05 , dI00, d_odata_dI00, size);
hipLaunchKernelGGL(( reduce6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream06 , dI01, d_odata_dI01, size);
hipLaunchKernelGGL(( reduce6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream07 , dI02, d_odata_dI02, size);
hipLaunchKernelGGL(( reduce6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream08 , dI10, d_odata_dI10, size);
hipLaunchKernelGGL(( reduce6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream09 , dI11, d_odata_dI11, size);
hipLaunchKernelGGL(( reduce6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream10 , dI12, d_odata_dI12, size);
hipLaunchKernelGGL(( reduce6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream11 , dI20, d_odata_dI20, size);
hipLaunchKernelGGL(( reduce6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream12 , dI21, d_odata_dI21, size);
hipLaunchKernelGGL(( reduce6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream13 , dI22, d_odata_dI22, size);
break;
case 64:
hipLaunchKernelGGL(( reduce6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream01 , dv, d_odata_dv, size);
hipLaunchKernelGGL(( reduce6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream02 , dcom0, d_odata_dcom0, size);
hipLaunchKernelGGL(( reduce6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream03 , dcom1, d_odata_dcom1, size);
hipLaunchKernelGGL(( reduce6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream04 , dcom2, d_odata_dcom2, size);
hipLaunchKernelGGL(( reduce6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream05 , dI00, d_odata_dI00, size);
hipLaunchKernelGGL(( reduce6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream06 , dI01, d_odata_dI01, size);
hipLaunchKernelGGL(( reduce6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream07 , dI02, d_odata_dI02, size);
hipLaunchKernelGGL(( reduce6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream08 , dI10, d_odata_dI10, size);
hipLaunchKernelGGL(( reduce6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream09 , dI11, d_odata_dI11, size);
hipLaunchKernelGGL(( reduce6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream10 , dI12, d_odata_dI12, size);
hipLaunchKernelGGL(( reduce6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream11 , dI20, d_odata_dI20, size);
hipLaunchKernelGGL(( reduce6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream12 , dI21, d_odata_dI21, size);
hipLaunchKernelGGL(( reduce6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream13 , dI22, d_odata_dI22, size);
break;
case 32:
hipLaunchKernelGGL(( reduce6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream01 , dv, d_odata_dv, size);
hipLaunchKernelGGL(( reduce6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream02 , dcom0, d_odata_dcom0, size);
hipLaunchKernelGGL(( reduce6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream03 , dcom1, d_odata_dcom1, size);
hipLaunchKernelGGL(( reduce6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream04 , dcom2, d_odata_dcom2, size);
hipLaunchKernelGGL(( reduce6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream05 , dI00, d_odata_dI00, size);
hipLaunchKernelGGL(( reduce6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream06 , dI01, d_odata_dI01, size);
hipLaunchKernelGGL(( reduce6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream07 , dI02, d_odata_dI02, size);
hipLaunchKernelGGL(( reduce6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream08 , dI10, d_odata_dI10, size);
hipLaunchKernelGGL(( reduce6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream09 , dI11, d_odata_dI11, size);
hipLaunchKernelGGL(( reduce6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream10 , dI12, d_odata_dI12, size);
hipLaunchKernelGGL(( reduce6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream11 , dI20, d_odata_dI20, size);
hipLaunchKernelGGL(( reduce6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream12 , dI21, d_odata_dI21, size);
hipLaunchKernelGGL(( reduce6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream13 , dI22, d_odata_dI22, size);
break;
case 16:
hipLaunchKernelGGL(( reduce6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream01 , dv, d_odata_dv, size);
hipLaunchKernelGGL(( reduce6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream02 , dcom0, d_odata_dcom0, size);
hipLaunchKernelGGL(( reduce6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream03 , dcom1, d_odata_dcom1, size);
hipLaunchKernelGGL(( reduce6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream04 , dcom2, d_odata_dcom2, size);
hipLaunchKernelGGL(( reduce6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream05 , dI00, d_odata_dI00, size);
hipLaunchKernelGGL(( reduce6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream06 , dI01, d_odata_dI01, size);
hipLaunchKernelGGL(( reduce6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream07 , dI02, d_odata_dI02, size);
hipLaunchKernelGGL(( reduce6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream08 , dI10, d_odata_dI10, size);
hipLaunchKernelGGL(( reduce6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream09 , dI11, d_odata_dI11, size);
hipLaunchKernelGGL(( reduce6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream10 , dI12, d_odata_dI12, size);
hipLaunchKernelGGL(( reduce6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream11 , dI20, d_odata_dI20, size);
hipLaunchKernelGGL(( reduce6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream12 , dI21, d_odata_dI21, size);
hipLaunchKernelGGL(( reduce6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream13 , dI22, d_odata_dI22, size);
break;
case 8:
hipLaunchKernelGGL(( reduce6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream01 , dv, d_odata_dv, size);
hipLaunchKernelGGL(( reduce6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream02 , dcom0, d_odata_dcom0, size);
hipLaunchKernelGGL(( reduce6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream03 , dcom1, d_odata_dcom1, size);
hipLaunchKernelGGL(( reduce6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream04 , dcom2, d_odata_dcom2, size);
hipLaunchKernelGGL(( reduce6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream05 , dI00, d_odata_dI00, size);
hipLaunchKernelGGL(( reduce6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream06 , dI01, d_odata_dI01, size);
hipLaunchKernelGGL(( reduce6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream07 , dI02, d_odata_dI02, size);
hipLaunchKernelGGL(( reduce6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream08 , dI10, d_odata_dI10, size);
hipLaunchKernelGGL(( reduce6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream09 , dI11, d_odata_dI11, size);
hipLaunchKernelGGL(( reduce6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream10 , dI12, d_odata_dI12, size);
hipLaunchKernelGGL(( reduce6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream11 , dI20, d_odata_dI20, size);
hipLaunchKernelGGL(( reduce6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream12 , dI21, d_odata_dI21, size);
hipLaunchKernelGGL(( reduce6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream13 , dI22, d_odata_dI22, size);
break;
case 4:
hipLaunchKernelGGL(( reduce6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream01 , dv, d_odata_dv, size);
hipLaunchKernelGGL(( reduce6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream02 , dcom0, d_odata_dcom0, size);
hipLaunchKernelGGL(( reduce6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream03 , dcom1, d_odata_dcom1, size);
hipLaunchKernelGGL(( reduce6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream04 , dcom2, d_odata_dcom2, size);
hipLaunchKernelGGL(( reduce6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream05 , dI00, d_odata_dI00, size);
hipLaunchKernelGGL(( reduce6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream06 , dI01, d_odata_dI01, size);
hipLaunchKernelGGL(( reduce6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream07 , dI02, d_odata_dI02, size);
hipLaunchKernelGGL(( reduce6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream08 , dI10, d_odata_dI10, size);
hipLaunchKernelGGL(( reduce6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream09 , dI11, d_odata_dI11, size);
hipLaunchKernelGGL(( reduce6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream10 , dI12, d_odata_dI12, size);
hipLaunchKernelGGL(( reduce6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream11 , dI20, d_odata_dI20, size);
hipLaunchKernelGGL(( reduce6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream12 , dI21, d_odata_dI21, size);
hipLaunchKernelGGL(( reduce6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream13 , dI22, d_odata_dI22, size);
break;
case 2:
hipLaunchKernelGGL(( reduce6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream01 , dv, d_odata_dv, size);
hipLaunchKernelGGL(( reduce6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream02 , dcom0, d_odata_dcom0, size);
hipLaunchKernelGGL(( reduce6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream03 , dcom1, d_odata_dcom1, size);
hipLaunchKernelGGL(( reduce6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream04 , dcom2, d_odata_dcom2, size);
hipLaunchKernelGGL(( reduce6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream05 , dI00, d_odata_dI00, size);
hipLaunchKernelGGL(( reduce6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream06 , dI01, d_odata_dI01, size);
hipLaunchKernelGGL(( reduce6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream07 , dI02, d_odata_dI02, size);
hipLaunchKernelGGL(( reduce6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream08 , dI10, d_odata_dI10, size);
hipLaunchKernelGGL(( reduce6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream09 , dI11, d_odata_dI11, size);
hipLaunchKernelGGL(( reduce6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream10 , dI12, d_odata_dI12, size);
hipLaunchKernelGGL(( reduce6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream11 , dI20, d_odata_dI20, size);
hipLaunchKernelGGL(( reduce6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream12 , dI21, d_odata_dI21, size);
hipLaunchKernelGGL(( reduce6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream13 , dI22, d_odata_dI22, size);
break;
case 1:
hipLaunchKernelGGL(( reduce6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream01 , dv, d_odata_dv, size);
hipLaunchKernelGGL(( reduce6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream02 , dcom0, d_odata_dcom0, size);
hipLaunchKernelGGL(( reduce6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream03 , dcom1, d_odata_dcom1, size);
hipLaunchKernelGGL(( reduce6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream04 , dcom2, d_odata_dcom2, size);
hipLaunchKernelGGL(( reduce6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream05 , dI00, d_odata_dI00, size);
hipLaunchKernelGGL(( reduce6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream06 , dI01, d_odata_dI01, size);
hipLaunchKernelGGL(( reduce6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream07 , dI02, d_odata_dI02, size);
hipLaunchKernelGGL(( reduce6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream08 , dI10, d_odata_dI10, size);
hipLaunchKernelGGL(( reduce6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream09 , dI11, d_odata_dI11, size);
hipLaunchKernelGGL(( reduce6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream10 , dI12, d_odata_dI12, size);
hipLaunchKernelGGL(( reduce6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream11 , dI20, d_odata_dI20, size);
hipLaunchKernelGGL(( reduce6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream12 , dI21, d_odata_dI21, size);
hipLaunchKernelGGL(( reduce6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, stream13 , dI22, d_odata_dI22, size);
break;
}
}
break;
}
/* Destroy the streams */
hipStreamDestroy(stream01); hipStreamDestroy(stream02);
hipStreamDestroy(stream03); hipStreamDestroy(stream04);
hipStreamDestroy(stream05); hipStreamDestroy(stream06);
hipStreamDestroy(stream07); hipStreamDestroy(stream08);
hipStreamDestroy(stream09); hipStreamDestroy(stream10);
hipStreamDestroy(stream11); hipStreamDestroy(stream12);
  hipStreamDestroy(stream13);
}
/* Instantiate, but just for floats for now */
template void
reducedI<float>(int size, int numThreads, int numBlocks, int whichKernel,
float *dv, float *dcom0, float *dcom1, float *dcom2, float *dI00,
float *dI01, float *dI02, float *dI10, float *dI11, float *dI12,
float *dI20, float *dI21, float *dI22, float *d_odata_dv, float
*d_odata_dcom0, float *d_odata_dcom1, float *d_odata_dcom2, float
*d_odata_dI00, float *d_odata_dI01, float *d_odata_dI02, float
*d_odata_dI10, float *d_odata_dI11, float *d_odata_dI12, float
*d_odata_dI20, float *d_odata_dI21, float *d_odata_dI22);
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////
template <class T>
void
maxz(int size, int threads, int blocks,
int whichKernel, T *d_idata, T *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
// choose which of the optimized versions of reduction to launch
switch (whichKernel)
{
case 6:
default:
if (isPow2(size))
{
switch (threads)
{
case 512:
hipLaunchKernelGGL(( maxz6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 256:
hipLaunchKernelGGL(( maxz6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 128:
hipLaunchKernelGGL(( maxz6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 64:
hipLaunchKernelGGL(( maxz6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 32:
hipLaunchKernelGGL(( maxz6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 16:
hipLaunchKernelGGL(( maxz6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 8:
hipLaunchKernelGGL(( maxz6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 4:
hipLaunchKernelGGL(( maxz6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 2:
hipLaunchKernelGGL(( maxz6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 1:
hipLaunchKernelGGL(( maxz6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
}
}
else
{
switch (threads)
{
case 512:
hipLaunchKernelGGL(( maxz6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 256:
hipLaunchKernelGGL(( maxz6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 128:
hipLaunchKernelGGL(( maxz6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 64:
hipLaunchKernelGGL(( maxz6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 32:
hipLaunchKernelGGL(( maxz6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 16:
hipLaunchKernelGGL(( maxz6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 8:
hipLaunchKernelGGL(( maxz6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 4:
hipLaunchKernelGGL(( maxz6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 2:
hipLaunchKernelGGL(( maxz6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 1:
hipLaunchKernelGGL(( maxz6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
}
}
break;
}
}
// Instantiate the maxz function for 3 types
template void
maxz<int>(int size, int threads, int blocks,
int whichKernel, int *d_idata, int *d_odata);
template void
maxz<float>(int size, int threads, int blocks,
int whichKernel, float *d_idata, float *d_odata);
template void
maxz<double>(int size, int threads, int blocks,
int whichKernel, double *d_idata, double *d_odata);
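/* Illustrative follow-up (a sketch; the helper name finishMaxOnHostExample is
 * ours): the per-block outputs written by maxz() are partial maxima, so the
 * final combine on the host must take a max, not a sum. */
static inline float finishMaxOnHostExample(float *d_partial, int blocks)
{
    float *h_partial = new float[blocks];
    gpuErrchk(hipMemcpy(h_partial, d_partial, blocks * sizeof(float),
                        hipMemcpyDeviceToHost));
    float m = h_partial[0];
    for (int b = 1; b < blocks; b++)
        m = (h_partial[b] > m) ? h_partial[b] : m;   /* max-combine the block results */
    delete[] h_partial;
    return m;
}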
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////
template <class T>
void
minz(int size, int threads, int blocks,
int whichKernel, T *d_idata, T *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
// choose which of the optimized versions of reduction to launch
switch (whichKernel)
{
case 6:
default:
if (isPow2(size))
{
switch (threads)
{
case 512:
hipLaunchKernelGGL(( minz6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 256:
hipLaunchKernelGGL(( minz6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 128:
hipLaunchKernelGGL(( minz6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 64:
hipLaunchKernelGGL(( minz6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 32:
hipLaunchKernelGGL(( minz6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 16:
hipLaunchKernelGGL(( minz6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 8:
hipLaunchKernelGGL(( minz6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 4:
hipLaunchKernelGGL(( minz6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 2:
hipLaunchKernelGGL(( minz6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 1:
hipLaunchKernelGGL(( minz6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
}
}
else
{
switch (threads)
{
case 512:
hipLaunchKernelGGL(( minz6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 256:
hipLaunchKernelGGL(( minz6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 128:
hipLaunchKernelGGL(( minz6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 64:
hipLaunchKernelGGL(( minz6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 32:
hipLaunchKernelGGL(( minz6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 16:
hipLaunchKernelGGL(( minz6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 8:
hipLaunchKernelGGL(( minz6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 4:
hipLaunchKernelGGL(( minz6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 2:
hipLaunchKernelGGL(( minz6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
case 1:
hipLaunchKernelGGL(( minz6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size);
break;
}
}
break;
}
}
// Instantiate the minz function for 3 types
template void
minz<int>(int size, int threads, int blocks,
int whichKernel, int *d_idata, int *d_odata);
template void
minz<float>(int size, int threads, int blocks,
int whichKernel, float *d_idata, float *d_odata);
template void
minz<double>(int size, int threads, int blocks,
int whichKernel, double *d_idata, double *d_odata);
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////
template <class T>
void
maxzexp(int size, int threads, int blocks,
int whichKernel, T *d_idata1, T *d_idata2, T *d_idata3, T *d_idata4,
T *d_odata1, T *d_odata2, T *d_odata3, T *d_odata4,
hipStream_t *stream1, hipStream_t *stream2, hipStream_t *stream3,
hipStream_t *stream4)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
// choose which of the optimized versions of reduction to launch
switch (whichKernel)
{
case 6:
default:
if (isPow2(size))
{
switch (threads)
{
case 512:
hipLaunchKernelGGL(( maxz6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream1 , d_idata1, d_odata1, size);
hipLaunchKernelGGL(( maxz6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream2 , d_idata2, d_odata2, size);
hipLaunchKernelGGL(( minz6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream3 , d_idata3, d_odata3, size);
hipLaunchKernelGGL(( minz6<T, 512, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream4 , d_idata4, d_odata4, size);
break;
case 256:
hipLaunchKernelGGL(( maxz6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream1 , d_idata1, d_odata1, size);
hipLaunchKernelGGL(( maxz6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream2 , d_idata2, d_odata2, size);
hipLaunchKernelGGL(( minz6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream3 , d_idata3, d_odata3, size);
hipLaunchKernelGGL(( minz6<T, 256, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream4 , d_idata4, d_odata4, size);
break;
case 128:
hipLaunchKernelGGL(( maxz6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream1 , d_idata1, d_odata1, size);
hipLaunchKernelGGL(( maxz6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream2 , d_idata2, d_odata2, size);
hipLaunchKernelGGL(( minz6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream3 , d_idata3, d_odata3, size);
hipLaunchKernelGGL(( minz6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream4 , d_idata4, d_odata4, size);
break;
case 64:
hipLaunchKernelGGL(( maxz6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream1 , d_idata1, d_odata1, size);
hipLaunchKernelGGL(( maxz6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream2 , d_idata2, d_odata2, size);
hipLaunchKernelGGL(( minz6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream3 , d_idata3, d_odata3, size);
hipLaunchKernelGGL(( minz6<T, 64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream4 , d_idata4, d_odata4, size);
break;
case 32:
hipLaunchKernelGGL(( maxz6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream1 , d_idata1, d_odata1, size);
hipLaunchKernelGGL(( maxz6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream2 , d_idata2, d_odata2, size);
hipLaunchKernelGGL(( minz6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream3 , d_idata3, d_odata3, size);
hipLaunchKernelGGL(( minz6<T, 32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream4 , d_idata4, d_odata4, size);
break;
case 16:
hipLaunchKernelGGL(( maxz6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream1 , d_idata1, d_odata1, size);
hipLaunchKernelGGL(( maxz6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream2 , d_idata2, d_odata2, size);
hipLaunchKernelGGL(( minz6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream3 , d_idata3, d_odata3, size);
hipLaunchKernelGGL(( minz6<T, 16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream4 , d_idata4, d_odata4, size);
break;
case 8:
hipLaunchKernelGGL(( maxz6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream1 , d_idata1, d_odata1, size);
hipLaunchKernelGGL(( maxz6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream2 , d_idata2, d_odata2, size);
hipLaunchKernelGGL(( minz6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream3 , d_idata3, d_odata3, size);
hipLaunchKernelGGL(( minz6<T, 8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream4 , d_idata4, d_odata4, size);
break;
case 4:
hipLaunchKernelGGL(( maxz6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream1 , d_idata1, d_odata1, size);
hipLaunchKernelGGL(( maxz6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream2 , d_idata2, d_odata2, size);
hipLaunchKernelGGL(( minz6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream3 , d_idata3, d_odata3, size);
hipLaunchKernelGGL(( minz6<T, 4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream4 , d_idata4, d_odata4, size);
break;
case 2:
hipLaunchKernelGGL(( maxz6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream1 , d_idata1, d_odata1, size);
hipLaunchKernelGGL(( maxz6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream2 , d_idata2, d_odata2, size);
hipLaunchKernelGGL(( minz6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream3 , d_idata3, d_odata3, size);
hipLaunchKernelGGL(( minz6<T, 2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream4 , d_idata4, d_odata4, size);
break;
case 1:
hipLaunchKernelGGL(( maxz6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream1 , d_idata1, d_odata1, size);
hipLaunchKernelGGL(( maxz6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream2 , d_idata2, d_odata2, size);
hipLaunchKernelGGL(( minz6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream3 , d_idata3, d_odata3, size);
hipLaunchKernelGGL(( minz6<T, 1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream4 , d_idata4, d_odata4, size);
break;
}
}
else
{
switch (threads)
{
case 512:
hipLaunchKernelGGL(( maxz6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream1 , d_idata1, d_odata1, size);
hipLaunchKernelGGL(( maxz6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream2 , d_idata2, d_odata2, size);
hipLaunchKernelGGL(( minz6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream3 , d_idata3, d_odata3, size);
hipLaunchKernelGGL(( minz6<T, 512, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream4 , d_idata4, d_odata4, size);
break;
case 256:
hipLaunchKernelGGL(( maxz6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream1 , d_idata1, d_odata1, size);
hipLaunchKernelGGL(( maxz6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream2 , d_idata2, d_odata2, size);
hipLaunchKernelGGL(( minz6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream3 , d_idata3, d_odata3, size);
hipLaunchKernelGGL(( minz6<T, 256, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream4 , d_idata4, d_odata4, size);
break;
case 128:
hipLaunchKernelGGL(( maxz6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream1 , d_idata1, d_odata1, size);
hipLaunchKernelGGL(( maxz6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream2 , d_idata2, d_odata2, size);
hipLaunchKernelGGL(( minz6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream3 , d_idata3, d_odata3, size);
hipLaunchKernelGGL(( minz6<T, 128, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream4 , d_idata4, d_odata4, size);
break;
case 64:
hipLaunchKernelGGL(( maxz6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream1 , d_idata1, d_odata1, size);
hipLaunchKernelGGL(( maxz6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream2 , d_idata2, d_odata2, size);
hipLaunchKernelGGL(( minz6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream3 , d_idata3, d_odata3, size);
hipLaunchKernelGGL(( minz6<T, 64, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream4 , d_idata4, d_odata4, size);
break;
case 32:
hipLaunchKernelGGL(( maxz6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream1 , d_idata1, d_odata1, size);
hipLaunchKernelGGL(( maxz6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream2 , d_idata2, d_odata2, size);
hipLaunchKernelGGL(( minz6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream3 , d_idata3, d_odata3, size);
hipLaunchKernelGGL(( minz6<T, 32, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream4 , d_idata4, d_odata4, size);
break;
case 16:
hipLaunchKernelGGL(( maxz6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream1 , d_idata1, d_odata1, size);
hipLaunchKernelGGL(( maxz6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream2 , d_idata2, d_odata2, size);
hipLaunchKernelGGL(( minz6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream3 , d_idata3, d_odata3, size);
hipLaunchKernelGGL(( minz6<T, 16, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream4 , d_idata4, d_odata4, size);
break;
case 8:
hipLaunchKernelGGL(( maxz6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream1 , d_idata1, d_odata1, size);
hipLaunchKernelGGL(( maxz6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream2 , d_idata2, d_odata2, size);
hipLaunchKernelGGL(( minz6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream3 , d_idata3, d_odata3, size);
hipLaunchKernelGGL(( minz6<T, 8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream4 , d_idata4, d_odata4, size);
break;
case 4:
hipLaunchKernelGGL(( maxz6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream1 , d_idata1, d_odata1, size);
hipLaunchKernelGGL(( maxz6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream2 , d_idata2, d_odata2, size);
hipLaunchKernelGGL(( minz6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream3 , d_idata3, d_odata3, size);
hipLaunchKernelGGL(( minz6<T, 4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream4 , d_idata4, d_odata4, size);
break;
case 2:
hipLaunchKernelGGL(( maxz6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream1 , d_idata1, d_odata1, size);
hipLaunchKernelGGL(( maxz6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream2 , d_idata2, d_odata2, size);
hipLaunchKernelGGL(( minz6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream3 , d_idata3, d_odata3, size);
hipLaunchKernelGGL(( minz6<T, 2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream4 , d_idata4, d_odata4, size);
break;
case 1:
hipLaunchKernelGGL(( maxz6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream1 , d_idata1, d_odata1, size);
hipLaunchKernelGGL(( maxz6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream2 , d_idata2, d_odata2, size);
hipLaunchKernelGGL(( minz6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream3 , d_idata3, d_odata3, size);
hipLaunchKernelGGL(( minz6<T, 1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, *stream4 , d_idata4, d_odata4, size);
break;
}
}
break;
}
}
// Instantiate the maxzexp function for 3 types
template void
maxzexp<int>(int size, int threads, int blocks, int whichKernel, int *d_idata1,
int *d_idata2, int *d_idata3, int *d_idata4, int *d_odata1, int *d_odata2,
int *d_odata3, int *d_odata4, hipStream_t *stream1, hipStream_t *stream2,
hipStream_t *stream3, hipStream_t *stream4);
template void
maxzexp<float>(int size, int threads, int blocks, int whichKernel, float *d_idata1,
float *d_idata2, float *d_idata3, float *d_idata4, float *d_odata1,
float *d_odata2, float *d_odata3, float *d_odata4, hipStream_t *stream1,
hipStream_t *stream2, hipStream_t *stream3, hipStream_t *stream4);
template void
maxzexp<double>(int size, int threads, int blocks, int whichKernel, double *d_idata1,
double *d_idata2, double *d_idata3, double *d_idata4, double *d_odata1,
double *d_odata2, double *d_odata3, double *d_odata4, hipStream_t *stream1,
hipStream_t *stream2, hipStream_t *stream3, hipStream_t *stream4);
__global__ void set_idata_zmax_krnl(struct dat_t *ddat, float *d_idata,
int set, int frm, int size) {
/* MULTI-threaded kernel */
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size){
d_idata[i] = ddat->set[set].desc.deldop.frame[frm].pos.z_s[i];
}
}
__global__ void set_idata_zmax_all_frames_krnl(struct dat_t *ddat,
float *d_idata, int set, int frm, int frame_size) {
/* MULTI-threaded kernel */
int offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < frame_size){
d_idata[offset] = ddat->set[set].desc.deldop.frame[frm].pos.z_s[offset];
}
}
__global__ void set_idata_pntr_krnl(struct dat_t *ddat, float *d_idata,
int set, int frm, int size) {
/* MULTI-threaded kernel */
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size){
switch (ddat->set[set].type) {
case DELAY:
d_idata[i] = ddat->set[set].desc.deldop.frame[frm].fit_s[i];
break;
case DOPPLER:
d_idata[i] = ddat->set[set].desc.doppler.frame[frm].fit_s[i];
break;
}
}
}
__global__ void set_idata_modarea_krnl(struct mod_t *dmod, float *d_idata,
int c, int size) {
/* MULTI-threaded kernel */
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
d_idata[i] = dmod->shape.comp[c].real.f[i].area;
}
__global__ void set_dv_dcom_di_krnl(float *d_odata_dv, float *d_odata_dcom0,
float *d_odata_dcom1, float *d_odata_dcom2, float *d_odata_dI00,
float *d_odata_dI01, float *d_odata_dI02, float *d_odata_dI10,
float *d_odata_dI11, float *d_odata_dI12, float *d_odata_dI20,
float *d_odata_dI21, float *d_odata_dI22, struct mod_t *dmod, int c) {
/* Single threaded kernel to update the model with volume, COM, and inertia */
	/* Note that this kernel ignores multi-component models for now */
if (threadIdx.x == 0) {
dmod->shape.comp[c].volume = dmod->shape.volume = d_odata_dv[0];
dmod->shape.comp[c].com[0] = dmod->shape.com[0] = d_odata_dcom0[0];
dmod->shape.comp[c].com[1] = dmod->shape.com[1] = d_odata_dcom1[0];
dmod->shape.comp[c].com[2] = dmod->shape.com[2] = d_odata_dcom2[0];
dmod->shape.comp[c].inertia[0][0] = dmod->shape.inertia[0][0] = d_odata_dI00[0];
dmod->shape.comp[c].inertia[0][1] = dmod->shape.inertia[0][1] = d_odata_dI01[0];
dmod->shape.comp[c].inertia[0][2] = dmod->shape.inertia[0][2] = d_odata_dI02[0];
dmod->shape.comp[c].inertia[1][0] = dmod->shape.inertia[1][0] = d_odata_dI10[0];
dmod->shape.comp[c].inertia[1][1] = dmod->shape.inertia[1][1] = d_odata_dI11[0];
dmod->shape.comp[c].inertia[1][2] = dmod->shape.inertia[1][2] = d_odata_dI12[0];
dmod->shape.comp[c].inertia[2][0] = dmod->shape.inertia[2][0] = d_odata_dI20[0];
dmod->shape.comp[c].inertia[2][1] = dmod->shape.inertia[2][1] = d_odata_dI21[0];
dmod->shape.comp[c].inertia[2][2] = dmod->shape.inertia[2][2] = d_odata_dI22[0];
}
}
__global__ void set_xlim_ylim_krnl(struct dat_t *ddat, int set, int frm, int src,
float *d_odata_imax, float *d_odata_imin, float *d_odata_jmax,
float *d_odata_jmin, float *minmax_overall) {
/* Single-threaded kernel to update pos->xlim and ylim and also the overall
* model limits regardless of the POS limits */
int n;
if (threadIdx.x == 0) {
/* First set the overall model limits, regardless of POS frame limits */
minmax_overall[0] = d_odata_imin[0];
minmax_overall[1] = d_odata_imax[0];
minmax_overall[2] = d_odata_jmin[0];
minmax_overall[3] = d_odata_jmax[0];
/* Now set pos->xlim and pos->ylim depending on data type */
switch(ddat->set[set].type) {
case DELAY:
n = ddat->set[set].desc.deldop.frame[frm].pos.n;
if (src) {
ddat->set[set].desc.deldop.frame[frm].pos.xlim2[0] = max((int)d_odata_imin[0], -n);
ddat->set[set].desc.deldop.frame[frm].pos.xlim2[1] = min((int)d_odata_imax[0], n);
ddat->set[set].desc.deldop.frame[frm].pos.ylim2[0] = max((int)d_odata_jmin[0], -n);
ddat->set[set].desc.deldop.frame[frm].pos.ylim2[1] = min((int)d_odata_jmax[0], n);
}
else {
ddat->set[set].desc.deldop.frame[frm].pos.xlim[0] = max((int)d_odata_imin[0], -n);
ddat->set[set].desc.deldop.frame[frm].pos.xlim[1] = min((int)d_odata_imax[0], n);
ddat->set[set].desc.deldop.frame[frm].pos.ylim[0] = max((int)d_odata_jmin[0], -n);
ddat->set[set].desc.deldop.frame[frm].pos.ylim[1] = min((int)d_odata_jmax[0], n);
}
break;
case DOPPLER:
n = ddat->set[set].desc.doppler.frame[frm].pos.n;
if (src) {
ddat->set[set].desc.doppler.frame[frm].pos.xlim2[0] = max((int)d_odata_imin[0], -n);
ddat->set[set].desc.doppler.frame[frm].pos.xlim2[1] = min((int)d_odata_imax[0], n);
ddat->set[set].desc.doppler.frame[frm].pos.ylim2[0] = max((int)d_odata_jmin[0], -n);
ddat->set[set].desc.doppler.frame[frm].pos.ylim2[1] = min((int)d_odata_jmax[0], n);
}
else {
ddat->set[set].desc.doppler.frame[frm].pos.xlim[0] = max((int)d_odata_imin[0], -n);
ddat->set[set].desc.doppler.frame[frm].pos.xlim[1] = min((int)d_odata_imax[0], n);
ddat->set[set].desc.doppler.frame[frm].pos.ylim[0] = max((int)d_odata_jmin[0], -n);
ddat->set[set].desc.doppler.frame[frm].pos.ylim[1] = min((int)d_odata_jmax[0], n);
}
break;
case POS:
// if (src) {
// ddat->set[set].desc.poset.frame[frm].pos.xlim2[0] = max((int)d_odata_imin[0], -n);
// ddat->set[set].desc.poset.frame[frm].pos.xlim2[1] = min((int)d_odata_imax[0], n);
// ddat->set[set].desc.poset.frame[frm].pos.ylim2[0] = max((int)d_odata_jmin[0], -n);
// ddat->set[set].desc.poset.frame[frm].pos.ylim2[1] = min((int)d_odata_jmax[0], n);
// }
// else {
// ddat->set[set].desc.poset.frame[frm].pos.xlim[0] = max((int)d_odata_imin[0], -n);
// ddat->set[set].desc.poset.frame[frm].pos.xlim[1] = min((int)d_odata_imax[0], n);
// ddat->set[set].desc.poset.frame[frm].pos.ylim[0] = max((int)d_odata_jmin[0], -n);
// ddat->set[set].desc.poset.frame[frm].pos.ylim[1] = min((int)d_odata_jmax[0], n);
// }
// break;
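		/* NOTE: the POS branch above is currently disabled (its body and break are
		 * commented out), so a POS-type set falls through to the LGHTCRV case. */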
case LGHTCRV:
n = ddat->set[set].desc.lghtcrv.rend[frm].pos.n;
if (src) {
ddat->set[set].desc.lghtcrv.rend[frm].pos.xlim2[0] = max((int)d_odata_imin[0], -n);
ddat->set[set].desc.lghtcrv.rend[frm].pos.xlim2[1] = min((int)d_odata_imax[0], n);
ddat->set[set].desc.lghtcrv.rend[frm].pos.ylim2[0] = max((int)d_odata_jmin[0], -n);
ddat->set[set].desc.lghtcrv.rend[frm].pos.ylim2[1] = min((int)d_odata_jmax[0], n);
}
else {
ddat->set[set].desc.lghtcrv.rend[frm].pos.xlim[0] = max((int)d_odata_imin[0], -n);
ddat->set[set].desc.lghtcrv.rend[frm].pos.xlim[1] = min((int)d_odata_imax[0], n);
ddat->set[set].desc.lghtcrv.rend[frm].pos.ylim[0] = max((int)d_odata_jmin[0], -n);
ddat->set[set].desc.lghtcrv.rend[frm].pos.ylim[1] = min((int)d_odata_jmax[0], n);
}
break;
}
}
}
__global__ void zmax_all_frames_finalize_krnl(struct dat_t *ddat, float *zmax,
int nframes, int s) {
/* single-threaded kernel */
float sum = 0.0;
if (threadIdx.x == 0) {
for (int f=0; f<nframes; f++)
sum += (zmax[f] * ddat->set[s].desc.deldop.frame[f].weight);
zmax[0] = sum;
}
}
__global__ void deldop_xsec_frame_finalize_krnl(struct dat_t *ddat,
int s, int f, float value, float *xsec) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
xsec[f] = ddat->set[s].desc.deldop.frame[f].overflow_xsec;
xsec[f] += value;
xsec[f] *= ddat->set[s].desc.deldop.frame[f].cal.val;
}
}
__global__ void deldop_xsec_set_finalize_krnl(struct dat_t *ddat,
int s, float *xsec, int nframes) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
for (int f=0; f<nframes; f++)
reduction_sum_rad_xsec += xsec[f]*ddat->set[s].desc.deldop.frame[f].weight;
}
}
__global__ void c2af_set_data_krnl(float **input, float *d_idata, int frm, int frmsz) {
/* frmsz-threaded kernel */
int offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < frmsz) {
d_idata[offset] = input[frm][offset];
}
}
__host__ float compute_deldop_xsec_pr6(struct dat_t *ddat, int ndel, int ndop,
int set, int frm) {
/* Function calculates a delay-Doppler frame's radar cross section with
* Nvidia's reduction sample code (simplified and adapted for use with
* shape). The function returns the cross section as a float */
int s, size = ndel*ndop; // array size
int maxThreads = maxThreadsPerBlock; // max # of threads per block
int maxBlocks = 2048; // max # of blocks per grid
int whichKernel = 6; // id of reduction kernel
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
float xsec = 0.0; // radar cross section; return value
float *d_odata; // temp. float array for reduction output
float *d_idata; // temp. float arrays for reduction input
float *h_odata; // the host output array
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
dim3 BLK,THD;
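	/* Launch configuration for the data-copy kernel: ceil(size/maxThreadsPerBlock)
	 * blocks of maxThreadsPerBlock threads each */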
BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
/* Allocate memory for d_idata, then set that pointer equal to the right
* data set and frame to the right deldop fit array */
cudaCalloc((void**)&d_idata, sizeof(float), size);
hipLaunchKernelGGL(( set_idata_pntr_krnl), dim3(BLK),dim3(THD), 0, 0, ddat, d_idata, set, frm, size);
checkErrorAfterKernelLaunch("set_idata_pntr_krnl in compute_deldop_xsec");
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
	/* Allocate memory for d_odata with enough elements to hold
	 * each block's partial reduction from the first call */
cudaCalloc((void**)&d_odata, sizeof(float), numBlocks);
h_odata = (float *) malloc(numBlocks*sizeof(float));
/* Call reduction for first time */
reduce<float>(size, numThreads, numBlocks, whichKernel, d_idata, d_odata);
checkErrorAfterKernelLaunch("reduce<float> in compute_deldop_xsec");
/* Reset d_idata for later use as buffer */
hipMemset(d_idata, 0, size*sizeof(float));
/* Now sum partial block sums on GPU, using the reduce6<> kernel */
s = numBlocks;
while (s > 1)
{
int threads = 0, blocks = 0;
xblock_ythread = getNumBlocksAndThreads(s, maxBlocks, maxThreads);
blocks = xblock_ythread.x;
threads = xblock_ythread.y;
/* Copy the first d_odata back into d_idata2 */
hipMemcpy(d_idata, d_odata, s*sizeof(float), hipMemcpyDeviceToDevice);
reduce<float>(s, threads, blocks, whichKernel, d_idata, d_odata);
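		/* Each pass shrinks the remaining element count: kernels 0-2 reduce by a factor
		 * of threads, while kernel 6 reduces by 2*threads (each thread sums two elements
		 * while loading shared memory) */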
if (whichKernel < 3)
s = (s + threads - 1) / threads;
else
s = (s + (threads*2-1)) / (threads*2);
if (s > 1)
printf("s is bigger than one");
}
	gpuErrchk(hipMemcpy(h_odata, d_odata, sizeof(float), hipMemcpyDeviceToHost));
xsec = h_odata[0];
free(h_odata);
hipFree(d_odata);
hipFree(d_idata);
return xsec;
}
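/* Usage sketch (illustrative only; the indices and dimensions below are placeholders):
 *   float xsec = compute_deldop_xsec_pr6(ddat, ndel, ndop, set, frm);
 * where ndel*ndop is the size of the frame's fit_s array and ddat already resides in
 * device-accessible memory. */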
__host__ float compute_deldop_xsec_snglkrnl(struct dat_t *ddat, int ndel, int ndop,
int set, int frm) {
/* Function calculates a delay-Doppler frame's radar cross section with
* Nvidia's reduction sample code (simplified and adapted for use with
* shape). The function returns the cross section as a float */
int size = ndel*ndop; // array size
int maxThreads = maxThreadsPerBlock; // max # of threads per block
int maxBlocks = 2048; // max # of blocks per grid
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
float xsec = 0.0; // radar cross section; return value
float *d_odata; // temp. float array for reduction output
float *d_idata; // temp. float arrays for reduction input
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
dim3 dimBlock(numThreads, 1, 1);
dim3 dimGrid(numBlocks, 1, 1);
/* Allocate memory for d_idata and d_odata */
cudaCalloc((void**)&d_idata, sizeof(float), size);
cudaCalloc((void**)&d_odata, sizeof(float), numBlocks);
hipLaunchKernelGGL(( set_idata_pntr_krnl), dim3(BLK),dim3(THD), 0, 0, ddat, d_idata, set, frm, size);
checkErrorAfterKernelLaunch("set_idata_pntr_krnl in compute_deldop_xsec");
/* Call reduction */
hipLaunchKernelGGL(( device_reduce_block_atomic_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_idata, d_odata, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (compute_deldop_xsec_snglkrnl)");
deviceSyncAfterKernelLaunch("device_reduce_block_atomic_kernel (compute_deldop_xsec_snglkrnl)");
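	/* The block-atomic kernel accumulates every block's partial sum into d_odata[0],
	 * so only the first element is read back after synchronization */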
xsec = d_odata[0];
hipFree(d_odata);
hipFree(d_idata);
return xsec;
}
__host__ float compute_deldop_xsec_all_frames(struct dat_t *ddat, int ndel, int ndop,
int set, int nframes) {
	/* Function calculates the radar cross section of every frame in a delay-Doppler
	 * set with Nvidia's reduction sample code (simplified and adapted for use with
	 * shape). The function returns the frame-weighted cross section of the set as a float */
int size = ndel*ndop; // array size
int maxThreads = maxThreadsPerBlock; // max # of threads per block
int maxBlocks = 2048; // max # of blocks per grid
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
float *xsec, xsec_set; // radar cross section; return value
float *d_odata; // temp. float array for reduction output
float *d_idata; // temp. float arrays for reduction input
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
dim3 dimBlock(numThreads, 1, 1);
dim3 dimGrid(numBlocks, 1, 1);
/* Allocate memory for d_idata and d_odata */
cudaCalloc((void**)&d_idata, sizeof(float), size);
hipMallocManaged((void**)&d_odata, sizeof(float)*numBlocks, hipMemAttachHost);
//hipMallocManaged((void**)&xsec, sizeof(float)*nframes, hipMemAttachHost);
cudaCalloc((void**)&xsec, sizeof(float), nframes);
for (int i=0; i<nframes; i++)
xsec[i]=0.0;
/* Start loop through frames */
for (int f=0; f<nframes; f++) {
//set_idata_zmax_all_frames_krnl<<<BLK,THD>>>(ddat, d_idata, set, f, size);
hipLaunchKernelGGL(( set_idata_pntr_krnl), dim3(BLK),dim3(THD), 0, 0, ddat, d_idata, set, f, size);
checkErrorAfterKernelLaunch("set_idata_pntr_krnl in compute_deldop_xsec_all_frames");
/* Call reduction */
hipLaunchKernelGGL(( device_reduce_block_atomic_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_idata, d_odata, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (compute_deldop_xsec_all_frames)");
//deviceSyncAfterKernelLaunch("device_reduce_block_atomic_kernel (compute_deldop_xsec_all_frames)");
/* Calculate frames weighting */
hipLaunchKernelGGL(( deldop_xsec_frame_finalize_krnl), dim3(1),dim3(1), 0, 0, ddat, set, f, d_odata[0], xsec);
checkErrorAfterKernelLaunch("deldop_xsec_frame_finalize_krnl");
deviceSyncAfterKernelLaunch("");
}
/* Now finalize the set and return value */
hipLaunchKernelGGL(( deldop_xsec_set_finalize_krnl), dim3(1),dim3(1), 0, 0, ddat, set, xsec, nframes);
checkErrorAfterKernelLaunch("deldop_xsec_set_finalize_krnl");
deviceSyncAfterKernelLaunch("deldop_xsec_set_finalize_krnl");
gpuErrchk(hipMemcpyFromSymbol(&xsec_set, reduction_sum_rad_xsec,
sizeof(float), 0, hipMemcpyDeviceToHost));
hipFree(d_odata);
hipFree(d_idata);
hipFree(xsec);
return xsec_set;
}
__host__ float compute_pos_zmax(struct dat_t *ddat, int size,
int set, int frm) {
	/* Function calculates the maximum value (zmax) in a pos->z array with
	 * Nvidia's reduction sample code (simplified and adapted for use with
	 * shape). The function returns zmax as a float */
int s; // array size
int maxThreads = maxThreadsPerBlock; // max # of threads per block
int maxBlocks = 2048; // max # of blocks per grid
int whichKernel = 6; // id of reduction kernel
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
	float zmax = 0.0;			// maximum z value; return value
float *d_odata; // temp. float array for reduction output
float *d_idata; // temp. float arrays for reduction input
float *h_odata; // the host output array
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
	/* Allocate memory for d_idata, then fill it with the pos->z values
	 * of the requested data set and frame */
cudaCalloc((void**)&d_idata, sizeof(float), size);
hipLaunchKernelGGL(( set_idata_zmax_krnl), dim3(BLK),dim3(THD), 0, 0, ddat, d_idata, set, frm, size);
checkErrorAfterKernelLaunch("set_idata_zmax_krnl in compute_zmax");
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
	/* Allocate memory for d_odata with enough elements to hold
	 * each block's partial reduction from the first call */
cudaCalloc((void**)&d_odata, sizeof(float), numBlocks);
h_odata = (float *) malloc(numBlocks*sizeof(float));
/* Call maxz for first time */
maxz<float>(size, numThreads, numBlocks, whichKernel, d_idata, d_odata);
checkErrorAfterKernelLaunch("maxz<float> in compute_zmax");
/* Reset d_idata for later use as buffer */
hipMemset(d_idata, 0, size*sizeof(float));
/* Now sum partial block sums on GPU, using the maxz6<> kernel */
s = numBlocks;
while (s > 1)
{
int threads = 0, blocks = 0;
xblock_ythread = getNumBlocksAndThreads(s, maxBlocks, maxThreads);
blocks = xblock_ythread.x;
threads = xblock_ythread.y;
/* Copy the first d_odata back into d_idata2 */
hipMemcpy(d_idata, d_odata, s*sizeof(float), hipMemcpyDeviceToDevice);
maxz<float>(s, threads, blocks, whichKernel, d_idata, d_odata);
if (whichKernel < 3)
s = (s + threads - 1) / threads;
else
s = (s + (threads*2-1)) / (threads*2);
if (s > 1)
printf("s is bigger than one");
}
	gpuErrchk(hipMemcpy(h_odata, d_odata, sizeof(float), hipMemcpyDeviceToHost));
zmax = h_odata[0];
free(h_odata);
hipFree(d_odata);
hipFree(d_idata);
return zmax;
}
//__host__ float compute_pos_zmax_streams(struct dat_t *ddat, int size,
// int set, int nframes) {
// /* Function calculates the zmax in a pos->z array with Nvidia's reduction
// * sample code (simplified and adapted for use with shape). This version
// * uses cudaStreams to process all frames concurrently. */
//
// int s; // array size
// int maxThreads = maxThreadsPerBlock; // max # of threads per block
// int maxBlocks = 2048; // max # of blocks per grid
// int whichKernel = 6; // id of reduction kernel
// int numBlocks = 0; // initialize numBlocks
// int numThreads = 0; // initialize numThreads
// float zmax = 0.0; // radar cross section; return value
// float *d_odata; // temp. float array for reduction output
// float *d_idata; // temp. float arrays for reduction input
// float *h_odata; // the host output array
// float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
//
// dim3 BLK,THD;
// BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
// THD.x = maxThreadsPerBlock; // Thread block dimensions
//
// hipStream_t zmax_stream[nframes];
//
//
// /* Allocate memory for d_idata, then set that pointer equal to the right
// * data set and frame to the right deldop fit array */
// cudaCalloc((void**)&d_idata, sizeof(float), size);
//
//hipLaunchKernelGGL(( set_idata_zmax_krnl), dim3(BLK),dim3(THD), 0, 0, ddat, d_idata, set, frm, size);
// checkErrorAfterKernelLaunch("set_idata_zmax_krnl in compute_zmax");
//
// /* Find number of blocks & threads needed for reduction call */
// xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
// numBlocks = xblock_ythread.x;
// numThreads = xblock_ythread.y;
//
// /* Allocate memory for d_odata and d_odata2 with enough elements to hold
// * the reduction of each block during the first call */
// cudaCalloc((void**)&d_odata, sizeof(float), numBlocks);
// h_odata = (float *) malloc(numBlocks*sizeof(float));
//
// /* Call maxz for first time */
// maxz<float>(size, numThreads, numBlocks, whichKernel, d_idata, d_odata);
// checkErrorAfterKernelLaunch("maxz<float> in compute_zmax");
//
// /* Reset d_idata for later use as buffer */
// hipMemset(d_idata, 0, size*sizeof(float));
//
// /* Now sum partial block sums on GPU, using the maxz6<> kernel */
// s = numBlocks;
//
// while (s > 1)
// {
// int threads = 0, blocks = 0;
// xblock_ythread = getNumBlocksAndThreads(s, maxBlocks, maxThreads);
// blocks = xblock_ythread.x;
// threads = xblock_ythread.y;
//
// /* Copy the first d_odata back into d_idata2 */
// hipMemcpy(d_idata, d_odata, s*sizeof(float), hipMemcpyDeviceToDevice);
//
// maxz<float>(s, threads, blocks, whichKernel, d_idata, d_odata);
//
// if (whichKernel < 3)
// s = (s + threads - 1) / threads;
// else
// s = (s + (threads*2-1)) / (threads*2);
// if (s > 1)
// printf("s is bigger than one");
// }
//
// gpuErrchk(hipMemcpy(h_odata, d_odata, 2*sizeof(float), hipMemcpyDeviceToHost));
// zmax = h_odata[0];
// free(h_odata);
// hipFree(d_odata);
// hipFree(d_idata);
// return zmax;
//}
__host__ float compute_pos_zmax_all_frames(struct dat_t *ddat, int frame_size, int set, int nframes) {
/* Function calculates the zmax per frame and then the final zmax for the set.
* Code assumes that frame_size is the same for all frames in set */
int s; // array size
int maxBlocks = 2048; // max # of blocks per grid
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
	float *zmax, final;			// per-frame max z values; weighted set total is returned
float *d_odata; // temp. float array for reduction output
float *d_idata; // temp. float arrays for reduction input
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(frame_size, maxBlocks, maxThreadsPerBlock);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
dim3 dimBlock(numThreads, 1, 1);
dim3 dimGrid(numBlocks, 1, 1);
/* Need to calculate zmax per frame, then multiply by frame weight and
* add to sum_deldop_zmax, which is again weighted and then returned.
* Copy each frame's pos->z_s into a double pointer or maybe split into subsets
* of the main total_size array? */
/* Allocate memory for d_idata and d_odata */
cudaCalloc((void**)&d_idata, sizeof(float), frame_size);
hipMallocManaged((void**)&d_odata, sizeof(float)*numBlocks, hipMemAttachHost);
hipMallocManaged((void**)&zmax, sizeof(float)*nframes, hipMemAttachHost);
/* Configure data copy kernel launch */
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + frame_size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
/* Start loop through frames */
for (int f=0; f<nframes; f++) {
/* Copy input data into input array */
hipLaunchKernelGGL(( set_idata_zmax_all_frames_krnl), dim3(BLK),dim3(THD), 0, 0, ddat, d_idata, set,
f, frame_size);
checkErrorAfterKernelLaunch("set_idata_zmax_all_frames_krnl in compute_zmax_all_frames");
deviceSyncAfterKernelLaunch("");
/* Start debug */
//dbg_print_array(d_idata, 151, 151);
/* End debug */
/* Call reduction */
hipLaunchKernelGGL(( device_sum_block_atomic_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_idata, d_odata, frame_size);
checkErrorAfterKernelLaunch("device_sum_block_atomic_kernel (compute_pos_zmax_all_frames)");
deviceSyncAfterKernelLaunch("");
/* Copy zmax for this frame from the output array into the zmax array for the right frame */
zmax[f] = d_odata[0];
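		/* NOTE: this variant accumulates the frame's z buffer with the block-atomic *sum*
		 * kernel; compute_pos_zmax_all_frames_2 below uses the maxz reduction instead */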
}
/* Now apply frame weighting factors and sum up all frames to get sum_deldop_zmax to return */
hipLaunchKernelGGL(( zmax_all_frames_finalize_krnl), dim3(1),dim3(1), 0, 0, ddat,zmax,nframes,set);
checkErrorAfterKernelLaunch("zmax_all_frames_finalize_krnl");
final = zmax[0];
hipFree(d_odata);
hipFree(d_idata);
return final;
}
__host__ float compute_pos_zmax_all_frames_2(struct dat_t *ddat, int size,
int set, int nframes) {
	/* Function calculates zmax in each frame's pos->z array with
	 * Nvidia's reduction sample code (simplified and adapted for use with
	 * shape). The function returns the frame-weighted zmax of the set as a float */
int s; // array size
int maxThreads = maxThreadsPerBlock;// max # of threads per block
int maxBlocks = 2048; // max # of blocks per grid
int whichKernel = 6; // id of reduction kernel
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
float *zmax, zmaxfinal = 0.0; // max z values; return value
float *d_odata; // temp. float array for reduction output
float *d_idata; // temp. float arrays for reduction input
float *h_odata; // the host output array
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
	/* Allocate memory for d_idata, then fill it with the pos->z values
	 * of the requested data set and frame */
cudaCalloc((void**)&d_idata, sizeof(float), size);
//hipMallocManaged((void**)&zmax, sizeof(float)*nframes, hipMemAttachHost);
cudaCalloc((void**)&zmax, sizeof(float), nframes);
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
	/* Allocate memory for d_odata with enough elements to hold
	 * each block's partial reduction from the first call */
cudaCalloc((void**)&d_odata, sizeof(float), numBlocks);
h_odata = (float *) malloc(numBlocks*sizeof(float));
for (int frm=0; frm<nframes; frm++) {
hipLaunchKernelGGL(( set_idata_zmax_krnl), dim3(BLK),dim3(THD), 0, 0, ddat, d_idata, set, frm, size);
checkErrorAfterKernelLaunch("set_idata_zmax_krnl in compute_zmax");
/* Call maxz for first time */
maxz<float>(size, numThreads, numBlocks, whichKernel, d_idata, d_odata);
checkErrorAfterKernelLaunch("maxz<float> in compute_zmax");
/* Reset d_idata for later use as buffer */
hipMemset(d_idata, 0, size*sizeof(float));
/* Now sum partial block sums on GPU, using the maxz6<> kernel */
s = numBlocks;
while (s > 1)
{
int threads = 0, blocks = 0;
xblock_ythread = getNumBlocksAndThreads(s, maxBlocks, maxThreads);
blocks = xblock_ythread.x;
threads = xblock_ythread.y;
/* Copy the first d_odata back into d_idata2 */
hipMemcpy(d_idata, d_odata, s*sizeof(float), hipMemcpyDeviceToDevice);
maxz<float>(s, threads, blocks, whichKernel, d_idata, d_odata);
if (whichKernel < 3)
s = (s + threads - 1) / threads;
else
s = (s + (threads*2-1)) / (threads*2);
if (s > 1)
printf("s is bigger than one");
}
		gpuErrchk(hipMemcpy(h_odata, d_odata, sizeof(float), hipMemcpyDeviceToHost));
deviceSyncAfterKernelLaunch("");
zmax[frm] = h_odata[0];
} /*End frame loop*/
/* Now apply frame weighting factors and sum up all frames to get sum_deldop_zmax to return */
hipLaunchKernelGGL(( zmax_all_frames_finalize_krnl), dim3(1),dim3(1), 0, 0, ddat,zmax,nframes,set);
checkErrorAfterKernelLaunch("zmax_all_frames_finalize_krnl");
deviceSyncAfterKernelLaunch("zmax_all_frames_finalize_krnl");
zmaxfinal = zmax[0];
free(h_odata);
hipFree(d_odata);
hipFree(d_idata);
hipFree(zmax);
return zmaxfinal;
}
__host__ float compute_doppler_xsec(struct dat_t *ddat, int ndop,
int set, int frm) {
	/* Function calculates a Doppler frame's radar cross section with
	 * Nvidia's reduction sample code (simplified and adapted for use with
	 * shape). The function returns the cross section as a float */
int s, size=ndop; // array size
int maxThreads = maxThreadsPerBlock; // max # of threads per block
int maxBlocks = 2048; // max # of blocks per grid
int whichKernel = 6; // id of reduction kernel
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
float xsec = 0.0; // radar cross section; return value
float *d_odata; // temp. float array for reduction output
float *d_idata; // temp. float arrays for reduction input
float *h_odata; // the host output array
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
	/* Allocate memory for d_idata, then fill it with the fit array of the
	 * requested data set and frame */
cudaCalloc((void**)&d_idata, sizeof(float), size);
hipLaunchKernelGGL(( set_idata_pntr_krnl), dim3(BLK),dim3(THD), 0, 0, ddat, d_idata, set, frm, size);
checkErrorAfterKernelLaunch("set_idata_pntr_krnl in compute_doppler_xsec");
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
	/* Allocate memory for d_odata with enough elements to hold
	 * each block's partial reduction from the first call */
cudaCalloc((void**)&d_odata, sizeof(float), numBlocks);
h_odata = (float *) malloc(numBlocks*sizeof(float));
/* Call reduction for first time */
reduce<float>(size, numThreads, numBlocks, whichKernel, d_idata, d_odata);
checkErrorAfterKernelLaunch("reduce<float> in compute_deldop_xsec");
/* Reset d_idata for later use as buffer */
gpuErrchk(hipMemset(d_idata, 0, size*sizeof(float)));
/* Now sum partial block sums on GPU, using the reduce6<> kernel */
s = numBlocks;
while (s > 1)
{
int threads = 0, blocks = 0;
xblock_ythread = getNumBlocksAndThreads(s, maxBlocks, maxThreads);
blocks = xblock_ythread.x;
threads = xblock_ythread.y;
/* Copy the first d_odata back into d_idata2 */
hipMemcpy(d_idata, d_odata, s*sizeof(float), hipMemcpyDeviceToDevice);
reduce<float>(s, threads, blocks, whichKernel, d_idata, d_odata);
if (whichKernel < 3)
s = (s + threads - 1) / threads;
else
s = (s + (threads*2-1)) / (threads*2);
if (s > 1)
printf("s is bigger than one");
}
gpuErrchk(hipMemcpy(h_odata, d_odata, 1*sizeof(float), hipMemcpyDeviceToHost));
xsec = h_odata[0];
free(h_odata);
hipFree(d_odata);
hipFree(d_idata);
return xsec;
}
__host__ float compute_model_area1(struct mod_t *dmod, int c, int size) {
	/* Function calculates the model's surface area by summing the facet areas of
	 * component c with Nvidia's reduction sample code (simplified and adapted for
	 * use with shape). The function returns the area as a float */
int s; // array size
int maxThreads = maxThreadsPerBlock; // max # of threads per block
int maxBlocks = 2048; // max # of blocks per grid
int whichKernel = 6; // id of reduction kernel
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
	float area = 0.0;			// model surface area; return value
float *d_odata; // temp. float array for reduction output
float *d_idata; // temp. float arrays for reduction input
float *h_odata; // the host output array
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
	/* Allocate memory for d_idata, then fill it with the facet areas of
	 * model component c */
cudaCalloc((void**)&d_idata, sizeof(float), size);
hipLaunchKernelGGL(( set_idata_modarea_krnl), dim3(BLK),dim3(THD), 0, 0, dmod, d_idata, c, size);
checkErrorAfterKernelLaunch("set_idata_modarea_krnl in compute_doppler_xsec");
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
	/* Allocate memory for d_odata with enough elements to hold
	 * each block's partial reduction from the first call */
cudaCalloc((void**)&d_odata, sizeof(float), numBlocks);
h_odata = (float *) malloc(numBlocks*sizeof(float));
/* Call reduction for first time */
reduce<float>(size, numThreads, numBlocks, whichKernel, d_idata, d_odata);
checkErrorAfterKernelLaunch("reduce<float> in compute_deldop_xsec");
/* Reset d_idata for later use as buffer */
gpuErrchk(hipMemset(d_idata, 0, size*sizeof(float)));
/* Now sum partial block sums on GPU, using the reduce6<> kernel */
s = numBlocks;
while (s > 1)
{
int threads = 0, blocks = 0;
xblock_ythread = getNumBlocksAndThreads(s, maxBlocks, maxThreads);
blocks = xblock_ythread.x;
threads = xblock_ythread.y;
/* Copy the first d_odata back into d_idata2 */
gpuErrchk(hipMemcpy(d_idata, d_odata, s*sizeof(float),
hipMemcpyDeviceToDevice));
reduce<float>(s, threads, blocks, whichKernel, d_idata, d_odata);
if (whichKernel < 3)
s = (s + threads - 1) / threads;
else
s = (s + (threads*2-1)) / (threads*2);
if (s > 1)
printf("s is bigger than one");
}
gpuErrchk(hipMemcpy(h_odata, d_odata, 1*sizeof(float), hipMemcpyDeviceToHost));
area = h_odata[0];
free(h_odata);
hipFree(d_odata);
hipFree(d_idata);
return area;
}
__host__ float compute_model_area(struct mod_t *dmod, int c, int size) {
	/* Function calculates the model's surface area by summing the facet areas of
	 * component c with Nvidia's reduction sample code (simplified and adapted for
	 * use with shape). The function returns the area as a float */
int maxThreads = maxThreadsPerBlock; // max # of threads per block
int maxBlocks = 2048; // max # of blocks per grid
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
	float area = 0.0;			// model surface area; return value
float *d_odata; // temp. float array for reduction output
float *d_idata; // temp. float arrays for reduction input
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
dim3 dimBlock(numThreads, 1, 1);
dim3 dimGrid(numBlocks, 1, 1);
/* Allocate memory for d_idata and d_odata */
cudaCalloc((void**)&d_idata, sizeof(float), size);
cudaCalloc((void**)&d_odata, sizeof(float), numBlocks);
/* Load the d_idata array */
hipLaunchKernelGGL(( set_idata_modarea_krnl), dim3(BLK),dim3(THD), 0, 0, dmod, d_idata, c, size);
checkErrorAfterKernelLaunch("set_idata_modarea_krnl in compute_doppler_xsec");
/* Call reduction */
hipLaunchKernelGGL(( device_reduce_block_atomic_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_idata, d_odata, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (compute_model_area)");
deviceSyncAfterKernelLaunch("device_reduce_block_atomic_kernel (compute_model_area)");
area = d_odata[0];
hipFree(d_odata);
hipFree(d_idata);
return area;
}
__host__ void dvdI_reduce_single(struct mod_t *dmod, float *dv, float *dcom0,
float *dcom1, float *dcom2, float *dI00, float *dI01, float *dI02,
float *dI10, float *dI11, float *dI12, float *dI20, float *dI21,
float *dI22, int size, int c)
{
int maxThreads = maxThreadsPerBlock; // max # of threads per block
int maxBlocks = 2048; // max # of blocks per grid
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
/* Device output arrays */
float *d_odata_dcom0, *d_odata_dcom1, *d_odata_dcom2, *d_odata_dI00,
*d_odata_dI01, *d_odata_dI02, *d_odata_dI10, *d_odata_dI11,
*d_odata_dI12, *d_odata_dI20, *d_odata_dI21, *d_odata_dI22,
*d_odata_dv;
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
dim3 dimBlock(numThreads, 1, 1);
dim3 dimGrid(numBlocks, 1, 1);
/* Allocate memory for the device output arrays for first reduction */
cudaCalloc((void**)&d_odata_dv, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dcom0, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dcom1, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dcom2, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI00, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI01, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI02, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI10, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI11, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI12, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI20, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI21, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI22, sizeof(float), numBlocks);
hipStream_t stream[13];
for (int i=0; i<13; i++)
gpuErrchk(hipStreamCreate(&stream[i]));
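	/* Launch the 13 independent reductions (volume, 3 COM components, and 9 inertia-
	 * tensor elements) on separate streams so they can overlap on the device */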
hipLaunchKernelGGL(( device_reduce_block_atomic_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream[0] , dv, d_odata_dv, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dv)");
hipLaunchKernelGGL(( device_reduce_block_atomic_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream[1] , dcom0, d_odata_dcom0, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dcom0)");
hipLaunchKernelGGL(( device_reduce_block_atomic_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream[2] , dcom1, d_odata_dcom1, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dcom1)");
hipLaunchKernelGGL(( device_reduce_block_atomic_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream[3] , dcom2, d_odata_dcom2, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dcom2)");
hipLaunchKernelGGL(( device_reduce_block_atomic_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream[4] , dI00, d_odata_dI00, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dI00)");
hipLaunchKernelGGL(( device_reduce_block_atomic_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream[5] , dI01, d_odata_dI01, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dI01)");
hipLaunchKernelGGL(( device_reduce_block_atomic_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream[6] , dI02, d_odata_dI02, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dI02)");
hipLaunchKernelGGL(( device_reduce_block_atomic_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream[7] , dI10, d_odata_dI10, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dI10)");
hipLaunchKernelGGL(( device_reduce_block_atomic_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream[8] , dI11, d_odata_dI11, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dI11)");
hipLaunchKernelGGL(( device_reduce_block_atomic_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream[9] , dI12, d_odata_dI12, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dI12)");
hipLaunchKernelGGL(( device_reduce_block_atomic_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream[10] , dI20, d_odata_dI20, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dI20)");
hipLaunchKernelGGL(( device_reduce_block_atomic_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream[11] , dI21, d_odata_dI21, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dI21)");
hipLaunchKernelGGL(( device_reduce_block_atomic_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream[12] , dI22, d_odata_dI22, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dI22)");
for (int i=0; i<13; i++)
gpuErrchk(hipStreamSynchronize(stream[i]));
/* Copy and assign */
hipLaunchKernelGGL(( set_dv_dcom_di_krnl), dim3(1),dim3(1), 0, 0, d_odata_dv, d_odata_dcom0, d_odata_dcom1,
d_odata_dcom2, d_odata_dI00, d_odata_dI01, d_odata_dI02, d_odata_dI10,
d_odata_dI11, d_odata_dI12, d_odata_dI20, d_odata_dI21, d_odata_dI22,
dmod, c);
/* Free up the temporary arrays */
hipFree(d_odata_dv);
hipFree(d_odata_dcom0); hipFree(d_odata_dcom1); hipFree(d_odata_dcom2);
hipFree(d_odata_dI00); hipFree(d_odata_dI01); hipFree(d_odata_dI02);
hipFree(d_odata_dI10); hipFree(d_odata_dI11); hipFree(d_odata_dI12);
hipFree(d_odata_dI20); hipFree(d_odata_dI21); hipFree(d_odata_dI22);
for (int i=0; i<13; i++)
gpuErrchk(hipStreamDestroy(stream[i]));
}
__host__ void compute_dv_dcom_dI_reduction(float *dv, float *dcom0, float
*dcom1, float *dcom2, float *dI00, float *dI01, float *dI02, float
*dI10, float *dI11, float *dI12, float *dI20, float *dI21, float *dI22,
int c, int size, struct mod_t *dmod) {
	/* Function calculates the model's volume, COM, and inertia tensor */
int s; // array size
int maxThreads = maxThreadsPerBlock; // max # of threads per block
int maxBlocks = 2048; // max # of blocks per grid
int whichKernel = 6; // id of reduction kernel
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
/* Device output arrays */
float *d_odata_dcom0, *d_odata_dcom1, *d_odata_dcom2, *d_odata_dI00,
*d_odata_dI01, *d_odata_dI02, *d_odata_dI10, *d_odata_dI11,
*d_odata_dI12, *d_odata_dI20, *d_odata_dI21, *d_odata_dI22,
*d_odata_dv;
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
//
// dim3 BLK,THD;
// BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
// THD.x = maxThreadsPerBlock; // Thread block dimensions
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
/* Allocate memory for the device output arrays for first reduction */
cudaCalloc((void**)&d_odata_dv, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dcom0, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dcom1, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dcom2, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI00, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI01, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI02, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI10, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI11, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI12, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI20, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI21, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI22, sizeof(float), numBlocks);
/* Call reductions for first time */
if (STREAMS) {
reducedI<float>(size, numThreads, numBlocks, whichKernel, dv, dcom0, dcom1, dcom2,
dI00, dI01, dI02, dI10, dI11, dI12, dI20, dI21, dI22, d_odata_dv,
d_odata_dcom0, d_odata_dcom1, d_odata_dcom2, d_odata_dI00, d_odata_dI01,
d_odata_dI02, d_odata_dI10, d_odata_dI11, d_odata_dI12, d_odata_dI20,
d_odata_dI21, d_odata_dI22);
checkErrorAfterKernelLaunch("reducedI<float> in compute_dv_dcom_dI_reduction"); }
else {
reduce<float>(size, numThreads, numBlocks, whichKernel, dv, d_odata_dv);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dcom0, d_odata_dcom0);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dcom1, d_odata_dcom1);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dcom2, d_odata_dcom2);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dI00, d_odata_dI00);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dI01, d_odata_dI01);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dI02, d_odata_dI02);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dI10, d_odata_dI10);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dI11, d_odata_dI11);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dI12, d_odata_dI12);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dI20, d_odata_dI20);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dI21, d_odata_dI21);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dI22, d_odata_dI22);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
}
/* Reset the orig. input arrays for later use as buffer */
gpuErrchk(hipMemset(dv, 0, size*sizeof(float)));
gpuErrchk(hipMemset(dcom0, 0, size*sizeof(float)));
gpuErrchk(hipMemset(dcom1, 0, size*sizeof(float)));
gpuErrchk(hipMemset(dcom2, 0, size*sizeof(float)));
gpuErrchk(hipMemset(dI00, 0, size*sizeof(float)));
gpuErrchk(hipMemset(dI01, 0, size*sizeof(float)));
gpuErrchk(hipMemset(dI02, 0, size*sizeof(float)));
gpuErrchk(hipMemset(dI10, 0, size*sizeof(float)));
gpuErrchk(hipMemset(dI11, 0, size*sizeof(float)));
gpuErrchk(hipMemset(dI12, 0, size*sizeof(float)));
gpuErrchk(hipMemset(dI20, 0, size*sizeof(float)));
gpuErrchk(hipMemset(dI21, 0, size*sizeof(float)));
gpuErrchk(hipMemset(dI22, 0, size*sizeof(float)));
/* Now sum partial block sums on GPU, using the reduce6<> kernel */
s = numBlocks;
while (s > 1)
{
int threads = 0, blocks = 0;
xblock_ythread = getNumBlocksAndThreads(s, maxBlocks, maxThreads);
blocks = xblock_ythread.x;
threads = xblock_ythread.y;
/* Copy the d_odata_xx arrays back into the zeroed-out input arrays */
gpuErrchk(hipMemcpy(dv, d_odata_dv, s*sizeof(float), hipMemcpyDeviceToDevice));
gpuErrchk(hipMemcpy(dcom0, d_odata_dcom0, s*sizeof(float), hipMemcpyDeviceToDevice));
gpuErrchk(hipMemcpy(dcom1, d_odata_dcom1, s*sizeof(float), hipMemcpyDeviceToDevice));
gpuErrchk(hipMemcpy(dcom2, d_odata_dcom2, s*sizeof(float), hipMemcpyDeviceToDevice));
gpuErrchk(hipMemcpy(dI00, d_odata_dI00, s*sizeof(float), hipMemcpyDeviceToDevice));
gpuErrchk(hipMemcpy(dI01, d_odata_dI01, s*sizeof(float), hipMemcpyDeviceToDevice));
gpuErrchk(hipMemcpy(dI02, d_odata_dI02, s*sizeof(float), hipMemcpyDeviceToDevice));
gpuErrchk(hipMemcpy(dI10, d_odata_dI10, s*sizeof(float), hipMemcpyDeviceToDevice));
gpuErrchk(hipMemcpy(dI11, d_odata_dI11, s*sizeof(float), hipMemcpyDeviceToDevice));
gpuErrchk(hipMemcpy(dI12, d_odata_dI12, s*sizeof(float), hipMemcpyDeviceToDevice));
gpuErrchk(hipMemcpy(dI20, d_odata_dI20, s*sizeof(float), hipMemcpyDeviceToDevice));
gpuErrchk(hipMemcpy(dI21, d_odata_dI21, s*sizeof(float), hipMemcpyDeviceToDevice));
gpuErrchk(hipMemcpy(dI22, d_odata_dI22, s*sizeof(float), hipMemcpyDeviceToDevice));
/* Call all reductions again until s = 1 */
if (STREAMS) {
reducedI<float>(s, threads, blocks, whichKernel, dv, dcom0, dcom1, dcom2,
dI00, dI01, dI02, dI10, dI11, dI12, dI20, dI21, dI22, d_odata_dv,
d_odata_dcom0, d_odata_dcom1, d_odata_dcom2, d_odata_dI00, d_odata_dI01,
d_odata_dI02, d_odata_dI10, d_odata_dI11, d_odata_dI12, d_odata_dI20,
d_odata_dI21, d_odata_dI22);
checkErrorAfterKernelLaunch("reducedI<float> in compute_dv_dcom_dI_reduction");
}
else {
reduce<float>(s, threads, blocks, whichKernel, dv, d_odata_dv);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dcom0, d_odata_dcom0);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dcom1, d_odata_dcom1);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dcom2, d_odata_dcom2);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dI00, d_odata_dI00);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dI01, d_odata_dI01);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dI02, d_odata_dI02);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dI10, d_odata_dI10);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dI11, d_odata_dI11);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dI12, d_odata_dI12);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dI20, d_odata_dI20);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dI21, d_odata_dI21);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dI22, d_odata_dI22);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
}
if (whichKernel < 3)
s = (s + threads - 1) / threads;
else
s = (s + (threads*2-1)) / (threads*2);
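		/* e.g. for whichKernel 6 with threads = 256, each pass shrinks s by a
		 * factor of 2*256 = 512, since every block folds 2*threads partial sums */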
if (s > 1)
printf("s is bigger than one");
}
/* Copy and assign */
hipLaunchKernelGGL(( set_dv_dcom_di_krnl), dim3(1),dim3(1), 0, 0, d_odata_dv, d_odata_dcom0, d_odata_dcom1,
d_odata_dcom2, d_odata_dI00, d_odata_dI01, d_odata_dI02, d_odata_dI10,
d_odata_dI11, d_odata_dI12, d_odata_dI20, d_odata_dI21, d_odata_dI22,
dmod, c);
/* Free up the temporary arrays */
hipFree(d_odata_dv);
hipFree(d_odata_dcom0); hipFree(d_odata_dcom1); hipFree(d_odata_dcom2);
hipFree(d_odata_dI00); hipFree(d_odata_dI01); hipFree(d_odata_dI02);
hipFree(d_odata_dI10); hipFree(d_odata_dI11); hipFree(d_odata_dI12);
hipFree(d_odata_dI20); hipFree(d_odata_dI21); hipFree(d_odata_dI22);
}
__global__ void dbg_pos_xlim_krnl(float *debug, float *d_odata_imax,
float *d_odata_jmax, float *d_odata_imin, float *d_odata_jmin) {
/* Single-threaded debug kernel */
if (threadIdx.x == 0) {
debug[0] = d_odata_imin[0];
debug[1] = d_odata_imax[0];
debug[2] = d_odata_jmin[0];
debug[3] = d_odata_jmax[0];
printf("\nimin: %g", debug[0]);
printf("\nimax: %g", debug[1]);
printf("\njmin: %g", debug[2]);
printf("\njmax: %g", debug[3]);
}
}
__global__ void dbg_print_device_array(float *in, int size) {
/* Single-threaded debug kernel */
int i;
if (threadIdx.x == 0) {
for (i=0; i<size; i++)
printf("\ndev_array[%i]=%g", i, in[i]);
}
}
__host__ void compute_xlim_ylim(struct dat_t *ddat, int size,
int set, int frm, int src, float *iminflt, float *imaxflt, float *jminflt,
float *jmaxflt, float *minmax_overall) {
/* Function calculates the pos->xlim and pos->ylim values and also the
* imax_overall/imin_overall/jmax_overall/jmin_overall values and updates
* pos accordingly */
int s; // array size
int maxThreads = 256; // max # of threads per block
int maxBlocks = 256; // max # of blocks per grid
int whichKernel = 6; // id of reduction kernel
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
float *d_odata_imax; // temp. float arrays for reduction output
float *d_odata_imin;
float *d_odata_jmax;
float *d_odata_jmin;
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
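	/* (Note: BLK and THD are computed here but no launch in this function uses
	 * them; the maxz/minz wrappers and set_xlim_ylim_krnl below build their own
	 * launch dimensions.) */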
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
/* Create the streams (four) */
// hipStream_t stream1, stream2, stream3, stream4;
// hipStreamCreate(&stream1);
// hipStreamCreate(&stream2);
// hipStreamCreate(&stream3);
// hipStreamCreate(&stream4);
/* Allocate memory for four device output data arrays d_odata_imax,
	 * d_odata_imin, d_odata_jmax, d_odata_jmin with enough elements to hold
* the reduction of each block during the first call */
cudaCalloc((void**)&d_odata_imax, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_imin, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_jmax, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_jmin, sizeof(float), numBlocks);
// gpuErrchk(hipHostMalloc((void**)&d_odata_imax, sizeof(float)*4,
// hipHostMallocWriteCombined | hipHostMallocMapped));
// gpuErrchk(hipHostMalloc((void**)&d_odata_imin, sizeof(float)*4,
// hipHostMallocWriteCombined | hipHostMallocMapped));
// gpuErrchk(hipHostMalloc((void**)&d_odata_jmax, sizeof(float)*4,
// hipHostMallocWriteCombined | hipHostMallocMapped));
// gpuErrchk(hipHostMalloc((void**)&d_odata_jmin, sizeof(float)*4,
// hipHostMallocWriteCombined | hipHostMallocMapped));
/* Call maxz for first time */
maxz<float>(size, numThreads, numBlocks, whichKernel, imaxflt, d_odata_imax);
checkErrorAfterKernelLaunch("maxz<float> for imaxflt in compute_xlim_ylim");
maxz<float>(size, numThreads, numBlocks, whichKernel, jmaxflt, d_odata_jmax);
checkErrorAfterKernelLaunch("maxz<float> for jmaxflt in compute_xlim_ylim");
minz<float>(size, numThreads, numBlocks, whichKernel, iminflt, d_odata_imin);
checkErrorAfterKernelLaunch("minz<float> for iminflt in compute_xlim_ylim");
minz<float>(size, numThreads, numBlocks, whichKernel, jminflt, d_odata_jmin);
checkErrorAfterKernelLaunch("minz<float> for jminflt in compute_xlim_ylim");
// maxzexp<float>(size, numThreads, numBlocks, whichKernel, imaxflt, jmaxflt,
// iminflt, jminflt, d_odata_imax, d_odata_jmax, d_odata_imin,
// d_odata_jmin, &stream1, &stream2, &stream3, &stream4);
/* Reset the original input arrays for later use as buffer */
gpuErrchk(hipMemset(imaxflt, 0, size*sizeof(float)));
gpuErrchk(hipMemset(iminflt, 0, size*sizeof(float)));
gpuErrchk(hipMemset(jmaxflt, 0, size*sizeof(float)));
gpuErrchk(hipMemset(jminflt, 0, size*sizeof(float)));
/* Now sum partial block sums on GPU, using the maxz6<> kernel */
s = numBlocks;
while (s > 1)
{
int threads = 0, blocks = 0;
xblock_ythread = getNumBlocksAndThreads(s, maxBlocks, maxThreads);
blocks = xblock_ythread.x;
threads = xblock_ythread.y;
		/* Copy the d_odata_xx partial results back into the zeroed-out input arrays */
		gpuErrchk(hipMemcpy(imaxflt, d_odata_imax, s*sizeof(float), hipMemcpyDeviceToDevice));
		gpuErrchk(hipMemcpy(iminflt, d_odata_imin, s*sizeof(float), hipMemcpyDeviceToDevice));
		gpuErrchk(hipMemcpy(jmaxflt, d_odata_jmax, s*sizeof(float), hipMemcpyDeviceToDevice));
		gpuErrchk(hipMemcpy(jminflt, d_odata_jmin, s*sizeof(float), hipMemcpyDeviceToDevice));
maxz<float>(s, threads, blocks, whichKernel, imaxflt, d_odata_imax);
checkErrorAfterKernelLaunch("maxz<float> for imaxflt in compute_xlim_ylim");
maxz<float>(s, threads, blocks, whichKernel, jmaxflt, d_odata_jmax);
checkErrorAfterKernelLaunch("maxz<float> for jmaxflt in compute_xlim_ylim");
minz<float>(s, threads, blocks, whichKernel, iminflt, d_odata_imin);
checkErrorAfterKernelLaunch("minz<float> for iminflt in compute_xlim_ylim");
minz<float>(s, threads, blocks, whichKernel, jminflt, d_odata_jmin);
checkErrorAfterKernelLaunch("minz<float> for jminflt in compute_xlim_ylim");
// maxzexp<float>(s, threads, blocks, whichKernel, imaxflt, jmaxflt, iminflt,
// jminflt, d_odata_imax, d_odata_jmax, d_odata_imin, d_odata_jmin,
// &stream1, &stream2, &stream3, &stream4);
if (whichKernel < 3)
s = (s + threads - 1) / threads;
else
s = (s + (threads*2-1)) / (threads*2);
if (s > 1)
printf("s is bigger than one");
}
/* Sync streams */
// gpuErrchk(hipStreamSynchronize(stream1));
// gpuErrchk(hipStreamSynchronize(stream2));
// gpuErrchk(hipStreamSynchronize(stream3));
// gpuErrchk(hipStreamSynchronize(stream4));
/* Calculate the min/max overall values (regardless of POS frame limits) */
hipLaunchKernelGGL(( set_xlim_ylim_krnl), dim3(1),dim3(1), 0, 0, ddat, set, frm, src, d_odata_imax, d_odata_imin,
d_odata_jmax, d_odata_jmin, minmax_overall);
checkErrorAfterKernelLaunch("set_xlim_ylim_krnl in compute_xlim_ylim");
/* Nuke streams */
// gpuErrchk(hipStreamDestroy(stream1));
// gpuErrchk(hipStreamDestroy(stream2));
// gpuErrchk(hipStreamDestroy(stream3));
// gpuErrchk(hipStreamDestroy(stream4));
hipFree(d_odata_imax);
hipFree(d_odata_imin);
hipFree(d_odata_jmax);
hipFree(d_odata_jmin);
}
__host__ void c2af_deldop_add_o2_m2(
float **temp_o2,
float **temp_m2,
float **temp_om,
int size,
int nframes) {
	/* Function reduces the input arrays over nframes frames, one reduction per
	 * frame. Each input array is structured as input[nframes][size] */
int maxThreads = maxThreadsPerBlock;
int maxBlocks = 2048;
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
float *d_odata_o2, *d_odata_m2, *d_odata_om;
float *d_idata_o2, *d_idata_m2, *d_idata_om;
float2 xblock_ythread;
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
/* Find number of blocks & threads needed to reduce ONE FRAME ONLY */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
dim3 dimBlock(numThreads, 1, 1);
dim3 dimGrid(numBlocks, 1, 1);
/* Allocate memory for d_idata and d_odata */
cudaCalloc((void**)&d_idata_o2, sizeof(float), size);
cudaCalloc((void**)&d_odata_o2, sizeof(float), numBlocks);
cudaCalloc((void**)&d_idata_m2, sizeof(float), size);
cudaCalloc((void**)&d_odata_m2, sizeof(float), numBlocks);
cudaCalloc((void**)&d_idata_om, sizeof(float), size);
cudaCalloc((void**)&d_odata_om, sizeof(float), numBlocks);
for (int frm=0; frm<nframes; frm++) {
hipLaunchKernelGGL(( c2af_set_data_krnl), dim3(BLK),dim3(THD), 0, 0, temp_o2, d_idata_o2, frm, size);
checkErrorAfterKernelLaunch("c2af_set_data_krnl in reduction.cu");
hipLaunchKernelGGL(( c2af_set_data_krnl), dim3(BLK),dim3(THD), 0, 0, temp_m2, d_idata_m2, frm, size);
checkErrorAfterKernelLaunch("c2af_set_data_krnl in reduction.cu");
hipLaunchKernelGGL(( c2af_set_data_krnl), dim3(BLK),dim3(THD), 0, 0, temp_om, d_idata_om, frm, size);
checkErrorAfterKernelLaunch("c2af_set_data_krnl in reduction.cu");
/* Call reduction */
hipLaunchKernelGGL(( device_reduce_block_atomic_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_idata_o2,
d_odata_o2, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel");
hipLaunchKernelGGL(( device_reduce_block_atomic_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_idata_m2,
d_odata_m2, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel");
hipLaunchKernelGGL(( device_reduce_block_atomic_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_idata_om,
d_odata_om, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel");
deviceSyncAfterKernelLaunch("device_reduce_block_atomic_kernel");
temp_o2[frm][0] = d_odata_o2[0];
temp_m2[frm][0] = d_odata_m2[0];
temp_om[frm][0] = d_odata_om[0];
gpuErrchk(hipMemset(d_odata_o2, 0, numBlocks*sizeof(float)));
gpuErrchk(hipMemset(d_odata_m2, 0, numBlocks*sizeof(float)));
gpuErrchk(hipMemset(d_odata_om, 0, numBlocks*sizeof(float)));
}
/* Output sum for each frame is in first entry for each frame in the
* input array */
hipFree(d_odata_o2);
hipFree(d_idata_o2);
hipFree(d_odata_m2);
hipFree(d_idata_m2);
hipFree(d_odata_om);
hipFree(d_idata_om);
}
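/* Note: the host-side reads of d_odata_o2[0] and temp_o2[frm][0] (and the m2/om
 * counterparts) in the frame loop above assume those buffers are host-accessible,
 * i.e. that the cudaCalloc macro allocates unified/zero-copy memory rather than
 * plain device memory; otherwise an explicit copy back to the host would be
 * required before assigning the per-frame sums. */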
//#endif // #ifndef _REDUCE_KERNEL_H_
| 27bcead1a4dc3d8c59263189d5148e84685f3648.cu | extern "C" {
#include "head.h"
}
#include <stdio.h>
#define MAX_BLOCK_DIM_SIZE 65535
#ifndef MIN
#define MIN(x,y) ((x < y) ? x : y)
#endif
//#ifndef _REDUCE_KERNEL_H_
//#define _REDUCE_KERNEL_H_
__device__ float reduction_sum_rad_xsec = 0.0; // Used for the deldop xsec all frames fx
int isPow2(unsigned int x)
{
return ((x&(x-1))==0);
}
unsigned int nextPow2(unsigned int x)
{
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
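/* Examples: nextPow2(1) == 1, nextPow2(6) == 8, nextPow2(32) == 32,
 * nextPow2(33) == 64. */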
__inline__ __device__
float warpReduceSum(float val) {
for (int offset = warpSize/2; offset > 0; offset /= 2)
val += __shfl_down(val, offset);
return val;
}
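/* Example: with warpSize == 32 the loop above uses offsets 16, 8, 4, 2, 1, so
 * after five shuffle steps lane 0 holds the sum of all 32 lane values (the
 * other lanes hold partial sums only). */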
__inline__ __device__
float warpReduceMax(float val) {
for (int offset = warpSize/2; offset > 0; offset /= 2)
val = fmaxf((__shfl_down(val, offset)),val);
return val;
}
__inline__ __device__
float blockReduceSum(float val) {
static __shared__ float shared[32];
int lane=threadIdx.x%warpSize;
int wid=threadIdx.x/warpSize;
val=warpReduceSum(val);
//write reduced value to shared memory
if(lane==0) shared[wid]=val;
__syncthreads();
//ensure we only grab a value from shared memory if that warp existed
val = (threadIdx.x<blockDim.x/warpSize) ? shared[lane] : float(0.0);
if(wid==0) val=warpReduceSum(val);
return val;
}
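/* Caveat: the conditional read of shared[] above drops the partial warp when
 * blockDim.x is not a multiple of warpSize, and yields 0 when blockDim.x is
 * smaller than warpSize, so callers should launch block sizes that are whole
 * multiples of warpSize. */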
__inline__ __device__
float blockReduceMax(float val) {
static __shared__ float shared[32];
int lane=threadIdx.x%warpSize;
int wid=threadIdx.x/warpSize;
val=warpReduceMax(val);
//write reduced value to shared memory
if(lane==0) shared[wid]=val;
__syncthreads();
//ensure we only grab a value from shared memory if that warp existed
val = (threadIdx.x<blockDim.x/warpSize) ? shared[lane] : float(0.0);
if(wid==0) val=warpReduceMax(val);
return val;
}
__global__ void deviceReduceWarpAtomicKernel(float *in, float* out, int N) {
float sum = float(0.0);
for(int i = blockIdx.x * blockDim.x + threadIdx.x;
i < N;
i += blockDim.x * gridDim.x) {
sum += in[i];
}
sum = warpReduceSum(sum);
	if ((threadIdx.x & (warpSize - 1)) == 0)   /* only lane 0 of each warp adds */
atomicAdd(out, sum);
}
__global__ void device_reduce_block_atomic_kernel(float *in, float* out, int N) {
float sum=float(0.0);
for(int i=blockIdx.x*blockDim.x+threadIdx.x;i<N;i+=blockDim.x*gridDim.x) {
sum+=in[i];
}
sum=blockReduceSum(sum);
if(threadIdx.x==0)
atomicAdd(out,sum);
}
__global__ void device_sum_block_atomic_kernel(float *in, float* out, int N) {
float maxz=float(0.0);
for(int i=blockIdx.x*blockDim.x+threadIdx.x;i<N;i+=blockDim.x*gridDim.x) {
maxz=fmaxf(in[i],maxz);
}
maxz=blockReduceMax(maxz);
if(threadIdx.x==0)
atomicExch(out,maxz);
}
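/* Caveat: despite its name, the kernel above computes a per-block maximum and
 * stores it with atomicExch, so with more than one block the value left in *out
 * is simply whichever block wrote last, not the grid-wide maximum. A CAS-based
 * atomic max (or a second reduction pass) would be needed for that; the maxz/minz
 * wrappers below use the two-pass maxz6/minz6 kernels instead. */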
/* Compute the number of threads and blocks to use for reduction kernel 6
* For kernel 6, we observe the maximum specified number of blocks, because
* each thread in that kernel can process a variable number of elements. */
float2 getNumBlocksAndThreads(int n, int maxBlocks, int maxThreads)
{
//get device capability, to avoid block/grid size exceed the upper bound
cudaDeviceProp prop;
int device, threads, blocks;
float2 xb_yt;
gpuErrchk(cudaGetDevice(&device));
gpuErrchk(cudaGetDeviceProperties(&prop, device));
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
if ((float)threads*blocks > (float)prop.maxGridSize[0] * prop.maxThreadsPerBlock)
printf("Array size for parallel reduction is too large!\n");
if (blocks > prop.maxGridSize[0])
{
printf("Grid size <%d> exceeds the device capability <%d>, set block size as %d (original %d)\n",
blocks, prop.maxGridSize[0], threads*2, threads);
blocks /= 2;
threads *= 2;
}
blocks = MIN(maxBlocks, blocks);
xb_yt.x = blocks;
xb_yt.y = threads;
return xb_yt;
}
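/* Worked example: n = 100000 with maxBlocks = maxThreads = 256 gives
 * threads = 256 (since n >= 2*maxThreads) and blocks = (100000 + 511)/512 = 196;
 * 196 <= maxBlocks, so the returned float2 is (x = 196 blocks, y = 256 threads). */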
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
template<class T>
struct SharedMemory
{
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
template<>
struct SharedMemory<double>
{
__device__ inline operator double *()
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
__device__ inline operator const double *() const
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
};
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n threads
- only works for power-of-2 arrays
*/
/*
This version adds multiple elements per thread sequentially. This reduces the overall
cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
(Brent's Theorem optimization)
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
template <class T, unsigned int blockSize, int nIsPow2>
__global__ void
reduce6(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
T mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
mySum += g_idata[i];
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n)
mySum += g_idata[i+blockSize];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
__syncthreads();
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300 )
if ( tid < 32 )
{
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
mySum += __shfl_down(mySum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
#endif
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
/*
This version adds multiple elements per thread sequentially. This reduces the overall
cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n).
(Brent's Theorem optimization)
Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory.
In other words if blockSize <= 32, allocate 64*sizeof(T) bytes.
If blockSize > 32, allocate blockSize*sizeof(T) bytes.
*/
template <class T, unsigned int blockSize, int nIsPow2>
__global__ void
maxz6(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
T myMax = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
myMax = fmaxf(g_idata[i], myMax);
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n)
myMax = fmaxf(g_idata[i+blockSize], myMax);
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = myMax;
__syncthreads();
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid] = myMax = fmaxf(myMax, sdata[tid + 256]);
}
__syncthreads();
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid] = myMax = fmaxf(myMax, sdata[tid + 128]);
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid] = myMax = fmaxf(myMax, sdata[tid + 64]);
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300 )
if ( tid < 32 )
{
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) myMax = fmaxf(sdata[tid + 32], myMax);
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
myMax = fmaxf(myMax, __shfl_down(myMax, offset));
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = myMax = fmaxf(myMax, sdata[tid + 32]);
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = myMax = fmaxf(myMax, sdata[tid + 16]);
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = myMax = fmaxf(myMax, sdata[tid + 8]);
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = myMax = fmaxf(myMax, sdata[tid + 4]);
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = myMax = fmaxf(myMax, sdata[tid + 2]);
}
__syncthreads();
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = myMax = fmaxf(myMax, sdata[tid + 1]);
}
__syncthreads();
#endif
// write result for this block to global mem
if (tid == 0)
g_odata[blockIdx.x] = myMax;
}
template <class T, unsigned int blockSize, int nIsPow2>
__global__ void
minz6(T *g_idata, T *g_odata, unsigned int n)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
T myMin = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
myMin = fminf(g_idata[i], myMin);
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (nIsPow2 || i + blockSize < n)
myMin = fminf(g_idata[i+blockSize], myMin);
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = myMin;
__syncthreads();
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid] = myMin = fminf(myMin, sdata[tid + 256]);
}
__syncthreads();
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid] = myMin = fminf(myMin, sdata[tid + 128]);
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid] = myMin = fminf(myMin, sdata[tid + 64]);
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300 )
if ( tid < 32 )
{
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) myMin = fminf(sdata[tid + 32], myMin);
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
myMin = fminf(myMin, __shfl_down(myMin, offset));
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = myMin = fminf(myMin, sdata[tid + 32]);
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = myMin = fminf(myMin, sdata[tid + 16]);
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = myMin = fminf(myMin, sdata[tid + 8]);
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = myMin = fminf(myMin, sdata[tid + 4]);
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = myMin = fminf(myMin, sdata[tid + 2]);
}
__syncthreads();
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = myMin = fminf(myMin, sdata[tid + 1]);
}
__syncthreads();
#endif
// write result for this block to global mem
if (tid == 0)
g_odata[blockIdx.x] = myMin;
}
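/* Note: maxz6 and minz6 both start their accumulators at 0, so results are
 * effectively clamped to >= 0 (max) and <= 0 (min); inputs lying entirely on one
 * side of zero would be masked. The POS-limit arrays reduced in this file
 * normally straddle zero, so this appears to be intentional. */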
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////
template <class T>
void
reduce(int size, int threads, int blocks,
int whichKernel, T *d_idata, T *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
// choose which of the optimized versions of reduction to launch
switch (whichKernel)
{
case 6:
default:
if (isPow2(size))
{
switch (threads)
{
case 512:
reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
}
else
{
switch (threads)
{
case 512:
reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
}
break;
}
}
// Instantiate the reduction function for 3 types
template void
reduce<int>(int size, int threads, int blocks,
int whichKernel, int *d_idata, int *d_odata);
template void
reduce<float>(int size, int threads, int blocks,
int whichKernel, float *d_idata, float *d_odata);
template void
reduce<double>(int size, int threads, int blocks,
int whichKernel, double *d_idata, double *d_odata);
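/* Illustrative sketch (not part of the original file): how the reduce<float>
 * wrapper above and getNumBlocksAndThreads combine into a complete sum
 * reduction, mirroring the two-pass pattern used by
 * compute_dv_dcom_dI_reduction and compute_xlim_ylim. The function name
 * example_full_sum and the buffer names d_in / d_scratch are hypothetical;
 * d_in is a device array of N floats that may be overwritten, and d_scratch
 * must hold at least one float per block of the first pass. */
__host__ float example_full_sum(float *d_in, float *d_scratch, int N)
{
	int maxThreads = 256, maxBlocks = 256, whichKernel = 6;
	float2 bt = getNumBlocksAndThreads(N, maxBlocks, maxThreads);
	int blocks = bt.x, threads = bt.y;

	/* First pass: one partial sum per block lands in d_scratch */
	reduce<float>(N, threads, blocks, whichKernel, d_in, d_scratch);
	checkErrorAfterKernelLaunch("reduce<float> in example_full_sum");

	/* Keep folding the partial block sums until a single value remains */
	int s = blocks;
	while (s > 1) {
		bt = getNumBlocksAndThreads(s, maxBlocks, maxThreads);
		blocks = bt.x;
		threads = bt.y;
		gpuErrchk(cudaMemcpy(d_in, d_scratch, s*sizeof(float),
				cudaMemcpyDeviceToDevice));
		reduce<float>(s, threads, blocks, whichKernel, d_in, d_scratch);
		checkErrorAfterKernelLaunch("reduce<float> in example_full_sum");
		s = (s + (threads*2 - 1)) / (threads*2);   /* kernel 6 folds 2*threads per block */
	}

	/* Final result is the single partial sum left in d_scratch[0] */
	float h_sum = 0.0f;
	gpuErrchk(cudaMemcpy(&h_sum, d_scratch, sizeof(float), cudaMemcpyDeviceToHost));
	return h_sum;
}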
template <class T>
void
reducedI(int size, int threads, int blocks, int whichKernel, T *dv,
T *dcom0, T *dcom1, T *dcom2, T *dI00, T *dI01, T *dI02, T *dI10,
T *dI11, T *dI12, T *dI20, T *dI21, T *dI22, T *d_odata_dv,
T *d_odata_dcom0, T *d_odata_dcom1, T *d_odata_dcom2, T *d_odata_dI00,
T *d_odata_dI01, T *d_odata_dI02, T *d_odata_dI10, T *d_odata_dI11,
T *d_odata_dI12, T *d_odata_dI20, T *d_odata_dI21, T *d_odata_dI22)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
/* Create 13 streams */
cudaStream_t stream01, stream02, stream03, stream04, stream05, stream06,
stream07, stream08, stream09, stream10, stream11, stream12, stream13;
cudaStreamCreate(&stream01); cudaStreamCreate(&stream02);
cudaStreamCreate(&stream03); cudaStreamCreate(&stream04);
cudaStreamCreate(&stream05); cudaStreamCreate(&stream06);
cudaStreamCreate(&stream07); cudaStreamCreate(&stream08);
cudaStreamCreate(&stream09); cudaStreamCreate(&stream10);
cudaStreamCreate(&stream11); cudaStreamCreate(&stream12);
cudaStreamCreate(&stream13);
// choose which of the optimized versions of reduction to launch
switch (whichKernel)
{
case 6:
default:
if (isPow2(size))
{
switch (threads)
{
case 512:
reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize, stream01 >>>(dv, d_odata_dv, size);
reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize, stream02 >>>(dcom0, d_odata_dcom0, size);
reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize, stream03 >>>(dcom1, d_odata_dcom1, size);
reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize, stream04 >>>(dcom2, d_odata_dcom2, size);
reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize, stream05 >>>(dI00, d_odata_dI00, size);
reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize, stream06 >>>(dI01, d_odata_dI01, size);
reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize, stream07 >>>(dI02, d_odata_dI02, size);
reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize, stream08 >>>(dI10, d_odata_dI10, size);
reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize, stream09 >>>(dI11, d_odata_dI11, size);
reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize, stream10 >>>(dI12, d_odata_dI12, size);
reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize, stream11 >>>(dI20, d_odata_dI20, size);
reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize, stream12 >>>(dI21, d_odata_dI21, size);
reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize, stream13 >>>(dI22, d_odata_dI22, size);
break;
case 256:
reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize, stream01 >>>(dv, d_odata_dv, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dv (pow2) and 256 threads");
reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize, stream02 >>>(dcom0, d_odata_dcom0, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dcom0 (pow2) and 256 threads");
reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize, stream03 >>>(dcom1, d_odata_dcom1, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dcom1 (pow2) and 256 threads");
reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize, stream04 >>>(dcom2, d_odata_dcom2, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dcom2 (pow2) and 256 threads");
reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize, stream05 >>>(dI00, d_odata_dI00, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI00 (pow2) and 256 threads");
reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize, stream06 >>>(dI01, d_odata_dI01, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI01 (pow2) and 256 threads");
reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize, stream07 >>>(dI02, d_odata_dI02, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI02 (pow2) and 256 threads");
reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize, stream08 >>>(dI10, d_odata_dI10, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI10 (pow2) and 256 threads");
reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize, stream09 >>>(dI11, d_odata_dI11, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI11 (pow2) and 256 threads");
reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize, stream10 >>>(dI12, d_odata_dI12, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI12 (pow2) and 256 threads");
reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize, stream11 >>>(dI20, d_odata_dI20, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI20 (pow2) and 256 threads");
reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize, stream12 >>>(dI21, d_odata_dI21, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI21 (pow2) and 256 threads");
reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize, stream13 >>>(dI22, d_odata_dI22, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI22 (pow2) and 256 threads");
break;
case 128:
reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize, stream01 >>>(dv, d_odata_dv, size);
reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize, stream02 >>>(dcom0, d_odata_dcom0, size);
reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize, stream03 >>>(dcom1, d_odata_dcom1, size);
reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize, stream04 >>>(dcom2, d_odata_dcom2, size);
reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize, stream05 >>>(dI00, d_odata_dI00, size);
reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize, stream06 >>>(dI01, d_odata_dI01, size);
reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize, stream07 >>>(dI02, d_odata_dI02, size);
reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize, stream08 >>>(dI10, d_odata_dI10, size);
reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize, stream09 >>>(dI11, d_odata_dI11, size);
reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize, stream10 >>>(dI12, d_odata_dI12, size);
reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize, stream11 >>>(dI20, d_odata_dI20, size);
reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize, stream12 >>>(dI21, d_odata_dI21, size);
reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize, stream13 >>>(dI22, d_odata_dI22, size);
break;
case 64:
reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize, stream01 >>>(dv, d_odata_dv, size);
reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize, stream02 >>>(dcom0, d_odata_dcom0, size);
reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize, stream03 >>>(dcom1, d_odata_dcom1, size);
reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize, stream04 >>>(dcom2, d_odata_dcom2, size);
reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize, stream05 >>>(dI00, d_odata_dI00, size);
reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize, stream06 >>>(dI01, d_odata_dI01, size);
reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize, stream07 >>>(dI02, d_odata_dI02, size);
reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize, stream08 >>>(dI10, d_odata_dI10, size);
reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize, stream09 >>>(dI11, d_odata_dI11, size);
reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize, stream10 >>>(dI12, d_odata_dI12, size);
reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize, stream11 >>>(dI20, d_odata_dI20, size);
reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize, stream12 >>>(dI21, d_odata_dI21, size);
reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize, stream13 >>>(dI22, d_odata_dI22, size);
break;
case 32:
reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize, stream01 >>>(dv, d_odata_dv, size);
reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize, stream02 >>>(dcom0, d_odata_dcom0, size);
reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize, stream03 >>>(dcom1, d_odata_dcom1, size);
reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize, stream04 >>>(dcom2, d_odata_dcom2, size);
reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize, stream05 >>>(dI00, d_odata_dI00, size);
reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize, stream06 >>>(dI01, d_odata_dI01, size);
reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize, stream07 >>>(dI02, d_odata_dI02, size);
reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize, stream08 >>>(dI10, d_odata_dI10, size);
reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize, stream09 >>>(dI11, d_odata_dI11, size);
reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize, stream10 >>>(dI12, d_odata_dI12, size);
reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize, stream11 >>>(dI20, d_odata_dI20, size);
reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize, stream12 >>>(dI21, d_odata_dI21, size);
reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize, stream13 >>>(dI22, d_odata_dI22, size);
break;
case 16:
reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize, stream01 >>>(dv, d_odata_dv, size);
reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize, stream02 >>>(dcom0, d_odata_dcom0, size);
reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize, stream03 >>>(dcom1, d_odata_dcom1, size);
reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize, stream04 >>>(dcom2, d_odata_dcom2, size);
reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize, stream05 >>>(dI00, d_odata_dI00, size);
reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize, stream06 >>>(dI01, d_odata_dI01, size);
reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize, stream07 >>>(dI02, d_odata_dI02, size);
reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize, stream08 >>>(dI10, d_odata_dI10, size);
reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize, stream09 >>>(dI11, d_odata_dI11, size);
reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize, stream10 >>>(dI12, d_odata_dI12, size);
reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize, stream11 >>>(dI20, d_odata_dI20, size);
reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize, stream12 >>>(dI21, d_odata_dI21, size);
reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize, stream13 >>>(dI22, d_odata_dI22, size);
break;
case 8:
reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize, stream01 >>>(dv, d_odata_dv, size);
reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize, stream02 >>>(dcom0, d_odata_dcom0, size);
reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize, stream03 >>>(dcom1, d_odata_dcom1, size);
reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize, stream04 >>>(dcom2, d_odata_dcom2, size);
reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize, stream05 >>>(dI00, d_odata_dI00, size);
reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize, stream06 >>>(dI01, d_odata_dI01, size);
reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize, stream07 >>>(dI02, d_odata_dI02, size);
reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize, stream08 >>>(dI10, d_odata_dI10, size);
reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize, stream09 >>>(dI11, d_odata_dI11, size);
reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize, stream10 >>>(dI12, d_odata_dI12, size);
reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize, stream11 >>>(dI20, d_odata_dI20, size);
reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize, stream12 >>>(dI21, d_odata_dI21, size);
reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize, stream13 >>>(dI22, d_odata_dI22, size);
break;
case 4:
reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize, stream01 >>>(dv, d_odata_dv, size);
reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize, stream02 >>>(dcom0, d_odata_dcom0, size);
reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize, stream03 >>>(dcom1, d_odata_dcom1, size);
reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize, stream04 >>>(dcom2, d_odata_dcom2, size);
reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize, stream05 >>>(dI00, d_odata_dI00, size);
reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize, stream06 >>>(dI01, d_odata_dI01, size);
reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize, stream07 >>>(dI02, d_odata_dI02, size);
reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize, stream08 >>>(dI10, d_odata_dI10, size);
reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize, stream09 >>>(dI11, d_odata_dI11, size);
reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize, stream10 >>>(dI12, d_odata_dI12, size);
reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize, stream11 >>>(dI20, d_odata_dI20, size);
reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize, stream12 >>>(dI21, d_odata_dI21, size);
reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize, stream13 >>>(dI22, d_odata_dI22, size);
break;
case 2:
reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize, stream01 >>>(dv, d_odata_dv, size);
reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize, stream02 >>>(dcom0, d_odata_dcom0, size);
reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize, stream03 >>>(dcom1, d_odata_dcom1, size);
reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize, stream04 >>>(dcom2, d_odata_dcom2, size);
reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize, stream05 >>>(dI00, d_odata_dI00, size);
reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize, stream06 >>>(dI01, d_odata_dI01, size);
reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize, stream07 >>>(dI02, d_odata_dI02, size);
reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize, stream08 >>>(dI10, d_odata_dI10, size);
reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize, stream09 >>>(dI11, d_odata_dI11, size);
reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize, stream10 >>>(dI12, d_odata_dI12, size);
reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize, stream11 >>>(dI20, d_odata_dI20, size);
reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize, stream12 >>>(dI21, d_odata_dI21, size);
reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize, stream13 >>>(dI22, d_odata_dI22, size);
break;
case 1:
reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize, stream01 >>>(dv, d_odata_dv, size);
reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize, stream02 >>>(dcom0, d_odata_dcom0, size);
reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize, stream03 >>>(dcom1, d_odata_dcom1, size);
reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize, stream04 >>>(dcom2, d_odata_dcom2, size);
reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize, stream05 >>>(dI00, d_odata_dI00, size);
reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize, stream06 >>>(dI01, d_odata_dI01, size);
reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize, stream07 >>>(dI02, d_odata_dI02, size);
reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize, stream08 >>>(dI10, d_odata_dI10, size);
reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize, stream09 >>>(dI11, d_odata_dI11, size);
reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize, stream10 >>>(dI12, d_odata_dI12, size);
reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize, stream11 >>>(dI20, d_odata_dI20, size);
reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize, stream12 >>>(dI21, d_odata_dI21, size);
reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize, stream13 >>>(dI22, d_odata_dI22, size);
break;
}
}
else
{
switch (threads)
{
case 512:
reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize, stream01 >>>(dv, d_odata_dv, size);
reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize, stream02 >>>(dcom0, d_odata_dcom0, size);
reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize, stream03 >>>(dcom1, d_odata_dcom1, size);
reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize, stream04 >>>(dcom2, d_odata_dcom2, size);
reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize, stream05 >>>(dI00, d_odata_dI00, size);
reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize, stream06 >>>(dI01, d_odata_dI01, size);
reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize, stream07 >>>(dI02, d_odata_dI02, size);
reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize, stream08 >>>(dI10, d_odata_dI10, size);
reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize, stream09 >>>(dI11, d_odata_dI11, size);
reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize, stream10 >>>(dI12, d_odata_dI12, size);
reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize, stream11 >>>(dI20, d_odata_dI20, size);
reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize, stream12 >>>(dI21, d_odata_dI21, size);
reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize, stream13 >>>(dI22, d_odata_dI22, size);
break;
case 256:
reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize, stream01 >>>(dv, d_odata_dv, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dv and 256 threads");
reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize, stream02 >>>(dcom0, d_odata_dcom0, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dcom0 and 256 threads");
reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize, stream03 >>>(dcom1, d_odata_dcom1, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dcom1 and 256 threads");
reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize, stream04 >>>(dcom2, d_odata_dcom2, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dcom2 and 256 threads");
reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize, stream05 >>>(dI00, d_odata_dI00, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI00 and 256 threads");
reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize, stream06 >>>(dI01, d_odata_dI01, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI01 and 256 threads");
reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize, stream07 >>>(dI02, d_odata_dI02, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI02 and 256 threads");
reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize, stream08 >>>(dI10, d_odata_dI10, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI10 and 256 threads");
reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize, stream09 >>>(dI11, d_odata_dI11, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI11 and 256 threads");
reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize, stream10 >>>(dI12, d_odata_dI12, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI12 and 256 threads");
reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize, stream11 >>>(dI20, d_odata_dI20, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI20 and 256 threads");
reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize, stream12 >>>(dI21, d_odata_dI21, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI21 and 256 threads");
reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize, stream13 >>>(dI22, d_odata_dI22, size);
checkErrorAfterKernelLaunch("reduce6 in reducedI, dI22 and 256 threads");
break;
case 128:
reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize, stream01 >>>(dv, d_odata_dv, size);
reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize, stream02 >>>(dcom0, d_odata_dcom0, size);
reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize, stream03 >>>(dcom1, d_odata_dcom1, size);
reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize, stream04 >>>(dcom2, d_odata_dcom2, size);
reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize, stream05 >>>(dI00, d_odata_dI00, size);
reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize, stream06 >>>(dI01, d_odata_dI01, size);
reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize, stream07 >>>(dI02, d_odata_dI02, size);
reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize, stream08 >>>(dI10, d_odata_dI10, size);
reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize, stream09 >>>(dI11, d_odata_dI11, size);
reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize, stream10 >>>(dI12, d_odata_dI12, size);
reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize, stream11 >>>(dI20, d_odata_dI20, size);
reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize, stream12 >>>(dI21, d_odata_dI21, size);
reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize, stream13 >>>(dI22, d_odata_dI22, size);
break;
case 64:
reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize, stream01 >>>(dv, d_odata_dv, size);
reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize, stream02 >>>(dcom0, d_odata_dcom0, size);
reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize, stream03 >>>(dcom1, d_odata_dcom1, size);
reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize, stream04 >>>(dcom2, d_odata_dcom2, size);
reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize, stream05 >>>(dI00, d_odata_dI00, size);
reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize, stream06 >>>(dI01, d_odata_dI01, size);
reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize, stream07 >>>(dI02, d_odata_dI02, size);
reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize, stream08 >>>(dI10, d_odata_dI10, size);
reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize, stream09 >>>(dI11, d_odata_dI11, size);
reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize, stream10 >>>(dI12, d_odata_dI12, size);
reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize, stream11 >>>(dI20, d_odata_dI20, size);
reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize, stream12 >>>(dI21, d_odata_dI21, size);
reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize, stream13 >>>(dI22, d_odata_dI22, size);
break;
case 32:
reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize, stream01 >>>(dv, d_odata_dv, size);
reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize, stream02 >>>(dcom0, d_odata_dcom0, size);
reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize, stream03 >>>(dcom1, d_odata_dcom1, size);
reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize, stream04 >>>(dcom2, d_odata_dcom2, size);
reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize, stream05 >>>(dI00, d_odata_dI00, size);
reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize, stream06 >>>(dI01, d_odata_dI01, size);
reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize, stream07 >>>(dI02, d_odata_dI02, size);
reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize, stream08 >>>(dI10, d_odata_dI10, size);
reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize, stream09 >>>(dI11, d_odata_dI11, size);
reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize, stream10 >>>(dI12, d_odata_dI12, size);
reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize, stream11 >>>(dI20, d_odata_dI20, size);
reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize, stream12 >>>(dI21, d_odata_dI21, size);
reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize, stream13 >>>(dI22, d_odata_dI22, size);
break;
case 16:
reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize, stream01 >>>(dv, d_odata_dv, size);
reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize, stream02 >>>(dcom0, d_odata_dcom0, size);
reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize, stream03 >>>(dcom1, d_odata_dcom1, size);
reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize, stream04 >>>(dcom2, d_odata_dcom2, size);
reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize, stream05 >>>(dI00, d_odata_dI00, size);
reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize, stream06 >>>(dI01, d_odata_dI01, size);
reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize, stream07 >>>(dI02, d_odata_dI02, size);
reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize, stream08 >>>(dI10, d_odata_dI10, size);
reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize, stream09 >>>(dI11, d_odata_dI11, size);
reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize, stream10 >>>(dI12, d_odata_dI12, size);
reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize, stream11 >>>(dI20, d_odata_dI20, size);
reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize, stream12 >>>(dI21, d_odata_dI21, size);
reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize, stream13 >>>(dI22, d_odata_dI22, size);
break;
case 8:
reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize, stream01 >>>(dv, d_odata_dv, size);
reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize, stream02 >>>(dcom0, d_odata_dcom0, size);
reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize, stream03 >>>(dcom1, d_odata_dcom1, size);
reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize, stream04 >>>(dcom2, d_odata_dcom2, size);
reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize, stream05 >>>(dI00, d_odata_dI00, size);
reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize, stream06 >>>(dI01, d_odata_dI01, size);
reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize, stream07 >>>(dI02, d_odata_dI02, size);
reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize, stream08 >>>(dI10, d_odata_dI10, size);
reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize, stream09 >>>(dI11, d_odata_dI11, size);
reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize, stream10 >>>(dI12, d_odata_dI12, size);
reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize, stream11 >>>(dI20, d_odata_dI20, size);
reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize, stream12 >>>(dI21, d_odata_dI21, size);
reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize, stream13 >>>(dI22, d_odata_dI22, size);
break;
case 4:
reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize, stream01 >>>(dv, d_odata_dv, size);
reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize, stream02 >>>(dcom0, d_odata_dcom0, size);
reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize, stream03 >>>(dcom1, d_odata_dcom1, size);
reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize, stream04 >>>(dcom2, d_odata_dcom2, size);
reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize, stream05 >>>(dI00, d_odata_dI00, size);
reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize, stream06 >>>(dI01, d_odata_dI01, size);
reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize, stream07 >>>(dI02, d_odata_dI02, size);
reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize, stream08 >>>(dI10, d_odata_dI10, size);
reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize, stream09 >>>(dI11, d_odata_dI11, size);
reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize, stream10 >>>(dI12, d_odata_dI12, size);
reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize, stream11 >>>(dI20, d_odata_dI20, size);
reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize, stream12 >>>(dI21, d_odata_dI21, size);
reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize, stream13 >>>(dI22, d_odata_dI22, size);
break;
case 2:
reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize, stream01 >>>(dv, d_odata_dv, size);
reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize, stream02 >>>(dcom0, d_odata_dcom0, size);
reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize, stream03 >>>(dcom1, d_odata_dcom1, size);
reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize, stream04 >>>(dcom2, d_odata_dcom2, size);
reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize, stream05 >>>(dI00, d_odata_dI00, size);
reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize, stream06 >>>(dI01, d_odata_dI01, size);
reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize, stream07 >>>(dI02, d_odata_dI02, size);
reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize, stream08 >>>(dI10, d_odata_dI10, size);
reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize, stream09 >>>(dI11, d_odata_dI11, size);
reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize, stream10 >>>(dI12, d_odata_dI12, size);
reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize, stream11 >>>(dI20, d_odata_dI20, size);
reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize, stream12 >>>(dI21, d_odata_dI21, size);
reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize, stream13 >>>(dI22, d_odata_dI22, size);
break;
case 1:
reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize, stream01 >>>(dv, d_odata_dv, size);
reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize, stream02 >>>(dcom0, d_odata_dcom0, size);
reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize, stream03 >>>(dcom1, d_odata_dcom1, size);
reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize, stream04 >>>(dcom2, d_odata_dcom2, size);
reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize, stream05 >>>(dI00, d_odata_dI00, size);
reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize, stream06 >>>(dI01, d_odata_dI01, size);
reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize, stream07 >>>(dI02, d_odata_dI02, size);
reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize, stream08 >>>(dI10, d_odata_dI10, size);
reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize, stream09 >>>(dI11, d_odata_dI11, size);
reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize, stream10 >>>(dI12, d_odata_dI12, size);
reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize, stream11 >>>(dI20, d_odata_dI20, size);
reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize, stream12 >>>(dI21, d_odata_dI21, size);
reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize, stream13 >>>(dI22, d_odata_dI22, size);
break;
}
}
break;
}
/* Destroy the streams */
cudaStreamDestroy(stream01); cudaStreamDestroy(stream02);
cudaStreamDestroy(stream03); cudaStreamDestroy(stream04);
cudaStreamDestroy(stream05); cudaStreamDestroy(stream06);
cudaStreamDestroy(stream07); cudaStreamDestroy(stream08);
cudaStreamDestroy(stream09); cudaStreamDestroy(stream10);
cudaStreamDestroy(stream11); cudaStreamDestroy(stream12);
	cudaStreamDestroy(stream13);
}
/* Instantiate, but just for floats for now */
template void
reducedI<float>(int size, int numThreads, int numBlocks, int whichKernel,
float *dv, float *dcom0, float *dcom1, float *dcom2, float *dI00,
float *dI01, float *dI02, float *dI10, float *dI11, float *dI12,
float *dI20, float *dI21, float *dI22, float *d_odata_dv, float
*d_odata_dcom0, float *d_odata_dcom1, float *d_odata_dcom2, float
*d_odata_dI00, float *d_odata_dI01, float *d_odata_dI02, float
*d_odata_dI10, float *d_odata_dI11, float *d_odata_dI12, float
*d_odata_dI20, float *d_odata_dI21, float *d_odata_dI22);
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////
template <class T>
void
maxz(int size, int threads, int blocks,
int whichKernel, T *d_idata, T *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
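/* Illustrative examples (not from the original source): with threads = 32 a block
 * gets 2*32*sizeof(T) bytes of shared memory (the extra warp's worth avoids
 * out-of-bounds indexing during the final warp-level reduction); with threads = 256
 * it gets the usual 256*sizeof(T) bytes. */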
// choose which of the optimized versions of reduction to launch
switch (whichKernel)
{
case 6:
default:
if (isPow2(size))
{
switch (threads)
{
case 512:
maxz6<T, 512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
maxz6<T, 256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
maxz6<T, 128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
maxz6<T, 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
maxz6<T, 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
maxz6<T, 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
maxz6<T, 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
maxz6<T, 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
maxz6<T, 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
maxz6<T, 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
}
else
{
switch (threads)
{
case 512:
maxz6<T, 512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
maxz6<T, 256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
maxz6<T, 128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
maxz6<T, 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
maxz6<T, 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
maxz6<T, 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
maxz6<T, 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
maxz6<T, 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
maxz6<T, 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
maxz6<T, 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
}
break;
}
}
// Instantiate the maxz function for 3 types
template void
maxz<int>(int size, int threads, int blocks,
int whichKernel, int *d_idata, int *d_odata);
template void
maxz<float>(int size, int threads, int blocks,
int whichKernel, float *d_idata, float *d_odata);
template void
maxz<double>(int size, int threads, int blocks,
int whichKernel, double *d_idata, double *d_odata);
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////
template <class T>
void
minz(int size, int threads, int blocks,
int whichKernel, T *d_idata, T *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
// choose which of the optimized versions of reduction to launch
switch (whichKernel)
{
case 6:
default:
if (isPow2(size))
{
switch (threads)
{
case 512:
minz6<T, 512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
minz6<T, 256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
minz6<T, 128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
minz6<T, 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
minz6<T, 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
minz6<T, 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
minz6<T, 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
minz6<T, 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
minz6<T, 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
minz6<T, 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
}
else
{
switch (threads)
{
case 512:
minz6<T, 512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
minz6<T, 256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
minz6<T, 128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
minz6<T, 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
minz6<T, 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
minz6<T, 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
minz6<T, 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
minz6<T, 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
minz6<T, 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
minz6<T, 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
}
break;
}
}
// Instantiate the minz function for 3 types
template void
minz<int>(int size, int threads, int blocks,
int whichKernel, int *d_idata, int *d_odata);
template void
minz<float>(int size, int threads, int blocks,
int whichKernel, float *d_idata, float *d_odata);
template void
minz<double>(int size, int threads, int blocks,
int whichKernel, double *d_idata, double *d_odata);
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////
template <class T>
void
maxzexp(int size, int threads, int blocks,
int whichKernel, T *d_idata1, T *d_idata2, T *d_idata3, T *d_idata4,
T *d_odata1, T *d_odata2, T *d_odata3, T *d_odata4,
cudaStream_t *stream1, cudaStream_t *stream2, cudaStream_t *stream3,
cudaStream_t *stream4)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
// choose which of the optimized versions of reduction to launch
switch (whichKernel)
{
case 6:
default:
if (isPow2(size))
{
switch (threads)
{
case 512:
maxz6<T, 512, true><<< dimGrid, dimBlock, smemSize, *stream1 >>>(d_idata1, d_odata1, size);
maxz6<T, 512, true><<< dimGrid, dimBlock, smemSize, *stream2 >>>(d_idata2, d_odata2, size);
minz6<T, 512, true><<< dimGrid, dimBlock, smemSize, *stream3 >>>(d_idata3, d_odata3, size);
minz6<T, 512, true><<< dimGrid, dimBlock, smemSize, *stream4 >>>(d_idata4, d_odata4, size);
break;
case 256:
maxz6<T, 256, true><<< dimGrid, dimBlock, smemSize, *stream1 >>>(d_idata1, d_odata1, size);
maxz6<T, 256, true><<< dimGrid, dimBlock, smemSize, *stream2 >>>(d_idata2, d_odata2, size);
minz6<T, 256, true><<< dimGrid, dimBlock, smemSize, *stream3 >>>(d_idata3, d_odata3, size);
minz6<T, 256, true><<< dimGrid, dimBlock, smemSize, *stream4 >>>(d_idata4, d_odata4, size);
break;
case 128:
maxz6<T, 128, true><<< dimGrid, dimBlock, smemSize, *stream1 >>>(d_idata1, d_odata1, size);
maxz6<T, 128, true><<< dimGrid, dimBlock, smemSize, *stream2 >>>(d_idata2, d_odata2, size);
minz6<T, 128, true><<< dimGrid, dimBlock, smemSize, *stream3 >>>(d_idata3, d_odata3, size);
minz6<T, 128, true><<< dimGrid, dimBlock, smemSize, *stream4 >>>(d_idata4, d_odata4, size);
break;
case 64:
maxz6<T, 64, true><<< dimGrid, dimBlock, smemSize, *stream1 >>>(d_idata1, d_odata1, size);
maxz6<T, 64, true><<< dimGrid, dimBlock, smemSize, *stream2 >>>(d_idata2, d_odata2, size);
minz6<T, 64, true><<< dimGrid, dimBlock, smemSize, *stream3 >>>(d_idata3, d_odata3, size);
minz6<T, 64, true><<< dimGrid, dimBlock, smemSize, *stream4 >>>(d_idata4, d_odata4, size);
break;
case 32:
maxz6<T, 32, true><<< dimGrid, dimBlock, smemSize, *stream1 >>>(d_idata1, d_odata1, size);
maxz6<T, 32, true><<< dimGrid, dimBlock, smemSize, *stream2 >>>(d_idata2, d_odata2, size);
minz6<T, 32, true><<< dimGrid, dimBlock, smemSize, *stream3 >>>(d_idata3, d_odata3, size);
minz6<T, 32, true><<< dimGrid, dimBlock, smemSize, *stream4 >>>(d_idata4, d_odata4, size);
break;
case 16:
maxz6<T, 16, true><<< dimGrid, dimBlock, smemSize, *stream1 >>>(d_idata1, d_odata1, size);
maxz6<T, 16, true><<< dimGrid, dimBlock, smemSize, *stream2 >>>(d_idata2, d_odata2, size);
minz6<T, 16, true><<< dimGrid, dimBlock, smemSize, *stream3 >>>(d_idata3, d_odata3, size);
minz6<T, 16, true><<< dimGrid, dimBlock, smemSize, *stream4 >>>(d_idata4, d_odata4, size);
break;
case 8:
maxz6<T, 8, true><<< dimGrid, dimBlock, smemSize, *stream1 >>>(d_idata1, d_odata1, size);
maxz6<T, 8, true><<< dimGrid, dimBlock, smemSize, *stream2 >>>(d_idata2, d_odata2, size);
minz6<T, 8, true><<< dimGrid, dimBlock, smemSize, *stream3 >>>(d_idata3, d_odata3, size);
minz6<T, 8, true><<< dimGrid, dimBlock, smemSize, *stream4 >>>(d_idata4, d_odata4, size);
break;
case 4:
maxz6<T, 4, true><<< dimGrid, dimBlock, smemSize, *stream1 >>>(d_idata1, d_odata1, size);
maxz6<T, 4, true><<< dimGrid, dimBlock, smemSize, *stream2 >>>(d_idata2, d_odata2, size);
minz6<T, 4, true><<< dimGrid, dimBlock, smemSize, *stream3 >>>(d_idata3, d_odata3, size);
minz6<T, 4, true><<< dimGrid, dimBlock, smemSize, *stream4 >>>(d_idata4, d_odata4, size);
break;
case 2:
maxz6<T, 2, true><<< dimGrid, dimBlock, smemSize, *stream1 >>>(d_idata1, d_odata1, size);
maxz6<T, 2, true><<< dimGrid, dimBlock, smemSize, *stream2 >>>(d_idata2, d_odata2, size);
minz6<T, 2, true><<< dimGrid, dimBlock, smemSize, *stream3 >>>(d_idata3, d_odata3, size);
minz6<T, 2, true><<< dimGrid, dimBlock, smemSize, *stream4 >>>(d_idata4, d_odata4, size);
break;
case 1:
maxz6<T, 1, true><<< dimGrid, dimBlock, smemSize, *stream1 >>>(d_idata1, d_odata1, size);
maxz6<T, 1, true><<< dimGrid, dimBlock, smemSize, *stream2 >>>(d_idata2, d_odata2, size);
minz6<T, 1, true><<< dimGrid, dimBlock, smemSize, *stream3 >>>(d_idata3, d_odata3, size);
minz6<T, 1, true><<< dimGrid, dimBlock, smemSize, *stream4 >>>(d_idata4, d_odata4, size);
break;
}
}
else
{
switch (threads)
{
case 512:
maxz6<T, 512, false><<< dimGrid, dimBlock, smemSize, *stream1 >>>(d_idata1, d_odata1, size);
maxz6<T, 512, false><<< dimGrid, dimBlock, smemSize, *stream2 >>>(d_idata2, d_odata2, size);
minz6<T, 512, false><<< dimGrid, dimBlock, smemSize, *stream3 >>>(d_idata3, d_odata3, size);
minz6<T, 512, false><<< dimGrid, dimBlock, smemSize, *stream4 >>>(d_idata4, d_odata4, size);
break;
case 256:
maxz6<T, 256, false><<< dimGrid, dimBlock, smemSize, *stream1 >>>(d_idata1, d_odata1, size);
maxz6<T, 256, false><<< dimGrid, dimBlock, smemSize, *stream2 >>>(d_idata2, d_odata2, size);
minz6<T, 256, false><<< dimGrid, dimBlock, smemSize, *stream3 >>>(d_idata3, d_odata3, size);
minz6<T, 256, false><<< dimGrid, dimBlock, smemSize, *stream4 >>>(d_idata4, d_odata4, size);
break;
case 128:
maxz6<T, 128, false><<< dimGrid, dimBlock, smemSize, *stream1 >>>(d_idata1, d_odata1, size);
maxz6<T, 128, false><<< dimGrid, dimBlock, smemSize, *stream2 >>>(d_idata2, d_odata2, size);
minz6<T, 128, false><<< dimGrid, dimBlock, smemSize, *stream3 >>>(d_idata3, d_odata3, size);
minz6<T, 128, false><<< dimGrid, dimBlock, smemSize, *stream4 >>>(d_idata4, d_odata4, size);
break;
case 64:
maxz6<T, 64, false><<< dimGrid, dimBlock, smemSize, *stream1 >>>(d_idata1, d_odata1, size);
maxz6<T, 64, false><<< dimGrid, dimBlock, smemSize, *stream2 >>>(d_idata2, d_odata2, size);
minz6<T, 64, false><<< dimGrid, dimBlock, smemSize, *stream3 >>>(d_idata3, d_odata3, size);
minz6<T, 64, false><<< dimGrid, dimBlock, smemSize, *stream4 >>>(d_idata4, d_odata4, size);
break;
case 32:
maxz6<T, 32, false><<< dimGrid, dimBlock, smemSize, *stream1 >>>(d_idata1, d_odata1, size);
maxz6<T, 32, false><<< dimGrid, dimBlock, smemSize, *stream2 >>>(d_idata2, d_odata2, size);
minz6<T, 32, false><<< dimGrid, dimBlock, smemSize, *stream3 >>>(d_idata3, d_odata3, size);
minz6<T, 32, false><<< dimGrid, dimBlock, smemSize, *stream4 >>>(d_idata4, d_odata4, size);
break;
case 16:
maxz6<T, 16, false><<< dimGrid, dimBlock, smemSize, *stream1 >>>(d_idata1, d_odata1, size);
maxz6<T, 16, false><<< dimGrid, dimBlock, smemSize, *stream2 >>>(d_idata2, d_odata2, size);
minz6<T, 16, false><<< dimGrid, dimBlock, smemSize, *stream3 >>>(d_idata3, d_odata3, size);
minz6<T, 16, false><<< dimGrid, dimBlock, smemSize, *stream4 >>>(d_idata4, d_odata4, size);
break;
case 8:
maxz6<T, 8, false><<< dimGrid, dimBlock, smemSize, *stream1 >>>(d_idata1, d_odata1, size);
maxz6<T, 8, false><<< dimGrid, dimBlock, smemSize, *stream2 >>>(d_idata2, d_odata2, size);
minz6<T, 8, false><<< dimGrid, dimBlock, smemSize, *stream3 >>>(d_idata3, d_odata3, size);
minz6<T, 8, false><<< dimGrid, dimBlock, smemSize, *stream4 >>>(d_idata4, d_odata4, size);
break;
case 4:
maxz6<T, 4, false><<< dimGrid, dimBlock, smemSize, *stream1 >>>(d_idata1, d_odata1, size);
maxz6<T, 4, false><<< dimGrid, dimBlock, smemSize, *stream2 >>>(d_idata2, d_odata2, size);
minz6<T, 4, false><<< dimGrid, dimBlock, smemSize, *stream3 >>>(d_idata3, d_odata3, size);
minz6<T, 4, false><<< dimGrid, dimBlock, smemSize, *stream4 >>>(d_idata4, d_odata4, size);
break;
case 2:
maxz6<T, 2, false><<< dimGrid, dimBlock, smemSize, *stream1 >>>(d_idata1, d_odata1, size);
maxz6<T, 2, false><<< dimGrid, dimBlock, smemSize, *stream2 >>>(d_idata2, d_odata2, size);
minz6<T, 2, false><<< dimGrid, dimBlock, smemSize, *stream3 >>>(d_idata3, d_odata3, size);
minz6<T, 2, false><<< dimGrid, dimBlock, smemSize, *stream4 >>>(d_idata4, d_odata4, size);
break;
case 1:
maxz6<T, 1, false><<< dimGrid, dimBlock, smemSize, *stream1 >>>(d_idata1, d_odata1, size);
maxz6<T, 1, false><<< dimGrid, dimBlock, smemSize, *stream2 >>>(d_idata2, d_odata2, size);
minz6<T, 1, false><<< dimGrid, dimBlock, smemSize, *stream3 >>>(d_idata3, d_odata3, size);
minz6<T, 1, false><<< dimGrid, dimBlock, smemSize, *stream4 >>>(d_idata4, d_odata4, size);
break;
}
}
break;
}
}
// Instantiate the maxzexp function for 3 types
template void
maxzexp<int>(int size, int threads, int blocks, int whichKernel, int *d_idata1,
int *d_idata2, int *d_idata3, int *d_idata4, int *d_odata1, int *d_odata2,
int *d_odata3, int *d_odata4, cudaStream_t *stream1, cudaStream_t *stream2,
cudaStream_t *stream3, cudaStream_t *stream4);
template void
maxzexp<float>(int size, int threads, int blocks, int whichKernel, float *d_idata1,
float *d_idata2, float *d_idata3, float *d_idata4, float *d_odata1,
float *d_odata2, float *d_odata3, float *d_odata4, cudaStream_t *stream1,
cudaStream_t *stream2, cudaStream_t *stream3, cudaStream_t *stream4);
template void
maxzexp<double>(int size, int threads, int blocks, int whichKernel, double *d_idata1,
double *d_idata2, double *d_idata3, double *d_idata4, double *d_odata1,
double *d_odata2, double *d_odata3, double *d_odata4, cudaStream_t *stream1,
cudaStream_t *stream2, cudaStream_t *stream3, cudaStream_t *stream4);
__global__ void set_idata_zmax_krnl(struct dat_t *ddat, float *d_idata,
int set, int frm, int size) {
/* MULTI-threaded kernel */
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size){
d_idata[i] = ddat->set[set].desc.deldop.frame[frm].pos.z_s[i];
}
}
__global__ void set_idata_zmax_all_frames_krnl(struct dat_t *ddat,
float *d_idata, int set, int frm, int frame_size) {
/* MULTI-threaded kernel */
int offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < frame_size){
d_idata[offset] = ddat->set[set].desc.deldop.frame[frm].pos.z_s[offset];
}
}
__global__ void set_idata_pntr_krnl(struct dat_t *ddat, float *d_idata,
int set, int frm, int size) {
/* MULTI-threaded kernel */
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size){
switch (ddat->set[set].type) {
case DELAY:
d_idata[i] = ddat->set[set].desc.deldop.frame[frm].fit_s[i];
break;
case DOPPLER:
d_idata[i] = ddat->set[set].desc.doppler.frame[frm].fit_s[i];
break;
}
}
}
__global__ void set_idata_modarea_krnl(struct mod_t *dmod, float *d_idata,
int c, int size) {
/* MULTI-threaded kernel */
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
d_idata[i] = dmod->shape.comp[c].real.f[i].area;
}
__global__ void set_dv_dcom_di_krnl(float *d_odata_dv, float *d_odata_dcom0,
float *d_odata_dcom1, float *d_odata_dcom2, float *d_odata_dI00,
float *d_odata_dI01, float *d_odata_dI02, float *d_odata_dI10,
float *d_odata_dI11, float *d_odata_dI12, float *d_odata_dI20,
float *d_odata_dI21, float *d_odata_dI22, struct mod_t *dmod, int c) {
/* Single threaded kernel to update the model with volume, COM, and inertia */
/* Note that this kernel ignores multi-component models for now */
if (threadIdx.x == 0) {
dmod->shape.comp[c].volume = dmod->shape.volume = d_odata_dv[0];
dmod->shape.comp[c].com[0] = dmod->shape.com[0] = d_odata_dcom0[0];
dmod->shape.comp[c].com[1] = dmod->shape.com[1] = d_odata_dcom1[0];
dmod->shape.comp[c].com[2] = dmod->shape.com[2] = d_odata_dcom2[0];
dmod->shape.comp[c].inertia[0][0] = dmod->shape.inertia[0][0] = d_odata_dI00[0];
dmod->shape.comp[c].inertia[0][1] = dmod->shape.inertia[0][1] = d_odata_dI01[0];
dmod->shape.comp[c].inertia[0][2] = dmod->shape.inertia[0][2] = d_odata_dI02[0];
dmod->shape.comp[c].inertia[1][0] = dmod->shape.inertia[1][0] = d_odata_dI10[0];
dmod->shape.comp[c].inertia[1][1] = dmod->shape.inertia[1][1] = d_odata_dI11[0];
dmod->shape.comp[c].inertia[1][2] = dmod->shape.inertia[1][2] = d_odata_dI12[0];
dmod->shape.comp[c].inertia[2][0] = dmod->shape.inertia[2][0] = d_odata_dI20[0];
dmod->shape.comp[c].inertia[2][1] = dmod->shape.inertia[2][1] = d_odata_dI21[0];
dmod->shape.comp[c].inertia[2][2] = dmod->shape.inertia[2][2] = d_odata_dI22[0];
}
}
__global__ void set_xlim_ylim_krnl(struct dat_t *ddat, int set, int frm, int src,
float *d_odata_imax, float *d_odata_imin, float *d_odata_jmax,
float *d_odata_jmin, float *minmax_overall) {
/* Single-threaded kernel to update pos->xlim and ylim and also the overall
* model limits regardless of the POS limits */
int n;
if (threadIdx.x == 0) {
/* First set the overall model limits, regardless of POS frame limits */
minmax_overall[0] = d_odata_imin[0];
minmax_overall[1] = d_odata_imax[0];
minmax_overall[2] = d_odata_jmin[0];
minmax_overall[3] = d_odata_jmax[0];
/* Now set pos->xlim and pos->ylim depending on data type */
switch(ddat->set[set].type) {
case DELAY:
n = ddat->set[set].desc.deldop.frame[frm].pos.n;
if (src) {
ddat->set[set].desc.deldop.frame[frm].pos.xlim2[0] = max((int)d_odata_imin[0], -n);
ddat->set[set].desc.deldop.frame[frm].pos.xlim2[1] = min((int)d_odata_imax[0], n);
ddat->set[set].desc.deldop.frame[frm].pos.ylim2[0] = max((int)d_odata_jmin[0], -n);
ddat->set[set].desc.deldop.frame[frm].pos.ylim2[1] = min((int)d_odata_jmax[0], n);
}
else {
ddat->set[set].desc.deldop.frame[frm].pos.xlim[0] = max((int)d_odata_imin[0], -n);
ddat->set[set].desc.deldop.frame[frm].pos.xlim[1] = min((int)d_odata_imax[0], n);
ddat->set[set].desc.deldop.frame[frm].pos.ylim[0] = max((int)d_odata_jmin[0], -n);
ddat->set[set].desc.deldop.frame[frm].pos.ylim[1] = min((int)d_odata_jmax[0], n);
}
break;
case DOPPLER:
n = ddat->set[set].desc.doppler.frame[frm].pos.n;
if (src) {
ddat->set[set].desc.doppler.frame[frm].pos.xlim2[0] = max((int)d_odata_imin[0], -n);
ddat->set[set].desc.doppler.frame[frm].pos.xlim2[1] = min((int)d_odata_imax[0], n);
ddat->set[set].desc.doppler.frame[frm].pos.ylim2[0] = max((int)d_odata_jmin[0], -n);
ddat->set[set].desc.doppler.frame[frm].pos.ylim2[1] = min((int)d_odata_jmax[0], n);
}
else {
ddat->set[set].desc.doppler.frame[frm].pos.xlim[0] = max((int)d_odata_imin[0], -n);
ddat->set[set].desc.doppler.frame[frm].pos.xlim[1] = min((int)d_odata_imax[0], n);
ddat->set[set].desc.doppler.frame[frm].pos.ylim[0] = max((int)d_odata_jmin[0], -n);
ddat->set[set].desc.doppler.frame[frm].pos.ylim[1] = min((int)d_odata_jmax[0], n);
}
break;
case POS:
// if (src) {
// ddat->set[set].desc.poset.frame[frm].pos.xlim2[0] = max((int)d_odata_imin[0], -n);
// ddat->set[set].desc.poset.frame[frm].pos.xlim2[1] = min((int)d_odata_imax[0], n);
// ddat->set[set].desc.poset.frame[frm].pos.ylim2[0] = max((int)d_odata_jmin[0], -n);
// ddat->set[set].desc.poset.frame[frm].pos.ylim2[1] = min((int)d_odata_jmax[0], n);
// }
// else {
// ddat->set[set].desc.poset.frame[frm].pos.xlim[0] = max((int)d_odata_imin[0], -n);
// ddat->set[set].desc.poset.frame[frm].pos.xlim[1] = min((int)d_odata_imax[0], n);
// ddat->set[set].desc.poset.frame[frm].pos.ylim[0] = max((int)d_odata_jmin[0], -n);
// ddat->set[set].desc.poset.frame[frm].pos.ylim[1] = min((int)d_odata_jmax[0], n);
// }
// break;
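/* Note: with the POS branch and its break commented out above, a POS-type set
 * currently falls through to the LGHTCRV case below. */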
case LGHTCRV:
n = ddat->set[set].desc.lghtcrv.rend[frm].pos.n;
if (src) {
ddat->set[set].desc.lghtcrv.rend[frm].pos.xlim2[0] = max((int)d_odata_imin[0], -n);
ddat->set[set].desc.lghtcrv.rend[frm].pos.xlim2[1] = min((int)d_odata_imax[0], n);
ddat->set[set].desc.lghtcrv.rend[frm].pos.ylim2[0] = max((int)d_odata_jmin[0], -n);
ddat->set[set].desc.lghtcrv.rend[frm].pos.ylim2[1] = min((int)d_odata_jmax[0], n);
}
else {
ddat->set[set].desc.lghtcrv.rend[frm].pos.xlim[0] = max((int)d_odata_imin[0], -n);
ddat->set[set].desc.lghtcrv.rend[frm].pos.xlim[1] = min((int)d_odata_imax[0], n);
ddat->set[set].desc.lghtcrv.rend[frm].pos.ylim[0] = max((int)d_odata_jmin[0], -n);
ddat->set[set].desc.lghtcrv.rend[frm].pos.ylim[1] = min((int)d_odata_jmax[0], n);
}
break;
}
}
}
__global__ void zmax_all_frames_finalize_krnl(struct dat_t *ddat, float *zmax,
int nframes, int s) {
/* single-threaded kernel */
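/* Computes the frame-weighted sum zmax[0] = sum over f of zmax[f] * weight[f],
 * using each delay-Doppler frame's weight. */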
float sum = 0.0;
if (threadIdx.x == 0) {
for (int f=0; f<nframes; f++)
sum += (zmax[f] * ddat->set[s].desc.deldop.frame[f].weight);
zmax[0] = sum;
}
}
__global__ void deldop_xsec_frame_finalize_krnl(struct dat_t *ddat,
int s, int f, float value, float *xsec) {
/* Single-threaded kernel */
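/* Per-frame cross section: xsec[f] = (overflow_xsec + reduced fit sum) * cal.val,
 * where 'value' is the block-reduced sum of the frame's fit array. */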
if (threadIdx.x == 0) {
xsec[f] = ddat->set[s].desc.deldop.frame[f].overflow_xsec;
xsec[f] += value;
xsec[f] *= ddat->set[s].desc.deldop.frame[f].cal.val;
}
}
__global__ void deldop_xsec_set_finalize_krnl(struct dat_t *ddat,
int s, float *xsec, int nframes) {
/* Single-threaded kernel */
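/* Accumulates the weighted per-frame cross sections into the device-global
 * reduction_sum_rad_xsec, which the host later reads back with
 * cudaMemcpyFromSymbol (see compute_deldop_xsec_all_frames below). */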
if (threadIdx.x == 0) {
for (int f=0; f<nframes; f++)
reduction_sum_rad_xsec += xsec[f]*ddat->set[s].desc.deldop.frame[f].weight;
}
}
__global__ void c2af_set_data_krnl(float **input, float *d_idata, int frm, int frmsz) {
/* frmsz-threaded kernel */
int offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < frmsz) {
d_idata[offset] = input[frm][offset];
}
}
__host__ float compute_deldop_xsec_pr6(struct dat_t *ddat, int ndel, int ndop,
int set, int frm) {
/* Function calculates a delay-Doppler frame's radar cross section with
* Nvidia's reduction sample code (simplified and adapted for use with
* shape). The function returns the cross section as a float */
int s, size = ndel*ndop; // array size
int maxThreads = maxThreadsPerBlock; // max # of threads per block
int maxBlocks = 2048; // max # of blocks per grid
int whichKernel = 6; // id of reduction kernel
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
float xsec = 0.0; // radar cross section; return value
float *d_odata; // temp. float array for reduction output
float *d_idata; // temp. float arrays for reduction input
float *h_odata; // the host output array
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
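/* Illustrative example (not from the original source): BLK.x is the usual
 * ceiling division, e.g. size = 1000 with maxThreadsPerBlock = 256 gives
 * BLK.x = (256 - 1 + 1000)/256 = 4 blocks of 256 threads. */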
/* Allocate memory for d_idata, then set that pointer equal to the right
* data set and frame to the right deldop fit array */
cudaCalloc((void**)&d_idata, sizeof(float), size);
set_idata_pntr_krnl<<<BLK,THD>>>(ddat, d_idata, set, frm, size);
checkErrorAfterKernelLaunch("set_idata_pntr_krnl in compute_deldop_xsec");
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
/* Allocate memory for d_odata and d_odata2 with enough elements to hold
* the reduction of each block during the first call */
cudaCalloc((void**)&d_odata, sizeof(float), numBlocks);
h_odata = (float *) malloc(numBlocks*sizeof(float));
/* Call reduction for first time */
reduce<float>(size, numThreads, numBlocks, whichKernel, d_idata, d_odata);
checkErrorAfterKernelLaunch("reduce<float> in compute_deldop_xsec");
/* Reset d_idata for later use as buffer */
cudaMemset(d_idata, 0, size*sizeof(float));
/* Now sum partial block sums on GPU, using the reduce6<> kernel */
s = numBlocks;
while (s > 1)
{
int threads = 0, blocks = 0;
xblock_ythread = getNumBlocksAndThreads(s, maxBlocks, maxThreads);
blocks = xblock_ythread.x;
threads = xblock_ythread.y;
/* Copy the first d_odata back into d_idata2 */
cudaMemcpy(d_idata, d_odata, s*sizeof(float), cudaMemcpyDeviceToDevice);
reduce<float>(s, threads, blocks, whichKernel, d_idata, d_odata);
if (whichKernel < 3)
s = (s + threads - 1) / threads;
else
s = (s + (threads*2-1)) / (threads*2);
if (s > 1)
printf("s is bigger than one");
}
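/* Illustrative example (not from the original source): for kernel 6 each pass
 * shrinks s to ceil(s / (threads*2)); e.g. numBlocks = 64 with threads = 64
 * reduces to s = (64 + 127)/128 = 1 after a single extra pass. */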
gpuErrchk(cudaMemcpy(h_odata, d_odata, 1*sizeof(float), cudaMemcpyDeviceToHost));
xsec = h_odata[0];
free(h_odata);
cudaFree(d_odata);
cudaFree(d_idata);
return xsec;
}
__host__ float compute_deldop_xsec_snglkrnl(struct dat_t *ddat, int ndel, int ndop,
int set, int frm) {
/* Function calculates a delay-Doppler frame's radar cross section with
* Nvidia's reduction sample code (simplified and adapted for use with
* shape). The function returns the cross section as a float */
int size = ndel*ndop; // array size
int maxThreads = maxThreadsPerBlock; // max # of threads per block
int maxBlocks = 2048; // max # of blocks per grid
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
float xsec = 0.0; // radar cross section; return value
float *d_odata; // temp. float array for reduction output
float *d_idata; // temp. float arrays for reduction input
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
dim3 dimBlock(numThreads, 1, 1);
dim3 dimGrid(numBlocks, 1, 1);
/* Allocate memory for d_idata and d_odata */
cudaCalloc((void**)&d_idata, sizeof(float), size);
cudaCalloc((void**)&d_odata, sizeof(float), numBlocks);
set_idata_pntr_krnl<<<BLK,THD>>>(ddat, d_idata, set, frm, size);
checkErrorAfterKernelLaunch("set_idata_pntr_krnl in compute_deldop_xsec");
/* Call reduction */
device_reduce_block_atomic_kernel<<< dimGrid, dimBlock>>>(d_idata, d_odata, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (compute_deldop_xsec_snglkrnl)");
deviceSyncAfterKernelLaunch("device_reduce_block_atomic_kernel (compute_deldop_xsec_snglkrnl)");
xsec = d_odata[0];
cudaFree(d_odata);
cudaFree(d_idata);
return xsec;
}
__host__ float compute_deldop_xsec_all_frames(struct dat_t *ddat, int ndel, int ndop,
int set, int nframes) {
/* Function calculates the radar cross section of every frame in a delay-Doppler
 * set with Nvidia's reduction sample code (simplified and adapted for use with
 * shape). It returns the frame-weighted set total as a float */
int size = ndel*ndop; // array size
int maxThreads = maxThreadsPerBlock; // max # of threads per block
int maxBlocks = 2048; // max # of blocks per grid
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
float *xsec, xsec_set; // per-frame cross sections and weighted set total (return value)
float *d_odata; // temp. float array for reduction output
float *d_idata; // temp. float arrays for reduction input
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
dim3 dimBlock(numThreads, 1, 1);
dim3 dimGrid(numBlocks, 1, 1);
/* Allocate memory for d_idata and d_odata */
cudaCalloc((void**)&d_idata, sizeof(float), size);
cudaMallocManaged((void**)&d_odata, sizeof(float)*numBlocks, cudaMemAttachHost);
//cudaMallocManaged((void**)&xsec, sizeof(float)*nframes, cudaMemAttachHost);
cudaCalloc((void**)&xsec, sizeof(float), nframes);
for (int i=0; i<nframes; i++)
xsec[i]=0.0;
/* Start loop through frames */
for (int f=0; f<nframes; f++) {
//set_idata_zmax_all_frames_krnl<<<BLK,THD>>>(ddat, d_idata, set, f, size);
set_idata_pntr_krnl<<<BLK,THD>>>(ddat, d_idata, set, f, size);
checkErrorAfterKernelLaunch("set_idata_pntr_krnl in compute_deldop_xsec_all_frames");
/* Call reduction */
device_reduce_block_atomic_kernel<<< dimGrid, dimBlock>>>(d_idata, d_odata, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (compute_deldop_xsec_all_frames)");
//deviceSyncAfterKernelLaunch("device_reduce_block_atomic_kernel (compute_deldop_xsec_all_frames)");
/* Calculate frames weighting */
deldop_xsec_frame_finalize_krnl<<<1,1>>>(ddat, set, f, d_odata[0], xsec);
checkErrorAfterKernelLaunch("deldop_xsec_frame_finalize_krnl");
deviceSyncAfterKernelLaunch("");
}
/* Now finalize the set and return value */
deldop_xsec_set_finalize_krnl<<<1,1>>>(ddat, set, xsec, nframes);
checkErrorAfterKernelLaunch("deldop_xsec_set_finalize_krnl");
deviceSyncAfterKernelLaunch("deldop_xsec_set_finalize_krnl");
gpuErrchk(cudaMemcpyFromSymbol(&xsec_set, reduction_sum_rad_xsec,
sizeof(float), 0, cudaMemcpyDeviceToHost));
cudaFree(d_odata);
cudaFree(d_idata);
cudaFree(xsec);
return xsec_set;
}
__host__ float compute_pos_zmax(struct dat_t *ddat, int size,
int set, int frm) {
/* Function calculates the maximum value in a pos->z array with
 * Nvidia's reduction sample code (simplified and adapted for use with
 * shape). The function returns the maximum z value as a float */
int s; // number of remaining partial results to reduce
int maxThreads = maxThreadsPerBlock; // max # of threads per block
int maxBlocks = 2048; // max # of blocks per grid
int whichKernel = 6; // id of reduction kernel
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
float zmax = 0.0; // maximum z value; return value
float *d_odata; // temp. float array for reduction output
float *d_idata; // temp. float arrays for reduction input
float *h_odata; // the host output array
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
/* Allocate memory for d_idata, then set that pointer equal to the right
* data set and frame to the right deldop fit array */
cudaCalloc((void**)&d_idata, sizeof(float), size);
set_idata_zmax_krnl<<<BLK,THD>>>(ddat, d_idata, set, frm, size);
checkErrorAfterKernelLaunch("set_idata_zmax_krnl in compute_zmax");
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
/* Allocate memory for d_odata and d_odata2 with enough elements to hold
* the reduction of each block during the first call */
cudaCalloc((void**)&d_odata, sizeof(float), numBlocks);
h_odata = (float *) malloc(numBlocks*sizeof(float));
/* Call maxz for first time */
maxz<float>(size, numThreads, numBlocks, whichKernel, d_idata, d_odata);
checkErrorAfterKernelLaunch("maxz<float> in compute_zmax");
/* Reset d_idata for later use as buffer */
cudaMemset(d_idata, 0, size*sizeof(float));
/* Now sum partial block sums on GPU, using the maxz6<> kernel */
s = numBlocks;
while (s > 1)
{
int threads = 0, blocks = 0;
xblock_ythread = getNumBlocksAndThreads(s, maxBlocks, maxThreads);
blocks = xblock_ythread.x;
threads = xblock_ythread.y;
/* Copy the first d_odata back into d_idata2 */
cudaMemcpy(d_idata, d_odata, s*sizeof(float), cudaMemcpyDeviceToDevice);
maxz<float>(s, threads, blocks, whichKernel, d_idata, d_odata);
if (whichKernel < 3)
s = (s + threads - 1) / threads;
else
s = (s + (threads*2-1)) / (threads*2);
if (s > 1)
printf("s is bigger than one");
}
gpuErrchk(cudaMemcpy(h_odata, d_odata, 1*sizeof(float), cudaMemcpyDeviceToHost));
zmax = h_odata[0];
free(h_odata);
cudaFree(d_odata);
cudaFree(d_idata);
return zmax;
}
//__host__ float compute_pos_zmax_streams(struct dat_t *ddat, int size,
// int set, int nframes) {
// /* Function calculates the zmax in a pos->z array with Nvidia's reduction
// * sample code (simplified and adapted for use with shape). This version
// * uses cudaStreams to process all frames concurrently. */
//
// int s; // array size
// int maxThreads = maxThreadsPerBlock; // max # of threads per block
// int maxBlocks = 2048; // max # of blocks per grid
// int whichKernel = 6; // id of reduction kernel
// int numBlocks = 0; // initialize numBlocks
// int numThreads = 0; // initialize numThreads
// float zmax = 0.0; // radar cross section; return value
// float *d_odata; // temp. float array for reduction output
// float *d_idata; // temp. float arrays for reduction input
// float *h_odata; // the host output array
// float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
//
// dim3 BLK,THD;
// BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
// THD.x = maxThreadsPerBlock; // Thread block dimensions
//
// cudaStream_t zmax_stream[nframes];
//
//
// /* Allocate memory for d_idata, then set that pointer equal to the right
// * data set and frame to the right deldop fit array */
// cudaCalloc((void**)&d_idata, sizeof(float), size);
//
// set_idata_zmax_krnl<<<BLK,THD>>>(ddat, d_idata, set, frm, size);
// checkErrorAfterKernelLaunch("set_idata_zmax_krnl in compute_zmax");
//
// /* Find number of blocks & threads needed for reduction call */
// xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
// numBlocks = xblock_ythread.x;
// numThreads = xblock_ythread.y;
//
// /* Allocate memory for d_odata and d_odata2 with enough elements to hold
// * the reduction of each block during the first call */
// cudaCalloc((void**)&d_odata, sizeof(float), numBlocks);
// h_odata = (float *) malloc(numBlocks*sizeof(float));
//
// /* Call maxz for first time */
// maxz<float>(size, numThreads, numBlocks, whichKernel, d_idata, d_odata);
// checkErrorAfterKernelLaunch("maxz<float> in compute_zmax");
//
// /* Reset d_idata for later use as buffer */
// cudaMemset(d_idata, 0, size*sizeof(float));
//
// /* Now sum partial block sums on GPU, using the maxz6<> kernel */
// s = numBlocks;
//
// while (s > 1)
// {
// int threads = 0, blocks = 0;
// xblock_ythread = getNumBlocksAndThreads(s, maxBlocks, maxThreads);
// blocks = xblock_ythread.x;
// threads = xblock_ythread.y;
//
// /* Copy the first d_odata back into d_idata2 */
// cudaMemcpy(d_idata, d_odata, s*sizeof(float), cudaMemcpyDeviceToDevice);
//
// maxz<float>(s, threads, blocks, whichKernel, d_idata, d_odata);
//
// if (whichKernel < 3)
// s = (s + threads - 1) / threads;
// else
// s = (s + (threads*2-1)) / (threads*2);
// if (s > 1)
// printf("s is bigger than one");
// }
//
// gpuErrchk(cudaMemcpy(h_odata, d_odata, 2*sizeof(float), cudaMemcpyDeviceToHost));
// zmax = h_odata[0];
// free(h_odata);
// cudaFree(d_odata);
// cudaFree(d_idata);
// return zmax;
//}
__host__ float compute_pos_zmax_all_frames(struct dat_t *ddat, int frame_size, int set, int nframes) {
/* Function calculates the zmax per frame and then the final zmax for the set.
* Code assumes that frame_size is the same for all frames in set */
int maxBlocks = 2048; // max # of blocks per grid
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
float *zmax, final; // max z value per frame and weighted set result
float *d_odata; // temp. float array for reduction output
float *d_idata; // temp. float arrays for reduction input
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(frame_size, maxBlocks, maxThreadsPerBlock);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
dim3 dimBlock(numThreads, 1, 1);
dim3 dimGrid(numBlocks, 1, 1);
/* Need to calculate zmax per frame, then multiply by frame weight and
* add to sum_deldop_zmax, which is again weighted and then returned.
* Copy each frame's pos->z_s into a double pointer or maybe split into subsets
* of the main total_size array? */
/* Allocate memory for d_idata and d_odata */
cudaCalloc((void**)&d_idata, sizeof(float), frame_size);
cudaMallocManaged((void**)&d_odata, sizeof(float)*numBlocks, cudaMemAttachHost);
cudaMallocManaged((void**)&zmax, sizeof(float)*nframes, cudaMemAttachHost);
/* Configure data copy kernel launch */
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + frame_size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
/* Start loop through frames */
for (int f=0; f<nframes; f++) {
/* Copy input data into input array */
set_idata_zmax_all_frames_krnl<<<BLK,THD>>>(ddat, d_idata, set,
f, frame_size);
checkErrorAfterKernelLaunch("set_idata_zmax_all_frames_krnl in compute_zmax_all_frames");
deviceSyncAfterKernelLaunch("");
/* Start debug */
//dbg_print_array(d_idata, 151, 151);
/* End debug */
/* Call reduction */
device_sum_block_atomic_kernel<<< dimGrid, dimBlock>>>(d_idata, d_odata, frame_size);
checkErrorAfterKernelLaunch("device_sum_block_atomic_kernel (compute_pos_zmax_all_frames)");
deviceSyncAfterKernelLaunch("");
/* Copy zmax for this frame from the output array into the zmax array for the right frame */
zmax[f] = d_odata[0];
}
/* Now apply frame weighting factors and sum up all frames to get sum_deldop_zmax to return */
zmax_all_frames_finalize_krnl<<<1,1>>>(ddat,zmax,nframes,set);
checkErrorAfterKernelLaunch("zmax_all_frames_finalize_krnl");
deviceSyncAfterKernelLaunch("zmax_all_frames_finalize_krnl");
final = zmax[0];
cudaFree(d_odata);
cudaFree(d_idata);
cudaFree(zmax);
return final;
}
__host__ float compute_pos_zmax_all_frames_2(struct dat_t *ddat, int size,
int set, int nframes) {
/* Function calculates the maximum z value in each frame's pos->z array with
 * Nvidia's reduction sample code (simplified and adapted for use with
 * shape). The function returns the frame-weighted zmax of the set as a float */
int s; // number of remaining partial results to reduce
int maxThreads = maxThreadsPerBlock;// max # of threads per block
int maxBlocks = 2048; // max # of blocks per grid
int whichKernel = 6; // id of reduction kernel
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
float *zmax, zmaxfinal = 0.0; // max z values; return value
float *d_odata; // temp. float array for reduction output
float *d_idata; // temp. float arrays for reduction input
float *h_odata; // the host output array
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
/* Allocate memory for d_idata, then set that pointer equal to the right
* data set and frame to the right deldop fit array */
cudaCalloc((void**)&d_idata, sizeof(float), size);
//cudaMallocManaged((void**)&zmax, sizeof(float)*nframes, cudaMemAttachHost);
cudaCalloc((void**)&zmax, sizeof(float), nframes);
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
/* Allocate memory for d_odata and d_odata2 with enough elements to hold
* the reduction of each block during the first call */
cudaCalloc((void**)&d_odata, sizeof(float), numBlocks);
h_odata = (float *) malloc(numBlocks*sizeof(float));
for (int frm=0; frm<nframes; frm++) {
set_idata_zmax_krnl<<<BLK,THD>>>(ddat, d_idata, set, frm, size);
checkErrorAfterKernelLaunch("set_idata_zmax_krnl in compute_zmax");
/* Call maxz for first time */
maxz<float>(size, numThreads, numBlocks, whichKernel, d_idata, d_odata);
checkErrorAfterKernelLaunch("maxz<float> in compute_zmax");
/* Reset d_idata for later use as buffer */
cudaMemset(d_idata, 0, size*sizeof(float));
/* Now sum partial block sums on GPU, using the maxz6<> kernel */
s = numBlocks;
while (s > 1)
{
int threads = 0, blocks = 0;
xblock_ythread = getNumBlocksAndThreads(s, maxBlocks, maxThreads);
blocks = xblock_ythread.x;
threads = xblock_ythread.y;
/* Copy the first d_odata back into d_idata2 */
cudaMemcpy(d_idata, d_odata, s*sizeof(float), cudaMemcpyDeviceToDevice);
maxz<float>(s, threads, blocks, whichKernel, d_idata, d_odata);
if (whichKernel < 3)
s = (s + threads - 1) / threads;
else
s = (s + (threads*2-1)) / (threads*2);
if (s > 1)
printf("s is bigger than one");
}
gpuErrchk(cudaMemcpy(h_odata, d_odata, 1*sizeof(float), cudaMemcpyDeviceToHost));
deviceSyncAfterKernelLaunch("");
zmax[frm] = h_odata[0];
} /*End frame loop*/
/* Now apply frame weighting factors and sum up all frames to get sum_deldop_zmax to return */
zmax_all_frames_finalize_krnl<<<1,1>>>(ddat,zmax,nframes,set);
checkErrorAfterKernelLaunch("zmax_all_frames_finalize_krnl");
deviceSyncAfterKernelLaunch("zmax_all_frames_finalize_krnl");
zmaxfinal = zmax[0];
free(h_odata);
cudaFree(d_odata);
cudaFree(d_idata);
cudaFree(zmax);
return zmaxfinal;
}
__host__ float compute_doppler_xsec(struct dat_t *ddat, int ndop,
int set, int frm) {
/* Function calculates a Doppler frame's radar cross section with
* Nvidia's reduction sample code (simplified and adapted for use with
* shape). The function returns the cross section as a float */
int s, size=ndop; // array size
int maxThreads = maxThreadsPerBlock; // max # of threads per block
int maxBlocks = 2048; // max # of blocks per grid
int whichKernel = 6; // id of reduction kernel
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
float xsec = 0.0; // radar cross section; return value
float *d_odata; // temp. float array for reduction output
float *d_idata; // temp. float arrays for reduction input
float *h_odata; // the host output array
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
/* Allocate memory for d_idata, then set that pointer equal to the right
* data set and frame to the right deldop fit array */
cudaCalloc((void**)&d_idata, sizeof(float), size);
set_idata_pntr_krnl<<<BLK,THD>>>(ddat, d_idata, set, frm, size);
checkErrorAfterKernelLaunch("set_idata_pntr_krnl in compute_doppler_xsec");
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
/* Allocate memory for d_odata and d_odata2 with enough elements to hold
* the reduction of each block during the first call */
cudaCalloc((void**)&d_odata, sizeof(float), numBlocks);
h_odata = (float *) malloc(numBlocks*sizeof(float));
/* Call reduction for first time */
reduce<float>(size, numThreads, numBlocks, whichKernel, d_idata, d_odata);
checkErrorAfterKernelLaunch("reduce<float> in compute_deldop_xsec");
/* Reset d_idata for later use as buffer */
gpuErrchk(cudaMemset(d_idata, 0, size*sizeof(float)));
/* Now sum partial block sums on GPU, using the reduce6<> kernel */
s = numBlocks;
while (s > 1)
{
int threads = 0, blocks = 0;
xblock_ythread = getNumBlocksAndThreads(s, maxBlocks, maxThreads);
blocks = xblock_ythread.x;
threads = xblock_ythread.y;
/* Copy the first d_odata back into d_idata2 */
cudaMemcpy(d_idata, d_odata, s*sizeof(float), cudaMemcpyDeviceToDevice);
reduce<float>(s, threads, blocks, whichKernel, d_idata, d_odata);
if (whichKernel < 3)
s = (s + threads - 1) / threads;
else
s = (s + (threads*2-1)) / (threads*2);
if (s > 1)
printf("s is bigger than one");
}
gpuErrchk(cudaMemcpy(h_odata, d_odata, 1*sizeof(float), cudaMemcpyDeviceToHost));
xsec = h_odata[0];
free(h_odata);
cudaFree(d_odata);
cudaFree(d_idata);
return xsec;
}
__host__ float compute_model_area1(struct mod_t *dmod, int c, int size) {
/* Function calculates the total surface area of model component c with
 * Nvidia's reduction sample code (simplified and adapted for use with
 * shape). The function returns the area as a float */
int s; // number of remaining partial results to reduce
int maxThreads = maxThreadsPerBlock; // max # of threads per block
int maxBlocks = 2048; // max # of blocks per grid
int whichKernel = 6; // id of reduction kernel
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
float area = 0.0; // model surface area; return value
float *d_odata; // temp. float array for reduction output
float *d_idata; // temp. float arrays for reduction input
float *h_odata; // the host output array
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
/* Allocate memory for d_idata, then set that pointer equal to the right
* data set and frame to the right deldop fit array */
cudaCalloc((void**)&d_idata, sizeof(float), size);
set_idata_modarea_krnl<<<BLK,THD>>>(dmod, d_idata, c, size);
checkErrorAfterKernelLaunch("set_idata_modarea_krnl in compute_doppler_xsec");
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
/* Allocate memory for d_odata and d_odata2 with enough elements to hold
* the reduction of each block during the first call */
cudaCalloc((void**)&d_odata, sizeof(float), numBlocks);
h_odata = (float *) malloc(numBlocks*sizeof(float));
/* Call reduction for first time */
reduce<float>(size, numThreads, numBlocks, whichKernel, d_idata, d_odata);
checkErrorAfterKernelLaunch("reduce<float> in compute_deldop_xsec");
/* Reset d_idata for later use as buffer */
gpuErrchk(cudaMemset(d_idata, 0, size*sizeof(float)));
/* Now sum partial block sums on GPU, using the reduce6<> kernel */
s = numBlocks;
while (s > 1)
{
int threads = 0, blocks = 0;
xblock_ythread = getNumBlocksAndThreads(s, maxBlocks, maxThreads);
blocks = xblock_ythread.x;
threads = xblock_ythread.y;
/* Copy the first d_odata back into d_idata2 */
gpuErrchk(cudaMemcpy(d_idata, d_odata, s*sizeof(float),
cudaMemcpyDeviceToDevice));
reduce<float>(s, threads, blocks, whichKernel, d_idata, d_odata);
if (whichKernel < 3)
s = (s + threads - 1) / threads;
else
s = (s + (threads*2-1)) / (threads*2);
if (s > 1)
printf("s is bigger than one");
}
gpuErrchk(cudaMemcpy(h_odata, d_odata, 1*sizeof(float), cudaMemcpyDeviceToHost));
area = h_odata[0];
free(h_odata);
cudaFree(d_odata);
cudaFree(d_idata);
return area;
}
__host__ float compute_model_area(struct mod_t *dmod, int c, int size) {
/* Function calculates the total surface area of model component c with
 * Nvidia's reduction sample code (simplified and adapted for use with
 * shape). The function returns the area as a float */
int maxThreads = maxThreadsPerBlock; // max # of threads per block
int maxBlocks = 2048; // max # of blocks per grid
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
float area = 0.0; // model surface area; return value
float *d_odata; // temp. float array for reduction output
float *d_idata; // temp. float arrays for reduction input
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
dim3 dimBlock(numThreads, 1, 1);
dim3 dimGrid(numBlocks, 1, 1);
/* Allocate memory for d_idata and d_odata */
cudaCalloc((void**)&d_idata, sizeof(float), size);
cudaCalloc((void**)&d_odata, sizeof(float), numBlocks);
/* Load the d_idata array */
set_idata_modarea_krnl<<<BLK,THD>>>(dmod, d_idata, c, size);
checkErrorAfterKernelLaunch("set_idata_modarea_krnl in compute_doppler_xsec");
/* Call reduction */
device_reduce_block_atomic_kernel<<< dimGrid, dimBlock>>>(d_idata, d_odata, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (compute_model_area)");
deviceSyncAfterKernelLaunch("device_reduce_block_atomic_kernel (compute_model_area)");
area = d_odata[0];
cudaFree(d_odata);
cudaFree(d_idata);
return area;
}
__host__ void dvdI_reduce_single(struct mod_t *dmod, float *dv, float *dcom0,
float *dcom1, float *dcom2, float *dI00, float *dI01, float *dI02,
float *dI10, float *dI11, float *dI12, float *dI20, float *dI21,
float *dI22, int size, int c)
{
int maxThreads = maxThreadsPerBlock; // max # of threads per block
int maxBlocks = 2048; // max # of blocks per grid
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
/* Device output arrays */
float *d_odata_dcom0, *d_odata_dcom1, *d_odata_dcom2, *d_odata_dI00,
*d_odata_dI01, *d_odata_dI02, *d_odata_dI10, *d_odata_dI11,
*d_odata_dI12, *d_odata_dI20, *d_odata_dI21, *d_odata_dI22,
*d_odata_dv;
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
dim3 dimBlock(numThreads, 1, 1);
dim3 dimGrid(numBlocks, 1, 1);
/* Allocate memory for the device output arrays for first reduction */
cudaCalloc((void**)&d_odata_dv, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dcom0, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dcom1, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dcom2, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI00, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI01, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI02, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI10, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI11, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI12, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI20, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI21, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI22, sizeof(float), numBlocks);
cudaStream_t stream[13];
for (int i=0; i<13; i++)
gpuErrchk(cudaStreamCreate(&stream[i]));
device_reduce_block_atomic_kernel<<< dimGrid, dimBlock, 0, stream[0] >>>(dv, d_odata_dv, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dv)");
device_reduce_block_atomic_kernel<<< dimGrid, dimBlock, 0, stream[1] >>>(dcom0, d_odata_dcom0, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dcom0)");
device_reduce_block_atomic_kernel<<< dimGrid, dimBlock, 0, stream[2] >>>(dcom1, d_odata_dcom1, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dcom1)");
device_reduce_block_atomic_kernel<<< dimGrid, dimBlock, 0, stream[3] >>>(dcom2, d_odata_dcom2, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dcom2)");
device_reduce_block_atomic_kernel<<< dimGrid, dimBlock, 0, stream[4] >>>(dI00, d_odata_dI00, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dI00)");
device_reduce_block_atomic_kernel<<< dimGrid, dimBlock, 0, stream[5] >>>(dI01, d_odata_dI01, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dI01)");
device_reduce_block_atomic_kernel<<< dimGrid, dimBlock, 0, stream[6] >>>(dI02, d_odata_dI02, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dI02)");
device_reduce_block_atomic_kernel<<< dimGrid, dimBlock, 0, stream[7] >>>(dI10, d_odata_dI10, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dI10)");
device_reduce_block_atomic_kernel<<< dimGrid, dimBlock, 0, stream[8] >>>(dI11, d_odata_dI11, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dI11)");
device_reduce_block_atomic_kernel<<< dimGrid, dimBlock, 0, stream[9] >>>(dI12, d_odata_dI12, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dI12)");
device_reduce_block_atomic_kernel<<< dimGrid, dimBlock, 0, stream[10] >>>(dI20, d_odata_dI20, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dI20)");
device_reduce_block_atomic_kernel<<< dimGrid, dimBlock, 0, stream[11] >>>(dI21, d_odata_dI21, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dI21)");
device_reduce_block_atomic_kernel<<< dimGrid, dimBlock, 0, stream[12] >>>(dI22, d_odata_dI22, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel (dI22)");
for (int i=0; i<13; i++)
gpuErrchk(cudaStreamSynchronize(stream[i]));
/* Copy and assign */
set_dv_dcom_di_krnl<<<1,1>>>(d_odata_dv, d_odata_dcom0, d_odata_dcom1,
d_odata_dcom2, d_odata_dI00, d_odata_dI01, d_odata_dI02, d_odata_dI10,
d_odata_dI11, d_odata_dI12, d_odata_dI20, d_odata_dI21, d_odata_dI22,
dmod, c);
/* Free up the temporary arrays */
cudaFree(d_odata_dv);
cudaFree(d_odata_dcom0); cudaFree(d_odata_dcom1); cudaFree(d_odata_dcom2);
cudaFree(d_odata_dI00); cudaFree(d_odata_dI01); cudaFree(d_odata_dI02);
cudaFree(d_odata_dI10); cudaFree(d_odata_dI11); cudaFree(d_odata_dI12);
cudaFree(d_odata_dI20); cudaFree(d_odata_dI21); cudaFree(d_odata_dI22);
for (int i=0; i<13; i++)
gpuErrchk(cudaStreamDestroy(stream[i]));
}
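/* Illustrative usage sketch for dvdI_reduce_single (not from the original source);
 * the facet count below is hypothetical and the dv..dI22 arrays are assumed to
 * already hold the per-facet contributions for component 0 on the device:
 *
 *     int nf = ...;   // number of per-facet contributions for component 0
 *     dvdI_reduce_single(dmod, dv, dcom0, dcom1, dcom2,
 *                        dI00, dI01, dI02, dI10, dI11, dI12,
 *                        dI20, dI21, dI22, nf, 0);
 */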
__host__ void compute_dv_dcom_dI_reduction(float *dv, float *dcom0, float
*dcom1, float *dcom2, float *dI00, float *dI01, float *dI02, float
*dI10, float *dI11, float *dI12, float *dI20, float *dI21, float *dI22,
int c, int size, struct mod_t *dmod) {
/* Function calculates the model's COM and Inertia tensors */
int s; // number of remaining partial results to reduce
int maxThreads = maxThreadsPerBlock; // max # of threads per block
int maxBlocks = 2048; // max # of blocks per grid
int whichKernel = 6; // id of reduction kernel
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
/* Device output arrays */
float *d_odata_dcom0, *d_odata_dcom1, *d_odata_dcom2, *d_odata_dI00,
*d_odata_dI01, *d_odata_dI02, *d_odata_dI10, *d_odata_dI11,
*d_odata_dI12, *d_odata_dI20, *d_odata_dI21, *d_odata_dI22,
*d_odata_dv;
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
//
// dim3 BLK,THD;
// BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
// THD.x = maxThreadsPerBlock; // Thread block dimensions
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
/* Allocate memory for the device output arrays for first reduction */
cudaCalloc((void**)&d_odata_dv, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dcom0, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dcom1, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dcom2, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI00, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI01, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI02, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI10, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI11, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI12, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI20, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI21, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_dI22, sizeof(float), numBlocks);
/* Call reductions for first time */
if (STREAMS) {
reducedI<float>(size, numThreads, numBlocks, whichKernel, dv, dcom0, dcom1, dcom2,
dI00, dI01, dI02, dI10, dI11, dI12, dI20, dI21, dI22, d_odata_dv,
d_odata_dcom0, d_odata_dcom1, d_odata_dcom2, d_odata_dI00, d_odata_dI01,
d_odata_dI02, d_odata_dI10, d_odata_dI11, d_odata_dI12, d_odata_dI20,
d_odata_dI21, d_odata_dI22);
checkErrorAfterKernelLaunch("reducedI<float> in compute_dv_dcom_dI_reduction"); }
else {
reduce<float>(size, numThreads, numBlocks, whichKernel, dv, d_odata_dv);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dcom0, d_odata_dcom0);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dcom1, d_odata_dcom1);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dcom2, d_odata_dcom2);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dI00, d_odata_dI00);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dI01, d_odata_dI01);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dI02, d_odata_dI02);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dI10, d_odata_dI10);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dI11, d_odata_dI11);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dI12, d_odata_dI12);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dI20, d_odata_dI20);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dI21, d_odata_dI21);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(size, numThreads, numBlocks, whichKernel, dI22, d_odata_dI22);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
}
/* Reset the orig. input arrays for later use as buffer */
gpuErrchk(cudaMemset(dv, 0, size*sizeof(float)));
gpuErrchk(cudaMemset(dcom0, 0, size*sizeof(float)));
gpuErrchk(cudaMemset(dcom1, 0, size*sizeof(float)));
gpuErrchk(cudaMemset(dcom2, 0, size*sizeof(float)));
gpuErrchk(cudaMemset(dI00, 0, size*sizeof(float)));
gpuErrchk(cudaMemset(dI01, 0, size*sizeof(float)));
gpuErrchk(cudaMemset(dI02, 0, size*sizeof(float)));
gpuErrchk(cudaMemset(dI10, 0, size*sizeof(float)));
gpuErrchk(cudaMemset(dI11, 0, size*sizeof(float)));
gpuErrchk(cudaMemset(dI12, 0, size*sizeof(float)));
gpuErrchk(cudaMemset(dI20, 0, size*sizeof(float)));
gpuErrchk(cudaMemset(dI21, 0, size*sizeof(float)));
gpuErrchk(cudaMemset(dI22, 0, size*sizeof(float)));
/* Now sum partial block sums on GPU, using the reduce6<> kernel */
s = numBlocks;
while (s > 1)
{
int threads = 0, blocks = 0;
xblock_ythread = getNumBlocksAndThreads(s, maxBlocks, maxThreads);
blocks = xblock_ythread.x;
threads = xblock_ythread.y;
/* Copy the d_odata_xx arrays back into the zeroed-out input arrays */
gpuErrchk(cudaMemcpy(dv, d_odata_dv, s*sizeof(float), cudaMemcpyDeviceToDevice));
gpuErrchk(cudaMemcpy(dcom0, d_odata_dcom0, s*sizeof(float), cudaMemcpyDeviceToDevice));
gpuErrchk(cudaMemcpy(dcom1, d_odata_dcom1, s*sizeof(float), cudaMemcpyDeviceToDevice));
gpuErrchk(cudaMemcpy(dcom2, d_odata_dcom2, s*sizeof(float), cudaMemcpyDeviceToDevice));
gpuErrchk(cudaMemcpy(dI00, d_odata_dI00, s*sizeof(float), cudaMemcpyDeviceToDevice));
gpuErrchk(cudaMemcpy(dI01, d_odata_dI01, s*sizeof(float), cudaMemcpyDeviceToDevice));
gpuErrchk(cudaMemcpy(dI02, d_odata_dI02, s*sizeof(float), cudaMemcpyDeviceToDevice));
gpuErrchk(cudaMemcpy(dI10, d_odata_dI10, s*sizeof(float), cudaMemcpyDeviceToDevice));
gpuErrchk(cudaMemcpy(dI11, d_odata_dI11, s*sizeof(float), cudaMemcpyDeviceToDevice));
gpuErrchk(cudaMemcpy(dI12, d_odata_dI12, s*sizeof(float), cudaMemcpyDeviceToDevice));
gpuErrchk(cudaMemcpy(dI20, d_odata_dI20, s*sizeof(float), cudaMemcpyDeviceToDevice));
gpuErrchk(cudaMemcpy(dI21, d_odata_dI21, s*sizeof(float), cudaMemcpyDeviceToDevice));
gpuErrchk(cudaMemcpy(dI22, d_odata_dI22, s*sizeof(float), cudaMemcpyDeviceToDevice));
/* Call all reductions again until s = 1 */
if (STREAMS) {
reducedI<float>(s, threads, blocks, whichKernel, dv, dcom0, dcom1, dcom2,
dI00, dI01, dI02, dI10, dI11, dI12, dI20, dI21, dI22, d_odata_dv,
d_odata_dcom0, d_odata_dcom1, d_odata_dcom2, d_odata_dI00, d_odata_dI01,
d_odata_dI02, d_odata_dI10, d_odata_dI11, d_odata_dI12, d_odata_dI20,
d_odata_dI21, d_odata_dI22);
checkErrorAfterKernelLaunch("reducedI<float> in compute_dv_dcom_dI_reduction");
}
else {
reduce<float>(s, threads, blocks, whichKernel, dv, d_odata_dv);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dcom0, d_odata_dcom0);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dcom1, d_odata_dcom1);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dcom2, d_odata_dcom2);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dI00, d_odata_dI00);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dI01, d_odata_dI01);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dI02, d_odata_dI02);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dI10, d_odata_dI10);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dI11, d_odata_dI11);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dI12, d_odata_dI12);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dI20, d_odata_dI20);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dI21, d_odata_dI21);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
reduce<float>(s, threads, blocks, whichKernel, dI22, d_odata_dI22);
checkErrorAfterKernelLaunch("reduce<float> in compute_dv_dcom_dI_reduction");
}
if (whichKernel < 3)
s = (s + threads - 1) / threads;
else
s = (s + (threads*2-1)) / (threads*2);
if (s > 1)
printf("s is bigger than one");
}
/* Copy and assign */
set_dv_dcom_di_krnl<<<1,1>>>(d_odata_dv, d_odata_dcom0, d_odata_dcom1,
d_odata_dcom2, d_odata_dI00, d_odata_dI01, d_odata_dI02, d_odata_dI10,
d_odata_dI11, d_odata_dI12, d_odata_dI20, d_odata_dI21, d_odata_dI22,
dmod, c);
/* Free up the temporary arrays */
cudaFree(d_odata_dv);
cudaFree(d_odata_dcom0); cudaFree(d_odata_dcom1); cudaFree(d_odata_dcom2);
cudaFree(d_odata_dI00); cudaFree(d_odata_dI01); cudaFree(d_odata_dI02);
cudaFree(d_odata_dI10); cudaFree(d_odata_dI11); cudaFree(d_odata_dI12);
cudaFree(d_odata_dI20); cudaFree(d_odata_dI21); cudaFree(d_odata_dI22);
}
__global__ void dbg_pos_xlim_krnl(float *debug, float *d_odata_imax,
float *d_odata_jmax, float *d_odata_imin, float *d_odata_jmin) {
/* Single-threaded debug kernel */
if (threadIdx.x == 0) {
debug[0] = d_odata_imin[0];
debug[1] = d_odata_imax[0];
debug[2] = d_odata_jmin[0];
debug[3] = d_odata_jmax[0];
printf("\nimin: %g", debug[0]);
printf("\nimax: %g", debug[1]);
printf("\njmin: %g", debug[2]);
printf("\njmax: %g", debug[3]);
}
}
__global__ void dbg_print_device_array(float *in, int size) {
/* Single-threaded debug kernel */
int i;
if (threadIdx.x == 0) {
for (i=0; i<size; i++)
printf("\ndev_array[%i]=%g", i, in[i]);
}
}
__host__ void compute_xlim_ylim(struct dat_t *ddat, int size,
int set, int frm, int src, float *iminflt, float *imaxflt, float *jminflt,
float *jmaxflt, float *minmax_overall) {
/* Function calculates the pos->xlim and pos->ylim values and also the
* imax_overall/imin_overall/jmax_overall/jmin_overall values and updates
* pos accordingly */
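/* The per-pixel i/j limit candidates arrive as "size"-element device arrays;
 * maxz/minz reduce them block-wise, the partial results are folded until a
 * single value of each remains in d_odata_*, and set_xlim_ylim_krnl finally
 * writes those into pos->xlim/ylim and the minmax_overall array. */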
int s; // array size
int maxThreads = 256; // max # of threads per block
int maxBlocks = 256; // max # of blocks per grid
int whichKernel = 6; // id of reduction kernel
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
float *d_odata_imax; // temp. float arrays for reduction output
float *d_odata_imin;
float *d_odata_jmax;
float *d_odata_jmin;
float2 xblock_ythread; // used for return value of getNumBlocksAndThreads
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
/* Find number of blocks & threads needed for reduction call */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
/* Create the streams (four) */
// cudaStream_t stream1, stream2, stream3, stream4;
// cudaStreamCreate(&stream1);
// cudaStreamCreate(&stream2);
// cudaStreamCreate(&stream3);
// cudaStreamCreate(&stream4);
/* Allocate memory for four device output data arrays d_odata_imax,
* d_odata_imin, d_odata_jmax, d_odata_jmin with enough elements to hold
* the reduction of each block during the first call */
cudaCalloc((void**)&d_odata_imax, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_imin, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_jmax, sizeof(float), numBlocks);
cudaCalloc((void**)&d_odata_jmin, sizeof(float), numBlocks);
// gpuErrchk(cudaHostAlloc((void**)&d_odata_imax, sizeof(float)*4,
// cudaHostAllocWriteCombined | cudaHostAllocMapped));
// gpuErrchk(cudaHostAlloc((void**)&d_odata_imin, sizeof(float)*4,
// cudaHostAllocWriteCombined | cudaHostAllocMapped));
// gpuErrchk(cudaHostAlloc((void**)&d_odata_jmax, sizeof(float)*4,
// cudaHostAllocWriteCombined | cudaHostAllocMapped));
// gpuErrchk(cudaHostAlloc((void**)&d_odata_jmin, sizeof(float)*4,
// cudaHostAllocWriteCombined | cudaHostAllocMapped));
/* Call maxz for first time */
maxz<float>(size, numThreads, numBlocks, whichKernel, imaxflt, d_odata_imax);
checkErrorAfterKernelLaunch("maxz<float> for imaxflt in compute_xlim_ylim");
maxz<float>(size, numThreads, numBlocks, whichKernel, jmaxflt, d_odata_jmax);
checkErrorAfterKernelLaunch("maxz<float> for jmaxflt in compute_xlim_ylim");
minz<float>(size, numThreads, numBlocks, whichKernel, iminflt, d_odata_imin);
checkErrorAfterKernelLaunch("minz<float> for iminflt in compute_xlim_ylim");
minz<float>(size, numThreads, numBlocks, whichKernel, jminflt, d_odata_jmin);
checkErrorAfterKernelLaunch("minz<float> for jminflt in compute_xlim_ylim");
// maxzexp<float>(size, numThreads, numBlocks, whichKernel, imaxflt, jmaxflt,
// iminflt, jminflt, d_odata_imax, d_odata_jmax, d_odata_imin,
// d_odata_jmin, &stream1, &stream2, &stream3, &stream4);
/* Reset the original input arrays for later use as buffer */
gpuErrchk(cudaMemset(imaxflt, 0, size*sizeof(float)));
gpuErrchk(cudaMemset(iminflt, 0, size*sizeof(float)));
gpuErrchk(cudaMemset(jmaxflt, 0, size*sizeof(float)));
gpuErrchk(cudaMemset(jminflt, 0, size*sizeof(float)));
/* Now sum partial block sums on GPU, using the maxz6<> kernel */
s = numBlocks;
while (s > 1)
{
int threads = 0, blocks = 0;
xblock_ythread = getNumBlocksAndThreads(s, maxBlocks, maxThreads);
blocks = xblock_ythread.x;
threads = xblock_ythread.y;
/* Copy the partial block results back into the zeroed-out input arrays
 * (device-to-device; all of these buffers live in device memory) */
gpuErrchk(cudaMemcpy(imaxflt, d_odata_imax, s*sizeof(float), cudaMemcpyDeviceToDevice));
gpuErrchk(cudaMemcpy(iminflt, d_odata_imin, s*sizeof(float), cudaMemcpyDeviceToDevice));
gpuErrchk(cudaMemcpy(jmaxflt, d_odata_jmax, s*sizeof(float), cudaMemcpyDeviceToDevice));
gpuErrchk(cudaMemcpy(jminflt, d_odata_jmin, s*sizeof(float), cudaMemcpyDeviceToDevice));
maxz<float>(s, threads, blocks, whichKernel, imaxflt, d_odata_imax);
checkErrorAfterKernelLaunch("maxz<float> for imaxflt in compute_xlim_ylim");
maxz<float>(s, threads, blocks, whichKernel, jmaxflt, d_odata_jmax);
checkErrorAfterKernelLaunch("maxz<float> for jmaxflt in compute_xlim_ylim");
minz<float>(s, threads, blocks, whichKernel, iminflt, d_odata_imin);
checkErrorAfterKernelLaunch("minz<float> for iminflt in compute_xlim_ylim");
minz<float>(s, threads, blocks, whichKernel, jminflt, d_odata_jmin);
checkErrorAfterKernelLaunch("minz<float> for jminflt in compute_xlim_ylim");
// maxzexp<float>(s, threads, blocks, whichKernel, imaxflt, jmaxflt, iminflt,
// jminflt, d_odata_imax, d_odata_jmax, d_odata_imin, d_odata_jmin,
// &stream1, &stream2, &stream3, &stream4);
if (whichKernel < 3)
s = (s + threads - 1) / threads;
else
s = (s + (threads*2-1)) / (threads*2);
if (s > 1)
printf("s is bigger than one");
}
/* Sync streams */
// gpuErrchk(cudaStreamSynchronize(stream1));
// gpuErrchk(cudaStreamSynchronize(stream2));
// gpuErrchk(cudaStreamSynchronize(stream3));
// gpuErrchk(cudaStreamSynchronize(stream4));
/* Calculate the min/max overall values (regardless of POS frame limits) */
set_xlim_ylim_krnl<<<1,1>>>(ddat, set, frm, src, d_odata_imax, d_odata_imin,
d_odata_jmax, d_odata_jmin, minmax_overall);
checkErrorAfterKernelLaunch("set_xlim_ylim_krnl in compute_xlim_ylim");
/* Nuke streams */
// gpuErrchk(cudaStreamDestroy(stream1));
// gpuErrchk(cudaStreamDestroy(stream2));
// gpuErrchk(cudaStreamDestroy(stream3));
// gpuErrchk(cudaStreamDestroy(stream4));
cudaFree(d_odata_imax);
cudaFree(d_odata_imin);
cudaFree(d_odata_jmax);
cudaFree(d_odata_jmin);
}
__host__ void c2af_deldop_add_o2_m2(
float **temp_o2,
float **temp_m2,
float **temp_om,
int size,
int nframes) {
/* Function reduces each input array once per frame, for all nframes frames.
 * The input arrays are structured like this: input[nframes][size] */
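/* Per frame: c2af_set_data_krnl copies that frame's slice of temp_o2/m2/om
 * into the flat d_idata buffers, one atomic block reduction collapses each
 * buffer into d_odata[0], and that scalar is written back into element
 * [frm][0] of the corresponding input array before the output buffers are
 * re-zeroed for the next frame. */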
int maxThreads = maxThreadsPerBlock;
int maxBlocks = 2048;
int numBlocks = 0; // initialize numBlocks
int numThreads = 0; // initialize numThreads
float *d_odata_o2, *d_odata_m2, *d_odata_om;
float *d_idata_o2, *d_idata_m2, *d_idata_om;
float2 xblock_ythread;
dim3 BLK,THD;
BLK.x = floor((maxThreadsPerBlock - 1 + size)/maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
/* Find number of blocks & threads needed to reduce ONE FRAME ONLY */
xblock_ythread = getNumBlocksAndThreads(size, maxBlocks, maxThreads);
numBlocks = xblock_ythread.x;
numThreads = xblock_ythread.y;
dim3 dimBlock(numThreads, 1, 1);
dim3 dimGrid(numBlocks, 1, 1);
/* Allocate memory for d_idata and d_odata */
cudaCalloc((void**)&d_idata_o2, sizeof(float), size);
cudaCalloc((void**)&d_odata_o2, sizeof(float), numBlocks);
cudaCalloc((void**)&d_idata_m2, sizeof(float), size);
cudaCalloc((void**)&d_odata_m2, sizeof(float), numBlocks);
cudaCalloc((void**)&d_idata_om, sizeof(float), size);
cudaCalloc((void**)&d_odata_om, sizeof(float), numBlocks);
for (int frm=0; frm<nframes; frm++) {
c2af_set_data_krnl<<<BLK,THD>>>(temp_o2, d_idata_o2, frm, size);
checkErrorAfterKernelLaunch("c2af_set_data_krnl in reduction.cu");
c2af_set_data_krnl<<<BLK,THD>>>(temp_m2, d_idata_m2, frm, size);
checkErrorAfterKernelLaunch("c2af_set_data_krnl in reduction.cu");
c2af_set_data_krnl<<<BLK,THD>>>(temp_om, d_idata_om, frm, size);
checkErrorAfterKernelLaunch("c2af_set_data_krnl in reduction.cu");
/* Call reduction */
device_reduce_block_atomic_kernel<<< dimGrid, dimBlock>>>(d_idata_o2,
d_odata_o2, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel");
device_reduce_block_atomic_kernel<<< dimGrid, dimBlock>>>(d_idata_m2,
d_odata_m2, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel");
device_reduce_block_atomic_kernel<<< dimGrid, dimBlock>>>(d_idata_om,
d_odata_om, size);
checkErrorAfterKernelLaunch("device_reduce_block_atomic_kernel");
deviceSyncAfterKernelLaunch("device_reduce_block_atomic_kernel");
temp_o2[frm][0] = d_odata_o2[0];
temp_m2[frm][0] = d_odata_m2[0];
temp_om[frm][0] = d_odata_om[0];
gpuErrchk(cudaMemset(d_odata_o2, 0, numBlocks*sizeof(float)));
gpuErrchk(cudaMemset(d_odata_m2, 0, numBlocks*sizeof(float)));
gpuErrchk(cudaMemset(d_odata_om, 0, numBlocks*sizeof(float)));
}
/* Output sum for each frame is in first entry for each frame in the
* input array */
cudaFree(d_odata_o2);
cudaFree(d_idata_o2);
cudaFree(d_odata_m2);
cudaFree(d_idata_m2);
cudaFree(d_odata_om);
cudaFree(d_idata_om);
}
//#endif // #ifndef _REDUCE_KERNEL_H_
|
f5215d6956cb1bcd441e4dbc8d4b342e8f33fbc5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
__global__ void div_strided_double(int n,int xOffset,int yOffset, double *dx, double *dy,int incx,int incy,double *result) {
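// Grid-stride loop: each thread walks the index space in steps of the total
// thread count; only indices at or past both offsets that land on both
// strides (i % incx == 0 && i % incy == 0) produce an output element.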
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= xOffset && i >= yOffset && i % incx == 0 && i % incy == 0)
result[i] = dy[i] / dx[i];
}
} | f5215d6956cb1bcd441e4dbc8d4b342e8f33fbc5.cu | #include "includes.h"
extern "C"
__global__ void div_strided_double(int n,int xOffset,int yOffset, double *dx, double *dy,int incx,int incy,double *result) {
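// Grid-stride loop: each thread walks the index space in steps of the total
// thread count; only indices at or past both offsets that land on both
// strides (i % incx == 0 && i % incy == 0) produce an output element.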
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= xOffset && i >= yOffset && i % incx == 0 && i % incy == 0)
result[i] = dy[i] / dx[i];
}
} |
e9db76c5676e3643676fce2a116ddd270f76934a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <cstdlib>
#include <cstdio>
#include <GL/glew.h>
#if defined (__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/glut.h>
#endif
#include <cuda_gl_interop.h>
#include <cutil_inline.h>
#include "bbsort.cuh"
#include "costFunctionsGPU.cuh"
float4* dRegSortedData;
float* dRegVolData;
unsigned int* dJointHist;
unsigned int* dRegHistCopy;
unsigned int* dBaseHist;
unsigned int* dRegHist;
float3* dEntropies;
float3* hEntropies;
unsigned int* hRegHist;
unsigned int* hBaseHist;
unsigned int* hJointHist;
unsigned int* hBlockPivots;
unsigned int* dBlockPivots;
hipArray *dBaseArray;
float3 hBaseCenter,hRegCenter;
#include "costFunctionsGPU_kernel.cu"
void initRegFloat4(Volume* baseVol,int3 size)
{
unsigned int binCount;
if(baseVol->dim.x ==8.0)
binCount=FLIRT_BIN_COUNT_8MM;
else if(baseVol->dim.x ==4.0)
binCount=FLIRT_BIN_COUNT_4MM;
else
binCount=FLIRT_BIN_COUNT;
const unsigned int binWidth=256/binCount;
unsigned int blockSize = 512;
unsigned int totalPoints = size.x * size.y * size.z ;
unsigned int blockCount;
if(totalPoints%blockSize==0)
blockCount=totalPoints/blockSize;
else
blockCount=totalPoints/blockSize+1;
hipLaunchKernelGGL(( buildRegFloat4Kernel), dim3(blockCount),dim3(blockSize), 0, 0, dRegSortedData,dRegVolData,size,totalPoints);
bbSort(dRegSortedData, totalPoints);
float4* hRegSortedData = new float4[totalPoints];
hipMemcpy(hRegSortedData, dRegSortedData, sizeof(float4) * totalPoints, hipMemcpyDeviceToHost);
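/* Walk the sorted copy on the host and record, for each intensity bin, the
 * index of the first element whose value (stored in .w) reaches that bin;
 * hBlockPivots[b]..hBlockPivots[b+1] then delimits bin b inside the sorted
 * array, so the joint-histogram kernels can split the data by bin across
 * thread blocks. */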
hBlockPivots[0]=0;
unsigned int offset=0;
for(unsigned int target=1;target<= binCount; target++)
{
while( (unsigned int)hRegSortedData[offset].w / binWidth < target)
{
if(offset >= totalPoints)
break;
else
offset++;
}
hBlockPivots[target] = offset;
}
hipMemcpy(dBlockPivots,hBlockPivots , sizeof(unsigned int) * (binCount + 1), hipMemcpyHostToDevice);
delete[] hRegSortedData;
}
void initVolumeData(Volume* baseVol, Volume* regVol)
{
int binCount;
if(baseVol->dim.x ==8.0)
binCount=FLIRT_BIN_COUNT_8MM;
else if(baseVol->dim.x ==4.0)
binCount=FLIRT_BIN_COUNT_4MM;
else
binCount=FLIRT_BIN_COUNT;
//const unsigned int binWidth=256/binCount;
hEntropies = new float3[binCount];
hRegHist = new unsigned int[binCount];
hBaseHist=new unsigned int[binCount];
hJointHist=new unsigned int[binCount*binCount];
hBlockPivots=new unsigned int[binCount + 1];
float3 baseCenter={baseVol->center.x / baseVol->dim.x, baseVol->center.y / baseVol->dim.y, baseVol->center.z / baseVol->dim.z};
float3 regCenter={regVol->center.x / regVol->dim.x, regVol->center.y / regVol->dim.y, regVol->center.z / regVol->dim.z};
hipMemcpyToSymbol( cBaseCenter, &baseCenter, sizeof(float3)) ;
cutilCheckMsg("hipMemcpyToSymbol baseVol failed");
hipMemcpyToSymbol( cRegCenter, ®Center, sizeof(float3)) ;
cutilCheckMsg("hipMemcpyToSymbol regVol failed");
hipExtent baseVolumeSize = make_hipExtent(baseVol -> size.x, baseVol -> size.y, baseVol -> size.z);
// create 3D array
hipChannelFormatDesc baseDesc = hipCreateChannelDesc<float>();
cutilSafeCall( hipMalloc3DArray(&dBaseArray, &baseDesc, baseVolumeSize) );
// copy data to 3D array
hipMemcpy3DParms baseCopyParams = {0};
baseCopyParams.srcPtr = make_hipPitchedPtr((void*)baseVol->data, baseVolumeSize.width*sizeof(float), baseVolumeSize.width, baseVolumeSize.height);
baseCopyParams.dstArray = dBaseArray;
baseCopyParams.extent = baseVolumeSize;
baseCopyParams.kind = hipMemcpyHostToDevice;
cutilSafeCall( hipMemcpy3D(&baseCopyParams) );
// set texture parameters
tBaseVolData.normalized = false; // access with normalized texture coordinates
if(!FLIRT_HARDWARE_INTERPOLATION || baseVol->dim.x >=4.0)
tBaseVolData.filterMode = hipFilterModePoint;
else
tBaseVolData.filterMode = hipFilterModeLinear;
tBaseVolData.addressMode[0] = hipAddressModeWrap; // wrap texture coordinates
tBaseVolData.addressMode[1] = hipAddressModeWrap;
tBaseVolData.addressMode[2] = hipAddressModeWrap;
// bind array to 3D texture
cutilSafeCall(hipBindTextureToArray(tBaseVolData, dBaseArray, baseDesc));
const unsigned int regTotalSize = (regVol->size.x * regVol->size.y * regVol->size.z);
CUDA_SAFE_CALL(hipMalloc((void**)&dRegVolData, sizeof(float) * regTotalSize));
CUDA_SAFE_CALL(hipMemcpy(dRegVolData, regVol->data,sizeof(float) * regTotalSize, hipMemcpyHostToDevice));
//Alloc memory
CUDA_SAFE_CALL(hipMalloc((void**)&dJointHist, sizeof(unsigned int) * binCount * binCount));
CUDA_SAFE_CALL(hipMalloc((void**)&dRegHistCopy, sizeof(unsigned int) * binCount ));
CUDA_SAFE_CALL(hipMalloc((void**)&dBaseHist, sizeof(unsigned int) * binCount));
CUDA_SAFE_CALL(hipMalloc((void**)&dRegHist, sizeof(unsigned int) * binCount));
CUDA_SAFE_CALL(hipMalloc((void**)&dEntropies, sizeof(float3) * binCount));
CUDA_SAFE_CALL(hipMalloc((void**)&dBlockPivots, sizeof(unsigned int) * (binCount + 1)));
CUDA_SAFE_CALL(hipMalloc((void**)&dRegSortedData, sizeof(float4) * regTotalSize));
initRegFloat4(baseVol,regVol -> size);
}
void freeVolumeData(bool clearCpuBuffer)
{
//Unbind textures
hipUnbindTexture( tRegVolData );
//Free arrays
hipFreeArray( dBaseArray );
CUDA_SAFE_CALL(hipFree(dJointHist));
CUDA_SAFE_CALL(hipFree(dRegHistCopy));
CUDA_SAFE_CALL(hipFree(dBaseHist));
CUDA_SAFE_CALL(hipFree(dRegHist));
CUDA_SAFE_CALL(hipFree(dEntropies));
CUDA_SAFE_CALL(hipFree(dRegVolData));
CUDA_SAFE_CALL(hipFree(dRegSortedData));
CUDA_SAFE_CALL(hipFree(dBlockPivots));
/*CUDA_SAFE_CALL(hipFree(cBaseCenter));
CUDA_SAFE_CALL(hipFree(cRegCenter));*/
delete[] hEntropies;
delete[] hRegHist;
delete[] hJointHist;
delete[] hBaseHist;
delete[] hBlockPivots;
}
void calcEntropyGPU(Volume* baseVol, Volume* regVol, Matrix44* dAffine, float &baseEntropy, float& regEntropy, float& jointEntropy)
{
//unsigned int timer;
baseEntropy=0;
regEntropy=0;
jointEntropy=0;
unsigned int binCount;
if(baseVol->dim.x ==8.0)
binCount=FLIRT_BIN_COUNT_8MM;
else if(baseVol->dim.x ==4.0)
binCount=FLIRT_BIN_COUNT_4MM;
else
binCount=FLIRT_BIN_COUNT;
//printf("%d,%d,%d\n",regVol->size.x,regVol->size.y,regVol->size.z);
//hipMemset(dBaseHist, 0,sizeof(unsigned int) * binCount);
/*float mat[16];
for(int i=0;i<4;i++)
memcpy(mat+(i*4),dAffine->data[i],sizeof(float)*4);*/
cutilCheckMsg("hipMemcpyToSymbol before cAffine failed");
hipMemcpyToSymbol( cAffine,dAffine->data , sizeof(float)*16) ;
cutilCheckMsg("hipMemcpyToSymbol after cAffine failed");
unsigned int totalPoints = regVol->size.x * regVol->size.y * regVol->size.z ;
unsigned int blockCount = binCount;
unsigned int binShift = 0;
////if the first sub bin is too big, calculate it individually
if(hBlockPivots[1] > binCount * binCount)
{
binShift=1;
if(!FLIRT_HARDWARE_INTERPOLATION || baseVol->dim.x >=4.0)
hipLaunchKernelGGL(( calcFirstBinKernel), dim3(binCount),dim3(WARP_SIZE * WARP_COUNT), binCount * WARP_COUNT * sizeof(unsigned int) , 0, dRegSortedData,hBlockPivots[1],baseVol->size,regVol->size,dJointHist);
else
hipLaunchKernelGGL(( calcFirstBinHardwareInterpolatedKernel), dim3(binCount),dim3(WARP_SIZE * WARP_COUNT), binCount * WARP_COUNT * sizeof(unsigned int) , 0, dRegSortedData,hBlockPivots[1],baseVol->size,regVol->size,dJointHist);
//CUDA_SAFE_CALL( hipDeviceSynchronize() );
cutilCheckMsg("calcFirstBinKernel failed");
unsigned int step = binCount * binCount /2;
//Reduce (binCount * binCount) to binCount
while(step > binCount/2)
{
if( step > 1024 )
blockCount = binCount/2;
else blockCount = 16;
int blockSize = step / blockCount;
hipLaunchKernelGGL(( reduceFirstBinKernel), dim3(blockCount),dim3(blockSize), 0, 0, dJointHist,step);
//CUDA_SAFE_CALL( hipDeviceSynchronize() );
step /=2 ;
}
hipMemcpy(hRegHist, dJointHist, sizeof(unsigned int)*binCount, hipMemcpyDeviceToHost);
unsigned int firstBinSum=0;
for(unsigned int i=0;i<binCount;i++)
firstBinSum += hRegHist[i];
hipMemcpy(dRegHist, &firstBinSum, sizeof(unsigned int), hipMemcpyHostToDevice);
blockCount = binCount -1;
}
//CUDA_SAFE_CALL( hipDeviceSynchronize() );
cutilCheckMsg("before calcJointHistKernel failed");
// calculate the joint hist in parallel
if(!FLIRT_HARDWARE_INTERPOLATION || baseVol->dim.x >=4.0)
hipLaunchKernelGGL(( calcJointHistKernel), dim3(blockCount),dim3(WARP_SIZE * WARP_COUNT), binCount * WARP_COUNT * sizeof(unsigned int) , 0, dRegSortedData,dBlockPivots,baseVol->size,regVol->size,dJointHist,dRegHist,totalPoints,binShift);
else
hipLaunchKernelGGL(( calcJointHistHardwareInterpolatedKernel), dim3(blockCount),dim3(WARP_SIZE * WARP_COUNT), binCount * WARP_COUNT * sizeof(unsigned int) , 0, dRegSortedData,dBlockPivots,baseVol->size,regVol->size,dJointHist,dRegHist,totalPoints,binShift);
cutilCheckMsg("after calcJointHistKernel failed");
//CUT_SAFE_CALL(cutCreateTimer(&timer));
//CUT_SAFE_CALL(cutStartTimer(timer));
hipMemcpy(hRegHist, dRegHist, sizeof(unsigned int)*binCount, hipMemcpyDeviceToHost);
unsigned int jointSum=0;
for(unsigned int i=0;i<binCount;i++)
jointSum += hRegHist[i];
float logJointSum = log((float)jointSum);
//CUDA_SAFE_CALL( hipDeviceSynchronize() );
//CUT_SAFE_CALL(cutStopTimer(timer));
//float t = cutGetAverageTimerValue(timer);
// calculate the base distribution in parallel
//calcBaseHistKernel<<<binCount,binCount>>>(dJointHist,dBaseHist);
//calcJointEntropies<<<binCount,binCount,sizeof(float)*binCount>>>(dJointHist,(float)jointSum,dEntropies,logJointSum);
//calcBaseRegEntropies<<<8,binCount/8>>>(dBaseHist,dRegHist,(float)jointSum,dEntropies,logJointSum);
//hipMemcpy(hEntropies, dEntropies, sizeof(float3)*binCount, hipMemcpyDeviceToHost);
hipMemcpy(hJointHist, dJointHist, sizeof(unsigned int)*binCount*binCount, hipMemcpyDeviceToHost);
hipMemcpy(hBaseHist, dBaseHist, sizeof(unsigned int)*binCount, hipMemcpyDeviceToHost);
memset(hBaseHist,0,sizeof(unsigned int)*binCount);
//float3 result={0,0,0};
for(unsigned int i=0;i<binCount;i++)
{
for(unsigned int j=0;j<binCount;j++)
{
if(hJointHist[i*binCount + j])jointEntropy+= -(float)hJointHist[i*binCount + j]/jointSum * (log((float)hJointHist[i*binCount + j])-logJointSum);
hBaseHist[i]+=hJointHist[j*binCount + i];
}
if(hBaseHist[i])baseEntropy+= -(float)hBaseHist[i]/jointSum * (log((float)hBaseHist[i])-logJointSum);
if(hRegHist[i])regEntropy+= -(float)hRegHist[i]/jointSum * (log((float)hRegHist[i])-logJointSum);
}
//printf("%f ",(jointEntropy-regEntropy-baseEntropy));
//printf("JointSum:%d,MI:%f,%f,%f\n ",jointSum,jointEntropy,regEntropy,baseEntropy);
//printf("%f ",t);
}
| e9db76c5676e3643676fce2a116ddd270f76934a.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <cstdlib>
#include <cstdio>
#include <GL/glew.h>
#if defined (__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/glut.h>
#endif
#include <cuda_gl_interop.h>
#include <cutil_inline.h>
#include "bbsort.cuh"
#include "costFunctionsGPU.cuh"
float4* dRegSortedData;
float* dRegVolData;
unsigned int* dJointHist;
unsigned int* dRegHistCopy;
unsigned int* dBaseHist;
unsigned int* dRegHist;
float3* dEntropies;
float3* hEntropies;
unsigned int* hRegHist;
unsigned int* hBaseHist;
unsigned int* hJointHist;
unsigned int* hBlockPivots;
unsigned int* dBlockPivots;
cudaArray *dBaseArray;
float3 hBaseCenter,hRegCenter;
#include "costFunctionsGPU_kernel.cu"
void initRegFloat4(Volume* baseVol,int3 size)
{
unsigned int binCount;
if(baseVol->dim.x ==8.0)
binCount=FLIRT_BIN_COUNT_8MM;
else if(baseVol->dim.x ==4.0)
binCount=FLIRT_BIN_COUNT_4MM;
else
binCount=FLIRT_BIN_COUNT;
const unsigned int binWidth=256/binCount;
unsigned int blockSize = 512;
unsigned int totalPoints = size.x * size.y * size.z ;
unsigned int blockCount;
if(totalPoints%blockSize==0)
blockCount=totalPoints/blockSize;
else
blockCount=totalPoints/blockSize+1;
buildRegFloat4Kernel<<<blockCount,blockSize>>>(dRegSortedData,dRegVolData,size,totalPoints);
bbSort(dRegSortedData, totalPoints);
float4* hRegSortedData = new float4[totalPoints];
cudaMemcpy(hRegSortedData, dRegSortedData, sizeof(float4) * totalPoints, cudaMemcpyDeviceToHost);
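/* Walk the sorted copy on the host and record, for each intensity bin, the
 * index of the first element whose value (stored in .w) reaches that bin;
 * hBlockPivots[b]..hBlockPivots[b+1] then delimits bin b inside the sorted
 * array, so the joint-histogram kernels can split the data by bin across
 * thread blocks. */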
hBlockPivots[0]=0;
unsigned int offset=0;
for(unsigned int target=1;target<= binCount; target++)
{
while( (unsigned int)hRegSortedData[offset].w / binWidth < target)
{
if(offset >= totalPoints)
break;
else
offset++;
}
hBlockPivots[target] = offset;
}
cudaMemcpy(dBlockPivots,hBlockPivots , sizeof(unsigned int) * (binCount + 1), cudaMemcpyHostToDevice);
delete[] hRegSortedData;
}
void initVolumeData(Volume* baseVol, Volume* regVol)
{
int binCount;
if(baseVol->dim.x ==8.0)
binCount=FLIRT_BIN_COUNT_8MM;
else if(baseVol->dim.x ==4.0)
binCount=FLIRT_BIN_COUNT_4MM;
else
binCount=FLIRT_BIN_COUNT;
//const unsigned int binWidth=256/binCount;
hEntropies = new float3[binCount];
hRegHist = new unsigned int[binCount];
hBaseHist=new unsigned int[binCount];
hJointHist=new unsigned int[binCount*binCount];
hBlockPivots=new unsigned int[binCount + 1];
float3 baseCenter={baseVol->center.x / baseVol->dim.x, baseVol->center.y / baseVol->dim.y, baseVol->center.z / baseVol->dim.z};
float3 regCenter={regVol->center.x / regVol->dim.x, regVol->center.y / regVol->dim.y, regVol->center.z / regVol->dim.z};
cudaMemcpyToSymbol( cBaseCenter, &baseCenter, sizeof(float3)) ;
cutilCheckMsg("cudaMemcpyToSymbol baseVol failed");
cudaMemcpyToSymbol( cRegCenter, ®Center, sizeof(float3)) ;
cutilCheckMsg("cudaMemcpyToSymbol regVol failed");
cudaExtent baseVolumeSize = make_cudaExtent(baseVol -> size.x, baseVol -> size.y, baseVol -> size.z);
// create 3D array
cudaChannelFormatDesc baseDesc = cudaCreateChannelDesc<float>();
cutilSafeCall( cudaMalloc3DArray(&dBaseArray, &baseDesc, baseVolumeSize) );
// copy data to 3D array
cudaMemcpy3DParms baseCopyParams = {0};
baseCopyParams.srcPtr = make_cudaPitchedPtr((void*)baseVol->data, baseVolumeSize.width*sizeof(float), baseVolumeSize.width, baseVolumeSize.height);
baseCopyParams.dstArray = dBaseArray;
baseCopyParams.extent = baseVolumeSize;
baseCopyParams.kind = cudaMemcpyHostToDevice;
cutilSafeCall( cudaMemcpy3D(&baseCopyParams) );
// set texture parameters
tBaseVolData.normalized = false; // access with normalized texture coordinates
if(!FLIRT_HARDWARE_INTERPOLATION || baseVol->dim.x >=4.0)
tBaseVolData.filterMode = cudaFilterModePoint;
else
tBaseVolData.filterMode = cudaFilterModeLinear;
tBaseVolData.addressMode[0] = cudaAddressModeWrap; // wrap texture coordinates
tBaseVolData.addressMode[1] = cudaAddressModeWrap;
tBaseVolData.addressMode[2] = cudaAddressModeWrap;
// bind array to 3D texture
cutilSafeCall(cudaBindTextureToArray(tBaseVolData, dBaseArray, baseDesc));
const unsigned int regTotalSize = (regVol->size.x * regVol->size.y * regVol->size.z);
CUDA_SAFE_CALL(cudaMalloc((void**)&dRegVolData, sizeof(float) * regTotalSize));
CUDA_SAFE_CALL(cudaMemcpy(dRegVolData, regVol->data,sizeof(float) * regTotalSize, cudaMemcpyHostToDevice));
//Alloc memory
CUDA_SAFE_CALL(cudaMalloc((void**)&dJointHist, sizeof(unsigned int) * binCount * binCount));
CUDA_SAFE_CALL(cudaMalloc((void**)&dRegHistCopy, sizeof(unsigned int) * binCount ));
CUDA_SAFE_CALL(cudaMalloc((void**)&dBaseHist, sizeof(unsigned int) * binCount));
CUDA_SAFE_CALL(cudaMalloc((void**)&dRegHist, sizeof(unsigned int) * binCount));
CUDA_SAFE_CALL(cudaMalloc((void**)&dEntropies, sizeof(float3) * binCount));
CUDA_SAFE_CALL(cudaMalloc((void**)&dBlockPivots, sizeof(unsigned int) * (binCount + 1)));
CUDA_SAFE_CALL(cudaMalloc((void**)&dRegSortedData, sizeof(float4) * regTotalSize));
initRegFloat4(baseVol,regVol -> size);
}
void freeVolumeData(bool clearCpuBuffer)
{
//Unbind textures
cudaUnbindTexture( tRegVolData );
//Free arrays
cudaFreeArray( dBaseArray );
CUDA_SAFE_CALL(cudaFree(dJointHist));
CUDA_SAFE_CALL(cudaFree(dRegHistCopy));
CUDA_SAFE_CALL(cudaFree(dBaseHist));
CUDA_SAFE_CALL(cudaFree(dRegHist));
CUDA_SAFE_CALL(cudaFree(dEntropies));
CUDA_SAFE_CALL(cudaFree(dRegVolData));
CUDA_SAFE_CALL(cudaFree(dRegSortedData));
CUDA_SAFE_CALL(cudaFree(dBlockPivots));
/*CUDA_SAFE_CALL(cudaFree(cBaseCenter));
CUDA_SAFE_CALL(cudaFree(cRegCenter));*/
delete[] hEntropies;
delete[] hRegHist;
delete[] hJointHist;
delete[] hBaseHist;
delete[] hBlockPivots;
}
void calcEntropyGPU(Volume* baseVol, Volume* regVol, Matrix44* dAffine, float &baseEntropy, float& regEntropy, float& jointEntropy)
{
//unsigned int timer;
baseEntropy=0;
regEntropy=0;
jointEntropy=0;
unsigned int binCount;
if(baseVol->dim.x ==8.0)
binCount=FLIRT_BIN_COUNT_8MM;
else if(baseVol->dim.x ==4.0)
binCount=FLIRT_BIN_COUNT_4MM;
else
binCount=FLIRT_BIN_COUNT;
//printf("%d,%d,%d\n",regVol->size.x,regVol->size.y,regVol->size.z);
//cudaMemset(dBaseHist, 0,sizeof(unsigned int) * binCount);
/*float mat[16];
for(int i=0;i<4;i++)
memcpy(mat+(i*4),dAffine->data[i],sizeof(float)*4);*/
cutilCheckMsg("cudaMemcpyToSymbol before cAffine failed");
cudaMemcpyToSymbol( cAffine,dAffine->data , sizeof(float)*16) ;
cutilCheckMsg("cudaMemcpyToSymbol after cAffine failed");
unsigned int totalPoints = regVol->size.x * regVol->size.y * regVol->size.z ;
unsigned int blockCount = binCount;
unsigned int binShift = 0;
////if the first sub bin is too big, calculate it individually
if(hBlockPivots[1] > binCount * binCount)
{
binShift=1;
if(!FLIRT_HARDWARE_INTERPOLATION || baseVol->dim.x >=4.0)
calcFirstBinKernel<<< binCount,WARP_SIZE * WARP_COUNT, binCount * WARP_COUNT * sizeof(unsigned int) >>>(dRegSortedData,hBlockPivots[1],baseVol->size,regVol->size,dJointHist);
else
calcFirstBinHardwareInterpolatedKernel<<< binCount,WARP_SIZE * WARP_COUNT, binCount * WARP_COUNT * sizeof(unsigned int) >>>(dRegSortedData,hBlockPivots[1],baseVol->size,regVol->size,dJointHist);
//CUDA_SAFE_CALL( cudaThreadSynchronize() );
cutilCheckMsg("calcFirstBinKernel failed");
unsigned int step = binCount * binCount /2;
//Reduce (binCount * binCount) to binCount
while(step > binCount/2)
{
if( step > 1024 )
blockCount = binCount/2;
else blockCount = 16;
int blockSize = step / blockCount;
reduceFirstBinKernel<<<blockCount,blockSize>>>(dJointHist,step);
//CUDA_SAFE_CALL( cudaThreadSynchronize() );
step /=2 ;
}
cudaMemcpy(hRegHist, dJointHist, sizeof(unsigned int)*binCount, cudaMemcpyDeviceToHost);
unsigned int firstBinSum=0;
for(unsigned int i=0;i<binCount;i++)
firstBinSum += hRegHist[i];
cudaMemcpy(dRegHist, &firstBinSum, sizeof(unsigned int), cudaMemcpyHostToDevice);
blockCount = binCount -1;
}
//CUDA_SAFE_CALL( cudaThreadSynchronize() );
cutilCheckMsg("before calcJointHistKernel failed");
// calculate the joint hist in parallel
if(!FLIRT_HARDWARE_INTERPOLATION || baseVol->dim.x >=4.0)
calcJointHistKernel<<< blockCount,WARP_SIZE * WARP_COUNT, binCount * WARP_COUNT * sizeof(unsigned int) >>>(dRegSortedData,dBlockPivots,baseVol->size,regVol->size,dJointHist,dRegHist,totalPoints,binShift);
else
calcJointHistHardwareInterpolatedKernel<<< blockCount,WARP_SIZE * WARP_COUNT, binCount * WARP_COUNT * sizeof(unsigned int) >>>(dRegSortedData,dBlockPivots,baseVol->size,regVol->size,dJointHist,dRegHist,totalPoints,binShift);
cutilCheckMsg("after calcJointHistKernel failed");
//CUT_SAFE_CALL(cutCreateTimer(&timer));
//CUT_SAFE_CALL(cutStartTimer(timer));
cudaMemcpy(hRegHist, dRegHist, sizeof(unsigned int)*binCount, cudaMemcpyDeviceToHost);
unsigned int jointSum=0;
for(unsigned int i=0;i<binCount;i++)
jointSum += hRegHist[i];
float logJointSum = log((float)jointSum);
//CUDA_SAFE_CALL( cudaThreadSynchronize() );
//CUT_SAFE_CALL(cutStopTimer(timer));
//float t = cutGetAverageTimerValue(timer);
// calculate the base distribution in parallel
//calcBaseHistKernel<<<binCount,binCount>>>(dJointHist,dBaseHist);
//calcJointEntropies<<<binCount,binCount,sizeof(float)*binCount>>>(dJointHist,(float)jointSum,dEntropies,logJointSum);
//calcBaseRegEntropies<<<8,binCount/8>>>(dBaseHist,dRegHist,(float)jointSum,dEntropies,logJointSum);
//cudaMemcpy(hEntropies, dEntropies, sizeof(float3)*binCount, cudaMemcpyDeviceToHost);
cudaMemcpy(hJointHist, dJointHist, sizeof(unsigned int)*binCount*binCount, cudaMemcpyDeviceToHost);
cudaMemcpy(hBaseHist, dBaseHist, sizeof(unsigned int)*binCount, cudaMemcpyDeviceToHost);
memset(hBaseHist,0,sizeof(unsigned int)*binCount);
//float3 result={0,0,0};
for(unsigned int i=0;i<binCount;i++)
{
for(unsigned int j=0;j<binCount;j++)
{
if(hJointHist[i*binCount + j])jointEntropy+= -(float)hJointHist[i*binCount + j]/jointSum * (log((float)hJointHist[i*binCount + j])-logJointSum);
hBaseHist[i]+=hJointHist[j*binCount + i];
}
if(hBaseHist[i])baseEntropy+= -(float)hBaseHist[i]/jointSum * (log((float)hBaseHist[i])-logJointSum);
if(hRegHist[i])regEntropy+= -(float)hRegHist[i]/jointSum * (log((float)hRegHist[i])-logJointSum);
}
//printf("%f ",(jointEntropy-regEntropy-baseEntropy));
//printf("JointSum:%d,MI:%f,%f,%f\n ",jointSum,jointEntropy,regEntropy,baseEntropy);
//printf("%f ",t);
}
|
8162111b5c2117efd0e3aa37c7e7e9728fba0a76.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@generated s Wed Nov 14 22:53:49 2012
@author Mark Gates
*/
#include "common_magma.h"
#include <assert.h>
#define NB 64
/*
Matrix is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
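/*
    Example: with m = 100 and NB = 64 the grid has (100 + 64 - 1)/64 = 2 blocks;
    block 1 covers rows 64..127, but its threads with i >= 100 are disabled by
    the "if ( i < m )" guard and do nothing.
*/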
__global__ void
ssymmetrize_lower( int m, float *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
float *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
float *dAend = dA + i*ldda;
while( dA < dAend ) {
*dAT = (*dA); // upper := lower
dA += ldda;
dAT += 1;
}
}
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
ssymmetrize_upper( int m, float *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
float *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
float *dAend = dA + i*ldda;
while( dA < dAend ) {
*dA = (*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
}
}
extern "C" void
magmablas_ssymmetrize( char uplo, magma_int_t m, float *dA, magma_int_t ldda )
{
/*
Purpose
=======
SSYMMETRIZE copies lower triangle to upper triangle, or vice-versa,
to make dA a general representation of a symmetric matrix.
Arguments
=========
UPLO (input) CHARACTER*1
Specifies the part of the matrix dA that is valid on input.
= 'U': Upper triangular part
= 'L': Lower triangular part
M (input) INTEGER
The number of rows of the matrix dA. M >= 0.
dA (input/output) REAL array, dimension (LDDA,M)
The m by m matrix dA.
LDDA (input) INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
===================================================================== */
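/* Example: after computing only the lower triangle of an n-by-n symmetric
   matrix dA (device pointer, leading dimension ldda), calling
   magmablas_ssymmetrize( 'L', n, dA, ldda ) fills in the upper triangle so
   dA holds the full general matrix. */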
//printf( "m %d, grid %d, threads %d\n", m, grid.x, threads.x );
if ( m == 0 )
return;
assert( m >= 0 );
assert( ldda >= m );
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB );
if ( (uplo == 'U') || (uplo == 'u') ) {
hipLaunchKernelGGL(( ssymmetrize_upper), dim3(grid), dim3(threads), 0, magma_stream , m, dA, ldda );
}
else if ( (uplo == 'L') || (uplo == 'l') ) {
hipLaunchKernelGGL(( ssymmetrize_lower), dim3(grid), dim3(threads), 0, magma_stream , m, dA, ldda );
}
else {
printf( "uplo has illegal value\n" );
exit(1);
}
}
| 8162111b5c2117efd0e3aa37c7e7e9728fba0a76.cu | /*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@generated s Wed Nov 14 22:53:49 2012
@author Mark Gates
*/
#include "common_magma.h"
#include <assert.h>
#define NB 64
/*
Matrix is m x m, and is divided into block rows, each NB x m.
Each block has NB threads.
Each thread copies one row, iterating across all columns below diagonal.
The bottom block of rows may be partially outside the matrix;
if so, rows outside the matrix (i >= m) are disabled.
*/
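/*
    Example: with m = 100 and NB = 64 the grid has (100 + 64 - 1)/64 = 2 blocks;
    block 1 covers rows 64..127, but its threads with i >= 100 are disabled by
    the "if ( i < m )" guard and do nothing.
*/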
__global__ void
ssymmetrize_lower( int m, float *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
float *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
float *dAend = dA + i*ldda;
while( dA < dAend ) {
*dAT = (*dA); // upper := lower
dA += ldda;
dAT += 1;
}
}
}
// only difference with _lower version is direction dA=dAT instead of dAT=dA.
__global__ void
ssymmetrize_upper( int m, float *dA, int ldda )
{
// dA iterates across row i and dAT iterates down column i.
int i = blockIdx.x*NB + threadIdx.x;
float *dAT = dA;
if ( i < m ) {
dA += i;
dAT += i*ldda;
float *dAend = dA + i*ldda;
while( dA < dAend ) {
*dA = (*dAT); // lower := upper
dA += ldda;
dAT += 1;
}
}
}
extern "C" void
magmablas_ssymmetrize( char uplo, magma_int_t m, float *dA, magma_int_t ldda )
{
/*
Purpose
=======
SSYMMETRIZE copies lower triangle to upper triangle, or vice-versa,
to make dA a general representation of a symmetric matrix.
Arguments
=========
UPLO (input) CHARACTER*1
Specifies the part of the matrix dA that is valid on input.
= 'U': Upper triangular part
= 'L': Lower triangular part
M (input) INTEGER
The number of rows of the matrix dA. M >= 0.
dA (input/output) REAL array, dimension (LDDA,M)
The m by m matrix dA.
LDDA (input) INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
===================================================================== */
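/* Example: after computing only the lower triangle of an n-by-n symmetric
   matrix dA (device pointer, leading dimension ldda), calling
   magmablas_ssymmetrize( 'L', n, dA, ldda ) fills in the upper triangle so
   dA holds the full general matrix. */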
//printf( "m %d, grid %d, threads %d\n", m, grid.x, threads.x );
if ( m == 0 )
return;
assert( m >= 0 );
assert( ldda >= m );
dim3 threads( NB );
dim3 grid( (m + NB - 1)/NB );
if ( (uplo == 'U') || (uplo == 'u') ) {
ssymmetrize_upper<<< grid, threads, 0, magma_stream >>>( m, dA, ldda );
}
else if ( (uplo == 'L') || (uplo == 'l') ) {
ssymmetrize_lower<<< grid, threads, 0, magma_stream >>>( m, dA, ldda );
}
else {
printf( "uplo has illegal value\n" );
exit(1);
}
}
|
26f49224cb7e67250a89b19541577d49ac2dc6a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cunn_CriterionFilter_updateOutput_kernel( float *target, float *ignored_label, int bound, int batch_size, int map_nelem, int blocks_per_sample)
{
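// Every target element equal to ignored_label is overwritten with bound + 1,
// i.e. pushed one past the last valid class index, presumably so the
// downstream criterion treats it as out of range; each sample of the batch is
// handled by blocks_per_sample thread blocks striding over its map_nelem elements.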
int i;
int sample = blockIdx.x / blocks_per_sample;
int step = blockDim.x * blocks_per_sample;
int toffset = sample * map_nelem;
int ignored_label_num = (int)(ignored_label[0]);
for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) {
if (target[toffset + i] == ignored_label_num) {
target[toffset + i] = (float) bound + 1;
}
}
} | 26f49224cb7e67250a89b19541577d49ac2dc6a7.cu | #include "includes.h"
__global__ void cunn_CriterionFilter_updateOutput_kernel( float *target, float *ignored_label, int bound, int batch_size, int map_nelem, int blocks_per_sample)
{
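// Every target element equal to ignored_label is overwritten with bound + 1,
// i.e. pushed one past the last valid class index, presumably so the
// downstream criterion treats it as out of range; each sample of the batch is
// handled by blocks_per_sample thread blocks striding over its map_nelem elements.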
int i;
int sample = blockIdx.x / blocks_per_sample;
int step = blockDim.x * blocks_per_sample;
int toffset = sample * map_nelem;
int ignored_label_num = (int)(ignored_label[0]);
for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) {
if (target[toffset + i] == ignored_label_num) {
target[toffset + i] = (float) bound + 1;
}
}
} |
9808db7b37ceae0139af649f9020b16becade62b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2019 Xilinx Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include <float.h>
#include "../../include/cuda/nndct_fix_kernels.cuh"
#include "../../include/cuda/nndct_fix_kernels.h"
#include "../../include/cuda/nndct_cuda_math.h"
#include "../../include/cuda/nndct_cu_utils.h"
#include "../../include/cuda/table_data.h"
template<typename Dtype>
__global__ static void _cuda_vai_round(const int N,
const Dtype* src,
Dtype* dst,
int method){
NNDCT_KERNEL_LOOP(index, N){
int result_ = 0;
_vai_round_device(src[index], result_, method);
dst[index] = result_;
}
}
template<typename Dtype>
void cuda_vai_round(const int N,
const Dtype* src,
Dtype* dst,
int method){
hipLaunchKernelGGL(( _cuda_vai_round), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
src,
dst,
method);
}
template
void cuda_vai_round<float>(const int N,
const float* src,
float* dst,
int method);
template
void cuda_vai_round<double>(const int N,
const double* src,
double* dst,
int method);
template<typename Dtype>
__global__ static void _fix_neuron_v1(const int N,
const Dtype* src,
const Dtype* fragpos,
Dtype* dst,
int val_min,
int val_max,
int keep_scale,
int method){
NNDCT_KERNEL_LOOP(index, N){
//method:
//1: dummy
//2: for CNN feature map
//3: for weights and bias
//4: for RNN feature map
int result_ = 0;
Dtype val_amp = pow(2, *fragpos);
_fix_neuron_v2_device(src[index],
result_,
val_min,
val_max,
val_amp,
0,
method);
if(0 != keep_scale)
dst[index] = Dtype(result_) * (1 / val_amp);
else
dst[index] = result_;
}
}
template<typename Dtype>
__global__ static void _fix_neuron_v2(const int N,
const Dtype* src,
Dtype* dst,
int val_min,
int val_max,
Dtype val_amp,
int zero_point,
int keep_scale,
int method){
NNDCT_KERNEL_LOOP(index, N){
//method:
//1: dummy
//2: for CNN feature map
//3: for weights and bias
//4: for RNN feature map
int result_ = 0;
_fix_neuron_v2_device(src[index],
result_,
val_min,
val_max,
val_amp,
zero_point,
method);
//printf( "$$$$$$$$$$$ result: %d zero_point: %d keep_scale: %d\n", result_, zero_point, keep_scale);
if(0 != keep_scale)
dst[index] = (Dtype(result_)-Dtype(zero_point)) * (1 / val_amp);
else
dst[index] = result_;
}
}
template<typename Dtype>
void cuda_fix_neuron_v1(const int N,
const Dtype* src,
const Dtype* fragpos,
Dtype* dst,
int val_min,
int val_max,
int keep_scale,
int method){
hipLaunchKernelGGL(( _fix_neuron_v1), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
src,
fragpos,
dst,
val_min,
val_max,
keep_scale,
method);
}
template
void cuda_fix_neuron_v1<float>(const int N,
const float* src,
const float* fragpos,
float* dst,
int val_min,
int val_max,
int keep_scale,
int method);
template
void cuda_fix_neuron_v1<double>(const int N,
const double* src,
const double* fragpos,
double* dst,
int val_min,
int val_max,
int keep_scale,
int method);
template<typename Dtype>
void cuda_fix_neuron_v2(const int N,
const Dtype* src,
Dtype* dst,
int val_min,
int val_max,
Dtype val_amp,
int zero_point,
int keep_scale,
int method){
hipLaunchKernelGGL(( _fix_neuron_v2), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
src,
dst,
val_min,
val_max,
val_amp,
zero_point,
keep_scale,
method);
}
template
void cuda_fix_neuron_v2<float>(const int N,
const float* src,
float* dst,
int val_min,
int val_max,
float val_amp,
int zero_point,
int keep_scale,
int method);
template
void cuda_fix_neuron_v2<double>(const int N,
const double* src,
double* dst,
int val_min,
int val_max,
double val_amp,
int zero_point,
int keep_scale,
int method);
template<typename Dtype>
void cuda_diff_S(const int N,
const Dtype* src,
Dtype* buffer,
Dtype* output,
int bitwidth,
int range,
int method){
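// Diff-S search: quantize the tensor with every candidate scale 2^s for s in
// [max_scale, max_scale + range), measure the squared error against the
// original float data, and keep the scale with the smallest error as the
// fix position written to "output".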
// Calc search range for scale
int max_scale;
Dtype fix_lb = -pow(2, bitwidth - 1) - 0.5;
Dtype fix_ub = pow(2, bitwidth - 1) - 0.5;
Dtype x_max, x_min;
cuda_max(N, src, buffer);
hipMemcpy(&x_max, buffer, sizeof(Dtype), hipMemcpyDeviceToHost);
cuda_min(N, src, buffer);
hipMemcpy(&x_min, buffer, sizeof(Dtype), hipMemcpyDeviceToHost);
// Find max_scale
// Dtype step = ::max(x_min / fix_lb, x_max / fix_ub);
// Hipify thinks std::max is kernel code so converts it to ::max
// which doesn't behave correctly on the host side
Dtype step = x_min / fix_lb;
Dtype maxs = x_max / fix_ub;
if (maxs > step) step = maxs;
if (step <= FLT_MIN) {
max_scale = 18;
} else {
max_scale = floor(log2(1 / step));
}
#if 0
printf( "$$$$$$$$$$$ bw: %d range: %d method: %d\n",
bitwidth, range, method );
printf( "$$$$$$$$$$$ max: %g min: %g\n",
x_max, x_min );
printf( "$$$$$$$$$$$ overflow scale is %d\n", max_scale );
#endif
// Find fix pos in range [max_scale + range , max_scale]
Dtype final_scale = max_scale;
Dtype fixed_diff_min = FLT_MAX;
for (int scale = max_scale; scale < max_scale + range; scale++) {
cuda_fix_neuron_v2(N,
src,
buffer,
-(1<<(bitwidth-1)),
(1<<(bitwidth-1))-1,
Dtype(pow(2, scale)),
0,
1,
method);
cuda_sub(N, src, buffer);
cuda_pow(N, buffer, Dtype(2));
Dtype fixed_diff;
cuda_sum_inplace(N, buffer);
hipMemcpy(&fixed_diff,
buffer,
sizeof(Dtype),
hipMemcpyDeviceToHost);
if (fixed_diff < fixed_diff_min) {
final_scale = scale;
fixed_diff_min = fixed_diff;
}
}
//final_scale = final_scale > 15 ? 15: final_scale;
cuda_set(1, output, final_scale);
#if 0
printf( "$$$$$$$$$$$ diffs scale is %g, setting to %p...\n",
final_scale,
output ); fflush(stdout);
#endif
}
template
void cuda_diff_S<float>(const int N,
const float* src,
float* buffer,
float* output,
int bitwidth,
int range,
int method);
template
void cuda_diff_S<double>(const int N,
const double* src,
double* buffer,
double* output,
int bitwidth,
int range,
int method);
/*
Sigmoid & Tanh table lookup (FPGA)
*/
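/*
Reading the kernels below: inputs are treated as fixed-point with "fragpos"
fractional bits, the 2048-entry table stores output samples scaled by 2^15
(indices 1024..2047 serve non-negative inputs, while the lower half is
addressed roughly in reverse for negative inputs), and fuzz = 1/32768 rescales
table entries back to real values. Inputs outside the covered range saturate
to the endpoint values.
*/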
template<typename Dtype>
__global__ static void _sigmoid_table_lookup(const int N,
const int fragpos,
const Dtype scale,
const Dtype fuzz,
const Dtype* input,
const Dtype* table,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
if (input[i] >= 8.0)
output[i] = 1.0 - fuzz;
else if (input[i] < -8.0)
output[i] = 0.0;
else {
int x = int(input[i] * scale);
int pos = 0;
if (x >= 0) {
if (fragpos >= 7)
pos = (x >> (fragpos - 7)) % 1024;
else
pos = (x << (7 - fragpos)) % 1024;
output[i] = table[pos + 1024] * fuzz;
}
else {
//if (fragpos >= 7)
// pos = (abs(x) >> (fragpos - 7)) % 1024;
//else
// pos = (x << (7 - fragpos)) % 1024;
pos = abs(int(floor(x / pow(2.0, (fragpos - 7))))) % 1024;
if (x >> fragpos == -8 && pos == 0)
output[i] = table[pos] * fuzz;
else
output[i] = table[1024 - pos] * fuzz;
}
}
}
}
template<typename Dtype>
void cuda_sigmoid_table_lookup(const int N,
const Dtype* input,
const Dtype* table,
Dtype* output,
int fragpos)
{
Dtype scale = pow(2.0, fragpos);
Dtype fuzz = 1.0 / 32768;
hipLaunchKernelGGL(( _sigmoid_table_lookup), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
fragpos,
scale,
fuzz,
input,
table,
output);
}
template
void cuda_sigmoid_table_lookup<float>(const int N,
const float* input,
const float* table,
float* output,
int fragpos);
template
void cuda_sigmoid_table_lookup<double>(const int N,
const double* input,
const double* table,
double* output,
int fragpos);
template<typename Dtype>
__global__ static void _tanh_table_lookup(const int N,
const int fragpos,
const Dtype scale,
const Dtype fuzz,
const Dtype* input,
const Dtype* table,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
if (input[i] >= 4.0)
output[i] = 1.0 - fuzz;
else if (input[i] < -4.0)
output[i] = -1.0;
else {
int x = int(input[i] * scale);
int pos = 0;
if (x >= 0) {
if (fragpos >= 8)
pos = (x >> (fragpos - 8)) % 1024;
else
pos = (x << (8 - fragpos)) % 1024;
output[i] = table[pos + 1024] * fuzz;
}
else {
//if (fragpos >= 8)
// pos = (abs(x) >> (fragpos - 8)) % 1024;
//else
// pos = (abs(x) << (8 - fragpos)) % 1024;
pos = abs(int(floor(x / pow(2.0, (fragpos - 8))))) % 1024;
if (x >> fragpos == -4 && pos == 0)
output[i] = table[pos] * fuzz;
else
output[i] = table[1024 - pos] * fuzz;
}
}
}
}
template<typename Dtype>
void cuda_tanh_table_lookup(const int N,
const Dtype* input,
const Dtype* table,
Dtype* output,
int fragpos)
{
Dtype scale = pow(2.0, fragpos);
Dtype fuzz = 1.0 / 32768;
hipLaunchKernelGGL(( _tanh_table_lookup), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
fragpos,
scale,
fuzz,
input,
table,
output);
}
template
void cuda_tanh_table_lookup<float>(const int N,
const float* input,
const float* table,
float* output,
int fragpos);
template
void cuda_tanh_table_lookup<double>(const int N,
const double* input,
const double* table,
double* output,
int fragpos);
/*
Sigmoid & Tanh simulation
*/
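/*
The *_sim helpers below emulate a bfloat16 datapath: exp(x) is computed as
2^z * exp(x - z*ln2) with z an integer chosen near x/ln2, the CF_P*
coefficients evaluating the residual exponential via Horner's rule, and every
intermediate rounded to bfloat16 through as_bfloat16(). sigmoid_sim and
tanh_sim are then built from exp_sim plus a Newton-iteration reciprocal
(inv_sim), so the results track a reduced-precision implementation rather
than full-precision libm.
*/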
#define C_P0 1.98364257812E-4
#define CF_P1 1.3981999507E-3
#define CF_P2 8.3334519073E-3
#define CF_P3 4.1665795894E-2
#define CF_P4 1.6666665459E-1
#define CF_P5 5.0000001201E-1
__device__ static inline short bfloat16(float x){
int itmp = *(int*)&x;
if((itmp&0x00008000) == 0x00008000)
itmp += 0x00010000;
return (short)((itmp>>16)&0xFFFF);
}
__device__ static inline float rbfloat(short x){
int itmp = (x<<16)&0xFFFF0000;
return *((float *)&itmp);
}
__device__ static inline float as_bfloat16(float x){
int itmp = *(int*)&x;
if((itmp&0x00008000) == 0x00008000)
itmp += 0x00010000;
itmp &= 0xFFFF0000;
return *(float *)&itmp;
}
__device__ static inline float as_bfloat16_numpy(float x){
int itmp = *(int*)&x;
itmp &= 0xFFFF0000;
return *(float *)&itmp;
}
__device__ static inline int float2int_cuda(float x){
return *(int*)&x;
}
__device__ static inline float int2float_cuda(int x){
return *((float *)&x);
}
__device__ static inline float int2bfloat_cuda(int x){
int itmp = x;
if ((itmp&0x00008000) == 0x00008000) { // half even
if ((itmp&0xFFFF) > 0x00008000 || (((itmp&0xFFFF) == 0x00008000) && (itmp&0x10000) == 0x10000)){
itmp += 0x10000;
}
}
itmp &= 0xFFFF0000;
return *(float *)&itmp;
}
__device__ static inline float float2bfloat_cuda(float x){
int itmp = *(int*)&x;
if ((itmp&0x00008000) == 0x00008000) { // half even
if ((itmp&0xFFFF) > 0x00008000 || (((itmp&0xFFFF) == 0x00008000) && (itmp&0x10000) == 0x10000)){
itmp += 0x10000;
}
}
itmp &= 0xFFFF0000;
return *(float *)&itmp;
}
__device__ static inline int float2short_cuda(float x){
float itmp = float2bfloat_cuda(x);
return *(int*)&itmp;
}
// downshift one bit for short x
__device__ static inline int short_downshift_onebit_cuda(int x){ // input: fake short
int y, a, b;
y = x >> 17;
a = (x >> 16) & 1;
if (y&1 == 1) // half2even
y += a;
return y << 17; // fake short
}
__device__ float exp_sim(short x)
{
float ftmp, fz, fres, fx;
ftmp = rbfloat(x);
fres = ftmp*1.4375+0.5;
//round
fres += 12582912.0;
fres -= 12582912.0;
//round end
fz = fres;
fx = ftmp - fres*0.69140625;
fres = as_bfloat16(fx)*C_P0 + CF_P1;
fres = as_bfloat16(fx)*as_bfloat16(fres) + CF_P2;
fres = as_bfloat16(fx)*as_bfloat16(fres) + CF_P3;
fres = as_bfloat16(fx)*as_bfloat16(fres) + CF_P4;
fres = as_bfloat16(fx)*as_bfloat16(fres) + CF_P5;
fres = as_bfloat16(fx)*as_bfloat16(fres) + 1.0;
fres = as_bfloat16(fx)*as_bfloat16(fres) + 1.0;
fres = as_bfloat16(fres)*as_bfloat16(pow(2, fz));
return as_bfloat16(fres);
}
__device__ float inv_sim(float x)
{
float a = x;
int tt = 0x7F000000 - *(int*)&a;
float r = *(float *)&tt;
float m;
a = as_bfloat16(a);
for (int k=0; k<4; k++){
m = 2.0 - as_bfloat16(a)*as_bfloat16(r);
r = as_bfloat16(m)*as_bfloat16(r);
}
return r;
}
__device__ float sigmoid_sim(short x)
{
float fres = exp_sim(x);
float r = inv_sim(as_bfloat16(fres)+1.0);
fres = as_bfloat16(fres)*as_bfloat16(r);
return as_bfloat16(fres);
}
__device__ short sigmoid_short_sim(short x, int ishift, int oshift)
{
float fx = (float)x;
float iscale = pow(2, -ishift);
float res = as_bfloat16(iscale)*as_bfloat16(fx);
res = sigmoid_sim(bfloat16(res));
float oscale = pow(2, oshift);
res = as_bfloat16(oscale)*as_bfloat16(res);
float fy = as_bfloat16(res) + 12582912.0;
int y = *((int*)&fy);
y -= 0x4B400000;
return y&0xFFFF;
}
template<typename Dtype>
__global__ static void _sigmoid_simulation(const int N,
const int fragpos,
const Dtype scale,
const Dtype fuzz,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
if (input[i] >= 8.0)
output[i] = 1.0 - fuzz;
else if (input[i] < -8.0)
output[i] = 0.0;
else {
int x = int(input[i] * scale);
output[i] = sigmoid_short_sim(x, fragpos, 15) * fuzz;
}
}
}
template<typename Dtype>
void cuda_sigmoid_simulation(const int N,
const Dtype* input,
Dtype* output,
int fragpos)
{
Dtype scale = pow(2.0, fragpos);
Dtype fuzz = 1.0 / 32768;
hipLaunchKernelGGL(( _sigmoid_simulation), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
fragpos,
scale,
fuzz,
input,
output);
}
template
void cuda_sigmoid_simulation<float>(const int N,
const float* input,
float* output,
int fragpos);
template
void cuda_sigmoid_simulation<double>(const int N,
const double* input,
double* output,
int fragpos);
__device__ float tanh_sim(short x)
{
float fres = exp_sim(x);
fres = as_bfloat16(fres)*as_bfloat16(fres);
float r = inv_sim(as_bfloat16(fres)+1.0);
fres = as_bfloat16(fres)-1.0;
fres = as_bfloat16(fres)*as_bfloat16(r);
return as_bfloat16(fres);
}
__device__ short tanh_short_sim(short x, int ishift, int oshift)
{
float fx = (float)x;
float iscale = pow(2, -ishift);
float res = as_bfloat16(iscale)*as_bfloat16(fx);
res = tanh_sim(bfloat16(res));
float oscale = pow(2, oshift);
res = as_bfloat16(oscale)*as_bfloat16(res);
float fy = as_bfloat16(res) + 12582912.0;
int y = *((int*)&fy);
y -= 0x4B400000;
return y&0xFFFF;
}
template<typename Dtype>
__global__ static void _tanh_simulation(const int N,
const int fragpos,
const Dtype scale,
const Dtype fuzz,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
if (input[i] >= 4.0)
output[i] = 1.0 - fuzz;
else if (input[i] < -4.0)
output[i] = -1.0;
else {
int x = int(input[i] * scale);
output[i] = tanh_short_sim(x, fragpos, 15) * fuzz;
}
}
}
template<typename Dtype>
void cuda_tanh_simulation(const int N,
const Dtype* input,
Dtype* output,
int fragpos)
{
Dtype scale = pow(2.0, fragpos);
Dtype fuzz = 1.0 / 32768;
hipLaunchKernelGGL(( _tanh_simulation), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
fragpos,
scale,
fuzz,
input,
output);
}
template
void cuda_tanh_simulation<float>(const int N,
const float* input,
float* output,
int fragpos);
template
void cuda_tanh_simulation<double>(const int N,
const double* input,
double* output,
int fragpos);
template<typename Dtype>
__global__ static void _softmax_exp_approximate(const int N,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
Dtype u;
Dtype v;
if (input[i] >= 0){
u = floor(input[i]);
v = input[i] - u;
}
else{
u = ceil(input[i]);
v = input[i] - u;
}
if (v <= -0.75)
output[i] = ((12409.0/pow(2.0, 15)) * v + 28747.0/pow(2.0, 15))/pow(2.0, -u);
else if (v <= -0.5)
output[i] = ((14759.0/pow(2.0, 15)) * v + 30497.0/pow(2.0, 15))/pow(2.0, -u);
else if (v <= -0.25)
output[i] = ((17551.0/pow(2.0, 15)) * v + 31880.0/pow(2.0, 15))/pow(2.0, -u);
else {
output[i] = ((20873.0/pow(2.0, 15)) * v + 32696.0/pow(2.0, 15))/pow(2.0, -u);
}
}
}
/*
Hardware-PL softmax
*/
template<typename Dtype>
void cuda_softmax_exp_approximate(const int N,
const Dtype* input,
Dtype* output)
{
hipLaunchKernelGGL(( _softmax_exp_approximate), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
input,
output);
}
template
void cuda_softmax_exp_approximate<float>(const int N,
const float* input,
float* output);
template
void cuda_softmax_exp_approximate<double>(const int N,
const double* input,
double* output);
template<typename Dtype>
__global__ static void _softmax_lod(const int N,
const Dtype* input,
Dtype* output)
{
NNDCT_KERNEL_LOOP(i, N){
float lod_s = 0;
float s_int = floor(input[i]);
while(s_int >= 2){
lod_s += 1;
s_int /= 2;
}
output[i] = lod_s;
}
}
template<typename Dtype>
void cuda_softmax_lod(const int N,
const Dtype* input,
Dtype* output)
{
hipLaunchKernelGGL(( _softmax_lod), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
input,
output);
}
template
void cuda_softmax_lod<float>(const int N,
const float* input,
float* output);
template
void cuda_softmax_lod<double>(const int N,
const double* input,
double* output);
/*
Liyi softmax
*/
template<typename Dtype>
__global__ static void _softmax_simulation_part_1(const int N,
const Dtype* input,
Dtype* output)
{
float temp;
int itemp;
short bf16hex;
float fres;
NNDCT_KERNEL_LOOP(i, N){
if (input[i] <= -80)
temp = -80;
else{
temp = input[i];
}
itemp = *((int *)&temp);
bf16hex = (itemp>>16) & 0xFFFF;
fres = exp_sim(bf16hex);
output[i] = fres;
}
}
template<typename Dtype>
void cuda_softmax_simulation_part_1(const int N,
const Dtype* input,
Dtype* output)
{
hipLaunchKernelGGL(( _softmax_simulation_part_1), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
input,
output);
}
template
void cuda_softmax_simulation_part_1<float>(const int N,
const float* input,
float* output);
template
void cuda_softmax_simulation_part_1<double>(const int N,
const double* input,
double* output);
template<typename Dtype>
__global__ static void _softmax_simulation_part_2(const int N,
const Dtype* sum,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
float r = inv_sim(as_bfloat16(sum[i]));
output[i] = as_bfloat16(r);
}
}
template<typename Dtype>
void cuda_softmax_simulation_part_2(const int N,
const Dtype* sum,
Dtype* output)
{
hipLaunchKernelGGL(( _softmax_simulation_part_2), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
sum,
output);
}
template
void cuda_softmax_simulation_part_2<float>(const int N,
const float* sum,
float* output);
template
void cuda_softmax_simulation_part_2<double>(const int N,
const double* sum,
double* output);
/*
Sigmoid & Tanh & LogSoftmax Table Look up AIE2
*/
#define STEP 0.0009765625f // 1/1024
#define LN2 0.69314718056f
__device__ static inline float short_to_float(short x){
int itmp = (x<<16)&0xFFFF0000;
float f = *((float *)&itmp);
return f;
}
__device__ static inline short clip_int16(float x){
short res;
if(x > 32767)
res = 32767;
else if(x < -32768)
res = -32768;
else
res = short(x);
return res;
}
__device__ float vector_inv(float x){
short x_as_int16 = bfloat16(x);
short num = 0x3F80;
short res_as_int16 = 2*num - x_as_int16;
return short_to_float(res_as_int16);
}
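// compute_inv: reciprocal from the float32 bit fields: the exponent is reflected
// (253 - exponent) and the top 7 mantissa bits (with rounding) index the m_inv_lut
// table; the two parts are packed back into a bfloat16 bit pattern.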
__device__ float compute_inv(float x){
unsigned int *B_x;
unsigned int exp_mask = 0x7F800000;
unsigned int mantissa_mask = 0x007FFFFF;
unsigned int mantissa_Q = 0x00008000;
unsigned char exponent, mantissa;
unsigned int inv_exponent;
unsigned short inv_x_val;
float inv_x_float;
B_x = (unsigned int*)&x;
exponent = (*B_x & exp_mask) >> 23;
mantissa = ((*B_x & mantissa_Q)==0) ? ((*B_x & mantissa_mask)>>16) : ((*B_x & mantissa_mask)>>16)+1;
inv_exponent = 253-exponent;
if(mantissa > 127)
mantissa = 127;
inv_x_val = (inv_exponent<<7) + m_inv_lut[mantissa];
inv_x_float = short_to_float(inv_x_val);
return inv_x_float;
}
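// compute_exp: exp() of a 16-bit fixed-point argument from two look-up tables
// (exp(a+b) = exp(a)*exp(b)): the high byte indexes s_ilut_ab, the low byte indexes
// s_flut_ab (indices remapped to the tables' layout), and the two values are multiplied.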
__device__ float compute_exp(short x){
unsigned short x_no_sign, h_8bit, l_8bit, h, l;
float f1, f2, res;
x_no_sign = (unsigned short)x;
h_8bit = (x_no_sign>>8)&0X00FF;
l_8bit = x_no_sign&0X00FF;
//if(l_8bit>=0x00E0 && l_8bit <=0x00FF)
// h_8bit += 0x0001;
h = h_8bit/8*16 + h_8bit%8;
l = l_8bit/8*16 + l_8bit%8;
f1 = short_to_float(s_ilut_ab[h]);
f2 = short_to_float(s_flut_ab[l]);
res = as_bfloat16(f1*f2);
return res;
}
__device__ float compute_exp_soft(short x){
unsigned short x_no_sign, h_8bit, l_8bit, h, l;
float f1, f2, res;
x_no_sign = (unsigned short)x;
h_8bit = (x_no_sign>>8)&0X00FF;
l_8bit = x_no_sign&0X00FF;
//if(l_8bit>=0x00E0 && l_8bit <=0x00FF)
// h_8bit += 0x0001;
h = h_8bit/8*16 + h_8bit%8;
l = l_8bit/8*16 + l_8bit%8;
f1 = short_to_float(s_ilut_cd[h]);
f2 = short_to_float(s_flut_cd[l]);
res = as_bfloat16(f1*f2);
return res;
}
// cubic approximation of ln(x) in range [1, 2]:
__device__ static inline float small_ln(float x) {
x -= 1.0f;
return 0.6931471805599453f*x*(1.4201157697141027f + x*(-0.5747927782450741f + x*(0.15468105905881002f)));
}
//for ln(x) with x>0
__device__ float fast_ln(float x){
unsigned char exponent;
float mantissa, ln_mantissa;
float res_ln;
int x_int = *(int *)&x;
exponent = (x_int&0x7f800000) >> 23;
x_int &= 0x007FFFFF;//mask away the exp
x_int |= 0x3F800000;//set the exp to 127
mantissa = *(float *)&x_int;
ln_mantissa = small_ln(mantissa);
res_ln = ln_mantissa+(exponent-127)*LN2;
return as_bfloat16(res_ln);
}
template<typename Dtype>
__global__ static void _sigmoid_table_lookup_aie2(const int N,
const Dtype fuzz,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
short in;
float exp_sigm;
float denom_sigm, denom_inv_sigm;
float sigmoid, in_float, res_float;
in_float = input[i] * pow(2.0, 12);
in = clip_int16(in_float);
exp_sigm = compute_exp(in);
denom_sigm = exp_sigm + 1.0;
denom_inv_sigm = vector_inv(denom_sigm);
sigmoid = as_bfloat16(exp_sigm * denom_inv_sigm);
res_float = as_bfloat16(sigmoid * pow(2.0, 15));
output[i] = clip_int16(res_float)* pow(2.0, -15);
}
}
template<typename Dtype>
void cuda_sigmoid_table_lookup_aie2(const int N,
const Dtype* input,
Dtype* output,
int fragpos)
{
Dtype fuzz = 1.0 / 32768;
hipLaunchKernelGGL(( _sigmoid_table_lookup_aie2), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
fuzz,
input,
output);
}
template
void cuda_sigmoid_table_lookup_aie2<float>(const int N,
const float* input,
float* output,
int fragpos);
template
void cuda_sigmoid_table_lookup_aie2<double>(const int N,
const double* input,
double* output,
int fragpos);
template<typename Dtype>
__global__ static void _tanh_table_lookup_aie2(const int N,
const Dtype fuzz,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
short in;
float temp_tanh;
float denom_inv_tanh;
float tanh, in_float, res_float;
short res;
in_float = input[i] * pow(2.0, 12);
in = clip_int16(in_float);
temp_tanh = compute_exp(in);
temp_tanh = temp_tanh * temp_tanh;//e^2x
temp_tanh = as_bfloat16(temp_tanh);
denom_inv_tanh = vector_inv(temp_tanh + 1.0);//1/(e^2x + 1)
temp_tanh = as_bfloat16(temp_tanh) - 1.0;//e^2x-1
tanh = as_bfloat16(temp_tanh) * denom_inv_tanh;
res_float = as_bfloat16(tanh * pow(2.0, 15));
res = clip_int16(res_float);
output[i] = res* pow(2.0, -15);
}
}
template<typename Dtype>
void cuda_tanh_table_lookup_aie2(const int N,
const Dtype* input,
Dtype* output,
int fragpos)
{
Dtype scale = pow(2.0, fragpos);
Dtype fuzz = 1.0 / 32768;
hipLaunchKernelGGL(( _tanh_table_lookup_aie2), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
fuzz,
input,
output);
}
template
void cuda_tanh_table_lookup_aie2<float>(const int N,
const float* input,
float* output,
int fragpos);
template
void cuda_tanh_table_lookup_aie2<double>(const int N,
const double* input,
double* output,
int fragpos);
template<typename Dtype>
__global__ static void _exp_appr_aie2(const int N,
const Dtype* input,
Dtype* output,
const int bit_width) {
NNDCT_KERNEL_LOOP(i, N){
float input_f;
if(input[i] < -63.0){
input_f = -63;
}
else{
input_f = input[i];
}
output[i] = compute_exp_soft(short(input_f * -1024));
}
}
template<typename Dtype>
void cuda_exp_appr_aie2(const int N,
const Dtype* input,
Dtype* output,
const int bit_width)
{
hipLaunchKernelGGL(( _exp_appr_aie2), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
input,
output,
bit_width);
}
template
void cuda_exp_appr_aie2<float>(const int N,
const float* input,
float* output,
const int bit_width);
template
void cuda_exp_appr_aie2<double>(const int N,
const double* input,
double* output,
const int bit_width);
template<typename Dtype>
__global__ static void _log_softmax_fast_ln(const int N,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
output[i] = fast_ln(input[i]);
}
}
template<typename Dtype>
void cuda_log_softmax_fast_ln(const int N,
const Dtype* input,
Dtype* output)
{
hipLaunchKernelGGL(( _log_softmax_fast_ln), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
input,
output);
}
template
void cuda_log_softmax_fast_ln<float>(const int N,
const float* input,
float* output);
template
void cuda_log_softmax_fast_ln<double>(const int N,
const double* input,
double* output);
template<typename Dtype>
__global__ static void _log_softmax_sub(const int N,
const Dtype* input,
Dtype* output,
const Dtype* sum) {
NNDCT_KERNEL_LOOP(i, N){
output[i] = as_bfloat16(as_bfloat16((float)input[i])-sum[0]);
}
}
template<typename Dtype>
void cuda_log_softmax_sub(const int N,
const Dtype* input,
Dtype* output,
const Dtype* sum)
{
hipLaunchKernelGGL(( _log_softmax_sub), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
input,
output,
sum);
}
template
void cuda_log_softmax_sub<float>(const int N,
const float* input,
float* output,
const float* sum);
template
void cuda_log_softmax_sub<double>(const int N,
const double* input,
double* output,
const double* sum);
// aie sqrt(x) = x*(1/sqrt(x)) with Newton iteration for 1/sqrt(x)
__device__ float _sqrt(const float x){
float x2, y, y3h, out;
int i;
x2 = x*0.5;
y = x;
i = float2int_cuda(y); // bitwise float32 to int32
i = (0x5f37 - (i >> 17)) << 16; // int32 -> int16 -> int32
y = int2float_cuda(i); // initial value: bitwise int32 to float32
// one step Newton iteration: y = y*(1.5 - (x2*y*y)) = 1.5*y - x2*y*y*y
y3h = 1.5*y; // float32
y3h = float2bfloat_cuda(y3h); // bfloat with 32 bitwidth
out = y*x2; // float32
out = float2bfloat_cuda(out); // bfloat with 32 bitwidth
out = out*y; // float32
out = float2bfloat_cuda(out); // bfloat with 32 bitwidth
out = out*y;
out = float2bfloat_cuda(out); // bfloat with 32 bitwidth
out = y3h - out; // float32: 1/sqrt(x) = 1.5*y - x2*y*y*y
out = float2bfloat_cuda(out); // bfloat with 32 bitwidth
out = x*out; // sqrt(x) = x*(1/sqrt(x))
out = float2bfloat_cuda(out); // bfloat with 32 bitwidth
return out;
}
template<typename Dtype>
__global__ static void _aie_sqrt(const int N,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
output[i] = _sqrt(input[i]);
}
}
template<typename Dtype>
void cuda_aie_sqrt(const int N,
const Dtype* input,
Dtype* output)
{
hipLaunchKernelGGL(( _aie_sqrt), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
input,
output);
}
template
void cuda_aie_sqrt<float>(const int N,
const float* input,
float* output);
template
void cuda_aie_sqrt<double>(const int N,
const double* input,
double* output);
/*
AIE2 isqrt, bfloat16 iteration
*/
__device__ float _isqrt(float x){
float x2, y, y2, mul2, mul, sub, threehalfs;
int i;
x2 = x*0.5;
x2 = float2bfloat_cuda(x2); // bitwise float32 to bfloat16
y = x;
i = float2short_cuda(y); // bitwise float32 to short (int16)
i = (0x5f37 - (short_downshift_onebit_cuda(i) >> 17)) << 16; // fake short
y = int2bfloat_cuda(i); // initial value: bitwise short to bfloat16
threehalfs = float2bfloat_cuda(1.5); // bfloat16
// 4-steps-Newton iteration: y = y*(1.5 - (x2*y*y))
for(int i=0; i<4; i++){
y2 = y*y;
y2 = float2bfloat_cuda(y2); // bfloat with 32 bitwidth
mul2 = x2*y2;
mul2 = float2bfloat_cuda(mul2);
sub = threehalfs - mul2;
sub = float2bfloat_cuda(sub);
mul = y*sub;
y = float2bfloat_cuda(mul);
}
return y;
}
template<typename Dtype>
__global__ static void _aie_isqrt(const int N,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
output[i] = _isqrt(input[i]);
}
}
template<typename Dtype>
void cuda_aie_isqrt(const int N,
const Dtype* input,
Dtype* output)
{
hipLaunchKernelGGL(( _aie_isqrt), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
input,
output);
}
template
void cuda_aie_isqrt<float>(const int N,
const float* input,
float* output);
template
void cuda_aie_isqrt<double>(const int N,
const double* input,
double* output);
/*
Layernorm isqrt AIE2, float32 iteration
*/
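// isqrt: classic fast inverse square root -- initial bit-level estimate from the
// 0x5f3759df magic constant, refined by four Newton steps y = y*(1.5 - 0.5*x*y*y),
// all in plain float32.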
__device__ float isqrt(float x){
float x2, y, threehalfs;
int i;
x2 = x*0.5;
y = x;
threehalfs = 1.5;
i = float2int_cuda(y); // bitwise float32 to int32
i = 0x5f3759df - (i >> 1);
y = int2float_cuda(i); // bitwise int32 to float32
y = y*(threehalfs - (x2*y*y)); // Newton steps
y = y*(threehalfs - (x2*y*y));
y = y*(threehalfs - (x2*y*y));
y = y*(threehalfs - (x2*y*y));
return y;
}
template<typename Dtype>
__global__ static void _layernorm_isqrt(const int N,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
output[i] = isqrt(input[i]);
}
}
template<typename Dtype>
void cuda_layernorm_isqrt(const int N,
const Dtype* input,
Dtype* output)
{
hipLaunchKernelGGL(( _layernorm_isqrt), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
input,
output);
}
template
void cuda_layernorm_isqrt<float>(const int N,
const float* input,
float* output);
template
void cuda_layernorm_isqrt<double>(const int N,
const double* input,
double* output);
/*
Layernorm Inv Sqrt AIE2
*/
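// invsqrt: bfloat16 fast inverse square root: 16-bit magic constant 0x5f37 for the
// initial estimate, then one Newton step written as 1.5*y0 - (x/2)*y0^3, with every
// intermediate rounded to bfloat16.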
__device__ float invsqrt(float x){
x = as_bfloat16_numpy(x);
short i;
float x1,x2;
float y1,y2,y;
x1 = as_bfloat16_numpy(x * 0.5);
x2 = as_bfloat16_numpy(x);
i = bfloat16(x2);
i = 0x5f37 - ( i >> 1 );
x2 = rbfloat(i);
y2 = as_bfloat16_numpy(x2 * 1.5);
y1 = x1*x2*x2*x2;
y1 = as_bfloat16_numpy(y1);
y = y2-y1;
y = as_bfloat16_numpy(y);
return y;
}
template<typename Dtype>
__global__ static void _layernorm_invsqrt(const int N,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
output[i] = invsqrt(input[i]);
}
}
template<typename Dtype>
void cuda_layernorm_invsqrt(const int N,
const Dtype* input,
Dtype* output)
{
hipLaunchKernelGGL(( _layernorm_invsqrt), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
input,
output);
}
template
void cuda_layernorm_invsqrt<float>(const int N,
const float* input,
float* output);
template
void cuda_layernorm_invsqrt<double>(const int N,
const double* input,
double* output);
/*
AIE2 Softmax
*/
template<typename Dtype>
__global__ static void _inverse_aie2(const int N,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
output[i] = compute_inv(input[i]);
}
}
template<typename Dtype>
void cuda_inverse_aie2(const int N,
const Dtype* input,
Dtype* output)
{
hipLaunchKernelGGL(( _inverse_aie2), dim3(NNDCT_GET_BLOCKS(N)),dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N,
input,
output);
}
template
void cuda_inverse_aie2<float>(const int N,
const float* input,
float* output);
template
void cuda_inverse_aie2<double>(const int N,
const double* input,
double* output);
| 9808db7b37ceae0139af649f9020b16becade62b.cu |
/*
* Copyright 2019 Xilinx Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include <float.h>
#include "../../include/cuda/nndct_fix_kernels.cuh"
#include "../../include/cuda/nndct_fix_kernels.h"
#include "../../include/cuda/nndct_cuda_math.h"
#include "../../include/cuda/nndct_cu_utils.h"
#include "../../include/cuda/table_data.h"
template<typename Dtype>
__global__ static void _cuda_vai_round(const int N,
const Dtype* src,
Dtype* dst,
int method){
NNDCT_KERNEL_LOOP(index, N){
int result_ = 0;
_vai_round_device(src[index], result_, method);
dst[index] = result_;
}
}
template<typename Dtype>
void cuda_vai_round(const int N,
const Dtype* src,
Dtype* dst,
int method){
_cuda_vai_round<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
src,
dst,
method);
}
template
void cuda_vai_round<float>(const int N,
const float* src,
float* dst,
int method);
template
void cuda_vai_round<double>(const int N,
const double* src,
double* dst,
int method);
template<typename Dtype>
__global__ static void _fix_neuron_v1(const int N,
const Dtype* src,
const Dtype* fragpos,
Dtype* dst,
int val_min,
int val_max,
int keep_scale,
int method){
NNDCT_KERNEL_LOOP(index, N){
//method:
//1: dummy
//2: for CNN feature map
//3: for weights and bias
//4: for RNN feature map
int result_ = 0;
Dtype val_amp = pow(2, *fragpos);
_fix_neuron_v2_device(src[index],
result_,
val_min,
val_max,
val_amp,
0,
method);
if(0 != keep_scale)
dst[index] = Dtype(result_) * (1 / val_amp);
else
dst[index] = result_;
}
}
template<typename Dtype>
__global__ static void _fix_neuron_v2(const int N,
const Dtype* src,
Dtype* dst,
int val_min,
int val_max,
Dtype val_amp,
int zero_point,
int keep_scale,
int method){
NNDCT_KERNEL_LOOP(index, N){
//method:
//1: dummy
//2: for CNN feature map
//3: for weights and bias
//4: for RNN feature map
int result_ = 0;
_fix_neuron_v2_device(src[index],
result_,
val_min,
val_max,
val_amp,
zero_point,
method);
//printf( "$$$$$$$$$$$ result: %d zero_point: %d keep_scale: %d\n", result_, zero_point, keep_scale);
if(0 != keep_scale)
dst[index] = (Dtype(result_)-Dtype(zero_point)) * (1 / val_amp);
else
dst[index] = result_;
}
}
template<typename Dtype>
void cuda_fix_neuron_v1(const int N,
const Dtype* src,
const Dtype* fragpos,
Dtype* dst,
int val_min,
int val_max,
int keep_scale,
int method){
_fix_neuron_v1<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
src,
fragpos,
dst,
val_min,
val_max,
keep_scale,
method);
}
template
void cuda_fix_neuron_v1<float>(const int N,
const float* src,
const float* fragpos,
float* dst,
int val_min,
int val_max,
int keep_scale,
int method);
template
void cuda_fix_neuron_v1<double>(const int N,
const double* src,
const double* fragpos,
double* dst,
int val_min,
int val_max,
int keep_scale,
int method);
template<typename Dtype>
void cuda_fix_neuron_v2(const int N,
const Dtype* src,
Dtype* dst,
int val_min,
int val_max,
Dtype val_amp,
int zero_point,
int keep_scale,
int method){
_fix_neuron_v2<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
src,
dst,
val_min,
val_max,
val_amp,
zero_point,
keep_scale,
method);
}
template
void cuda_fix_neuron_v2<float>(const int N,
const float* src,
float* dst,
int val_min,
int val_max,
float val_amp,
int zero_point,
int keep_scale,
int method);
template
void cuda_fix_neuron_v2<double>(const int N,
const double* src,
double* dst,
int val_min,
int val_max,
double val_amp,
int zero_point,
int keep_scale,
int method);
template<typename Dtype>
void cuda_diff_S(const int N,
const Dtype* src,
Dtype* buffer,
Dtype* output,
int bitwidth,
int range,
int method){
// Calc search range for scale
int max_scale;
Dtype fix_lb = -pow(2, bitwidth - 1) - 0.5;
Dtype fix_ub = pow(2, bitwidth - 1) - 0.5;
Dtype x_max, x_min;
cuda_max(N, src, buffer);
cudaMemcpy(&x_max, buffer, sizeof(Dtype), cudaMemcpyDeviceToHost);
cuda_min(N, src, buffer);
cudaMemcpy(&x_min, buffer, sizeof(Dtype), cudaMemcpyDeviceToHost);
// Find max_scale
// Dtype step = std::max(x_min / fix_lb, x_max / fix_ub);
// Hipify thinks std::max is kernel code so converts it to ::max
// which doesn't behave correctly on the host side
Dtype step = x_min / fix_lb;
Dtype maxs = x_max / fix_ub;
if (maxs > step) step = maxs;
if (step <= FLT_MIN) {
max_scale = 18;
} else {
max_scale = floor(log2(1 / step));
}
#if 0
printf( "$$$$$$$$$$$ bw: %d range: %d method: %d\n",
bitwidth, range, method );
printf( "$$$$$$$$$$$ max: %g min: %g\n",
x_max, x_min );
printf( "$$$$$$$$$$$ overflow scale is %d\n", max_scale );
#endif
// Find fix pos in range [max_scale + range , max_scale]
Dtype final_scale = max_scale;
Dtype fixed_diff_min = FLT_MAX;
for (int scale = max_scale; scale < max_scale + range; scale++) {
cuda_fix_neuron_v2(N,
src,
buffer,
-(1<<(bitwidth-1)),
(1<<(bitwidth-1))-1,
Dtype(pow(2, scale)),
0,
1,
method);
cuda_sub(N, src, buffer);
cuda_pow(N, buffer, Dtype(2));
Dtype fixed_diff;
cuda_sum_inplace(N, buffer);
cudaMemcpy(&fixed_diff,
buffer,
sizeof(Dtype),
cudaMemcpyDeviceToHost);
if (fixed_diff < fixed_diff_min) {
final_scale = scale;
fixed_diff_min = fixed_diff;
}
}
//final_scale = final_scale > 15 ? 15: final_scale;
cuda_set(1, output, final_scale);
#if 0
printf( "$$$$$$$$$$$ diffs scale is %g, setting to %p...\n",
final_scale,
output ); fflush(stdout);
#endif
}
template
void cuda_diff_S<float>(const int N,
const float* src,
float* buffer,
float* output,
int bitwidth,
int range,
int method);
template
void cuda_diff_S<double>(const int N,
const double* src,
double* buffer,
double* output,
int bitwidth,
int range,
int method);
/*
Sigmoid & Tanh table look up FPGA
*/
template<typename Dtype>
__global__ static void _sigmoid_table_lookup(const int N,
const int fragpos,
const Dtype scale,
const Dtype fuzz,
const Dtype* input,
const Dtype* table,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
if (input[i] >= 8.0)
output[i] = 1.0 - fuzz;
else if (input[i] < -8.0)
output[i] = 0.0;
else {
int x = int(input[i] * scale);
int pos = 0;
if (x >= 0) {
if (fragpos >= 7)
pos = (x >> (fragpos - 7)) % 1024;
else
pos = (x << (7 - fragpos)) % 1024;
output[i] = table[pos + 1024] * fuzz;
}
else {
//if (fragpos >= 7)
// pos = (abs(x) >> (fragpos - 7)) % 1024;
//else
// pos = (x << (7 - fragpos)) % 1024;
pos = abs(int(floor(x / pow(2.0, (fragpos - 7))))) % 1024;
if (x >> fragpos == -8 && pos == 0)
output[i] = table[pos] * fuzz;
else
output[i] = table[1024 - pos] * fuzz;
}
}
}
}
template<typename Dtype>
void cuda_sigmoid_table_lookup(const int N,
const Dtype* input,
const Dtype* table,
Dtype* output,
int fragpos)
{
Dtype scale = pow(2.0, fragpos);
Dtype fuzz = 1.0 / 32768;
_sigmoid_table_lookup<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
fragpos,
scale,
fuzz,
input,
table,
output);
}
template
void cuda_sigmoid_table_lookup<float>(const int N,
const float* input,
const float* table,
float* output,
int fragpos);
template
void cuda_sigmoid_table_lookup<double>(const int N,
const double* input,
const double* table,
double* output,
int fragpos);
template<typename Dtype>
__global__ static void _tanh_table_lookup(const int N,
const int fragpos,
const Dtype scale,
const Dtype fuzz,
const Dtype* input,
const Dtype* table,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
if (input[i] >= 4.0)
output[i] = 1.0 - fuzz;
else if (input[i] < -4.0)
output[i] = -1.0;
else {
int x = int(input[i] * scale);
int pos = 0;
if (x >= 0) {
if (fragpos >= 8)
pos = (x >> (fragpos - 8)) % 1024;
else
pos = (x << (8 - fragpos)) % 1024;
output[i] = table[pos + 1024] * fuzz;
}
else {
//if (fragpos >= 8)
// pos = (abs(x) >> (fragpos - 8)) % 1024;
//else
// pos = (abs(x) << (8 - fragpos)) % 1024;
pos = abs(int(floor(x / pow(2.0, (fragpos - 8))))) % 1024;
if (x >> fragpos == -4 && pos == 0)
output[i] = table[pos] * fuzz;
else
output[i] = table[1024 - pos] * fuzz;
}
}
}
}
template<typename Dtype>
void cuda_tanh_table_lookup(const int N,
const Dtype* input,
const Dtype* table,
Dtype* output,
int fragpos)
{
Dtype scale = pow(2.0, fragpos);
Dtype fuzz = 1.0 / 32768;
_tanh_table_lookup<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
fragpos,
scale,
fuzz,
input,
table,
output);
}
template
void cuda_tanh_table_lookup<float>(const int N,
const float* input,
const float* table,
float* output,
int fragpos);
template
void cuda_tanh_table_lookup<double>(const int N,
const double* input,
const double* table,
double* output,
int fragpos);
/*
Sigmoid & Tanh simulation
*/
#define C_P0 1.98364257812E-4
#define CF_P1 1.3981999507E-3
#define CF_P2 8.3334519073E-3
#define CF_P3 4.1665795894E-2
#define CF_P4 1.6666665459E-1
#define CF_P5 5.0000001201E-1
__device__ static inline short bfloat16(float x){
int itmp = *(int*)&x;
if((itmp&0x00008000) == 0x00008000)
itmp += 0x00010000;
return (short)((itmp>>16)&0xFFFF);
}
__device__ static inline float rbfloat(short x){
int itmp = (x<<16)&0xFFFF0000;
return *((float *)&itmp);
}
__device__ static inline float as_bfloat16(float x){
int itmp = *(int*)&x;
if((itmp&0x00008000) == 0x00008000)
itmp += 0x00010000;
itmp &= 0xFFFF0000;
return *(float *)&itmp;
}
__device__ static inline float as_bfloat16_numpy(float x){
int itmp = *(int*)&x;
itmp &= 0xFFFF0000;
return *(float *)&itmp;
}
__device__ static inline int float2int_cuda(float x){
return *(int*)&x;
}
__device__ static inline float int2float_cuda(int x){
return *((float *)&x);
}
__device__ static inline float int2bfloat_cuda(int x){
int itmp = x;
if ((itmp&0x00008000) == 0x00008000) { // half even
if ((itmp&0xFFFF) > 0x00008000 || (((itmp&0xFFFF) == 0x00008000) && (itmp&0x10000) == 0x10000)){
itmp += 0x10000;
}
}
itmp &= 0xFFFF0000;
return *(float *)&itmp;
}
__device__ static inline float float2bfloat_cuda(float x){
int itmp = *(int*)&x;
if ((itmp&0x00008000) == 0x00008000) { // half even
if ((itmp&0xFFFF) > 0x00008000 || (((itmp&0xFFFF) == 0x00008000) && (itmp&0x10000) == 0x10000)){
itmp += 0x10000;
}
}
itmp &= 0xFFFF0000;
return *(float *)&itmp;
}
__device__ static inline int float2short_cuda(float x){
float itmp = float2bfloat_cuda(x);
return *(int*)&itmp;
}
// downshift one bit for short x
__device__ static inline int short_downshift_onebit_cuda(int x){ // input: fake short
int y, a;
y = x >> 17;
a = (x >> 16) & 1;
if ((y & 1) == 1) // round half to even
y += a;
return y << 17; // fake short
}
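// exp_sim: bfloat16 exp() via range reduction exp(x) = 2^fz * exp(fx). fz comes from
// x*1.4375 + 0.5 (1.4375 ~ 1/ln 2) rounded to an integer by adding and subtracting
// 12582912.0 = 1.5*2^23; fx = x - fz*0.69140625 (0.69140625 ~ ln 2) is the reduced
// argument, and the Horner chain with C_P0..CF_P5 approximates exp(fx).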
__device__ float exp_sim(short x)
{
float ftmp, fz, fres, fx;
ftmp = rbfloat(x);
fres = ftmp*1.4375+0.5;
//round
fres += 12582912.0;
fres -= 12582912.0;
//round end
fz = fres;
fx = ftmp - fres*0.69140625;
fres = as_bfloat16(fx)*C_P0 + CF_P1;
fres = as_bfloat16(fx)*as_bfloat16(fres) + CF_P2;
fres = as_bfloat16(fx)*as_bfloat16(fres) + CF_P3;
fres = as_bfloat16(fx)*as_bfloat16(fres) + CF_P4;
fres = as_bfloat16(fx)*as_bfloat16(fres) + CF_P5;
fres = as_bfloat16(fx)*as_bfloat16(fres) + 1.0;
fres = as_bfloat16(fx)*as_bfloat16(fres) + 1.0;
fres = as_bfloat16(fres)*as_bfloat16(pow(2, fz));
return as_bfloat16(fres);
}
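// inv_sim: bfloat16 reciprocal 1/x. The initial guess flips the exponent via
// 0x7F000000 - bits(x); four Newton-Raphson steps r = r*(2 - x*r) then refine it.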
__device__ float inv_sim(float x)
{
float a = x;
int tt = 0x7F000000 - *(int*)&a;
float r = *(float *)&tt;
float m;
a = as_bfloat16(a);
for (int k=0; k<4; k++){
m = 2.0 - as_bfloat16(a)*as_bfloat16(r);
r = as_bfloat16(m)*as_bfloat16(r);
}
return r;
}
__device__ float sigmoid_sim(short x)
{
float fres = exp_sim(x);
float r = inv_sim(as_bfloat16(fres)+1.0);
fres = as_bfloat16(fres)*as_bfloat16(r);
return as_bfloat16(fres);
}
__device__ short sigmoid_short_sim(short x, int ishift, int oshift)
{
float fx = (float)x;
float iscale = pow(2, -ishift);
float res = as_bfloat16(iscale)*as_bfloat16(fx);
res = sigmoid_sim(bfloat16(res));
float oscale = pow(2, oshift);
res = as_bfloat16(oscale)*as_bfloat16(res);
float fy = as_bfloat16(res) + 12582912.0;
int y = *((int*)&fy);
y -= 0x4B400000;
return y&0xFFFF;
}
template<typename Dtype>
__global__ static void _sigmoid_simulation(const int N,
const int fragpos,
const Dtype scale,
const Dtype fuzz,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
if (input[i] >= 8.0)
output[i] = 1.0 - fuzz;
else if (input[i] < -8.0)
output[i] = 0.0;
else {
int x = int(input[i] * scale);
output[i] = sigmoid_short_sim(x, fragpos, 15) * fuzz;
}
}
}
template<typename Dtype>
void cuda_sigmoid_simulation(const int N,
const Dtype* input,
Dtype* output,
int fragpos)
{
Dtype scale = pow(2.0, fragpos);
Dtype fuzz = 1.0 / 32768;
_sigmoid_simulation<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
fragpos,
scale,
fuzz,
input,
output);
}
template
void cuda_sigmoid_simulation<float>(const int N,
const float* input,
float* output,
int fragpos);
template
void cuda_sigmoid_simulation<double>(const int N,
const double* input,
double* output,
int fragpos);
__device__ float tanh_sim(short x)
{
float fres = exp_sim(x);
fres = as_bfloat16(fres)*as_bfloat16(fres);
float r = inv_sim(as_bfloat16(fres)+1.0);
fres = as_bfloat16(fres)-1.0;
fres = as_bfloat16(fres)*as_bfloat16(r);
return as_bfloat16(fres);
}
__device__ short tanh_short_sim(short x, int ishift, int oshift)
{
float fx = (float)x;
float iscale = pow(2, -ishift);
float res = as_bfloat16(iscale)*as_bfloat16(fx);
res = tanh_sim(bfloat16(res));
float oscale = pow(2, oshift);
res = as_bfloat16(oscale)*as_bfloat16(res);
float fy = as_bfloat16(res) + 12582912.0;
int y = *((int*)&fy);
y -= 0x4B400000;
return y&0xFFFF;
}
template<typename Dtype>
__global__ static void _tanh_simulation(const int N,
const int fragpos,
const Dtype scale,
const Dtype fuzz,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
if (input[i] >= 4.0)
output[i] = 1.0 - fuzz;
else if (input[i] < -4.0)
output[i] = -1.0;
else {
int x = int(input[i] * scale);
output[i] = tanh_short_sim(x, fragpos, 15) * fuzz;
}
}
}
template<typename Dtype>
void cuda_tanh_simulation(const int N,
const Dtype* input,
Dtype* output,
int fragpos)
{
Dtype scale = pow(2.0, fragpos);
Dtype fuzz = 1.0 / 32768;
_tanh_simulation<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
fragpos,
scale,
fuzz,
input,
output);
}
template
void cuda_tanh_simulation<float>(const int N,
const float* input,
float* output,
int fragpos);
template
void cuda_tanh_simulation<double>(const int N,
const double* input,
double* output,
int fragpos);
template<typename Dtype>
__global__ static void _softmax_exp_approximate(const int N,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
Dtype u;
Dtype v;
if (input[i] >= 0){
u = floor(input[i]);
v = input[i] - u;
}
else{
u = ceil(input[i]);
v = input[i] - u;
}
if (v <= -0.75)
output[i] = ((12409.0/pow(2.0, 15)) * v + 28747.0/pow(2.0, 15))/pow(2.0, -u);
else if (v <= -0.5)
output[i] = ((14759.0/pow(2.0, 15)) * v + 30497.0/pow(2.0, 15))/pow(2.0, -u);
else if (v <= -0.25)
output[i] = ((17551.0/pow(2.0, 15)) * v + 31880.0/pow(2.0, 15))/pow(2.0, -u);
else {
output[i] = ((20873.0/pow(2.0, 15)) * v + 32696.0/pow(2.0, 15))/pow(2.0, -u);
}
}
}
/*
Hardware-PL softmax
*/
template<typename Dtype>
void cuda_softmax_exp_approximate(const int N,
const Dtype* input,
Dtype* output)
{
_softmax_exp_approximate<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
input,
output);
}
template
void cuda_softmax_exp_approximate<float>(const int N,
const float* input,
float* output);
template
void cuda_softmax_exp_approximate<double>(const int N,
const double* input,
double* output);
template<typename Dtype>
__global__ static void _softmax_lod(const int N,
const Dtype* input,
Dtype* output)
{
NNDCT_KERNEL_LOOP(i, N){
float lod_s = 0;
float s_int = floor(input[i]);
while(s_int >= 2){
lod_s += 1;
s_int /= 2;
}
output[i] = lod_s;
}
}
template<typename Dtype>
void cuda_softmax_lod(const int N,
const Dtype* input,
Dtype* output)
{
_softmax_lod<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
input,
output);
}
template
void cuda_softmax_lod<float>(const int N,
const float* input,
float* output);
template
void cuda_softmax_lod<double>(const int N,
const double* input,
double* output);
/*
Liyi softmax
*/
template<typename Dtype>
__global__ static void _softmax_simulation_part_1(const int N,
const Dtype* input,
Dtype* output)
{
float temp;
int itemp;
short bf16hex;
float fres;
NNDCT_KERNEL_LOOP(i, N){
if (input[i] <= -80)
temp = -80;
else{
temp = input[i];
}
itemp = *((int *)&temp);
bf16hex = (itemp>>16) & 0xFFFF;
fres = exp_sim(bf16hex);
output[i] = fres;
}
}
template<typename Dtype>
void cuda_softmax_simulation_part_1(const int N,
const Dtype* input,
Dtype* output)
{
_softmax_simulation_part_1<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
input,
output);
}
template
void cuda_softmax_simulation_part_1<float>(const int N,
const float* input,
float* output);
template
void cuda_softmax_simulation_part_1<double>(const int N,
const double* input,
double* output);
template<typename Dtype>
__global__ static void _softmax_simulation_part_2(const int N,
const Dtype* sum,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
float r = inv_sim(as_bfloat16(sum[i]));
output[i] = as_bfloat16(r);
}
}
template<typename Dtype>
void cuda_softmax_simulation_part_2(const int N,
const Dtype* sum,
Dtype* output)
{
_softmax_simulation_part_2<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
sum,
output);
}
template
void cuda_softmax_simulation_part_2<float>(const int N,
const float* sum,
float* output);
template
void cuda_softmax_simulation_part_2<double>(const int N,
const double* sum,
double* output);
/*
Sigmoid & Tanh & LogSoftmax Table Look up AIE2
*/
#define STEP 0.0009765625f // 1/1024
#define LN2 0.69314718056f
__device__ static inline float short_to_float(short x){
int itmp = (x<<16)&0xFFFF0000;
float f = *((float *)&itmp);
return f;
}
__device__ static inline short clip_int16(float x){
short res;
if(x > 32767)
res = 32767;
else if(x < -32768)
res = -32768;
else
res = short(x);
return res;
}
__device__ float vector_inv(float x){
short x_as_int16 = bfloat16(x);
short num = 0x3F80;
short res_as_int16 = 2*num - x_as_int16;
return short_to_float(res_as_int16);
}
__device__ float compute_inv(float x){
unsigned int *B_x;
unsigned int exp_mask = 0x7F800000;
unsigned int mantissa_mask = 0x007FFFFF;
unsigned int mantissa_Q = 0x00008000;
unsigned char exponent, mantissa;
unsigned int inv_exponent;
unsigned short inv_x_val;
float inv_x_float;
B_x = (unsigned int*)&x;
exponent = (*B_x & exp_mask) >> 23;
mantissa = ((*B_x & mantissa_Q)==0) ? ((*B_x & mantissa_mask)>>16) : ((*B_x & mantissa_mask)>>16)+1;
inv_exponent = 253-exponent;
if(mantissa > 127)
mantissa = 127;
inv_x_val = (inv_exponent<<7) + m_inv_lut[mantissa];
inv_x_float = short_to_float(inv_x_val);
return inv_x_float;
}
__device__ float compute_exp(short x){
unsigned short x_no_sign, h_8bit, l_8bit, h, l;
float f1, f2, res;
x_no_sign = (unsigned short)x;
h_8bit = (x_no_sign>>8)&0X00FF;
l_8bit = x_no_sign&0X00FF;
//if(l_8bit>=0x00E0 && l_8bit <=0x00FF)
// h_8bit += 0x0001;
h = h_8bit/8*16 + h_8bit%8;
l = l_8bit/8*16 + l_8bit%8;
f1 = short_to_float(s_ilut_ab[h]);
f2 = short_to_float(s_flut_ab[l]);
res = as_bfloat16(f1*f2);
return res;
}
__device__ float compute_exp_soft(short x){
unsigned short x_no_sign, h_8bit, l_8bit, h, l;
float f1, f2, res;
x_no_sign = (unsigned short)x;
h_8bit = (x_no_sign>>8)&0X00FF;
l_8bit = x_no_sign&0X00FF;
//if(l_8bit>=0x00E0 && l_8bit <=0x00FF)
// h_8bit += 0x0001;
h = h_8bit/8*16 + h_8bit%8;
l = l_8bit/8*16 + l_8bit%8;
f1 = short_to_float(s_ilut_cd[h]);
f2 = short_to_float(s_flut_cd[l]);
res = as_bfloat16(f1*f2);
return res;
}
// cubic approximation of ln(x) in range [1, 2]:
__device__ static inline float small_ln(float x) {
x -= 1.0f;
return 0.6931471805599453f*x*(1.4201157697141027f + x*(-0.5747927782450741f + x*(0.15468105905881002f)));
}
//for ln(x) with x>0
__device__ float fast_ln(float x){
unsigned char exponent;
float mantissa, ln_mantissa;
float res_ln;
int x_int = *(int *)&x;
exponent = (x_int&0x7f800000) >> 23;
x_int &= 0x007FFFFF;//mask away the exp
x_int |= 0x3F800000;//set the exp to 127
mantissa = *(float *)&x_int;
ln_mantissa = small_ln(mantissa);
res_ln = ln_mantissa+(exponent-127)*LN2;
return as_bfloat16(res_ln);
}
template<typename Dtype>
__global__ static void _sigmoid_table_lookup_aie2(const int N,
const Dtype fuzz,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
short in;
float exp_sigm;
float denom_sigm, denom_inv_sigm;
float sigmoid, in_float, res_float;
in_float = input[i] * pow(2.0, 12);
in = clip_int16(in_float);
exp_sigm = compute_exp(in);
denom_sigm = exp_sigm + 1.0;
denom_inv_sigm = vector_inv(denom_sigm);
sigmoid = as_bfloat16(exp_sigm * denom_inv_sigm);
res_float = as_bfloat16(sigmoid * pow(2.0, 15));
output[i] = clip_int16(res_float)* pow(2.0, -15);
}
}
template<typename Dtype>
void cuda_sigmoid_table_lookup_aie2(const int N,
const Dtype* input,
Dtype* output,
int fragpos)
{
Dtype fuzz = 1.0 / 32768;
_sigmoid_table_lookup_aie2<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
fuzz,
input,
output);
}
template
void cuda_sigmoid_table_lookup_aie2<float>(const int N,
const float* input,
float* output,
int fragpos);
template
void cuda_sigmoid_table_lookup_aie2<double>(const int N,
const double* input,
double* output,
int fragpos);
template<typename Dtype>
__global__ static void _tanh_table_lookup_aie2(const int N,
const Dtype fuzz,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
short in;
float temp_tanh;
float denom_inv_tanh;
float tanh, in_float, res_float;
short res;
in_float = input[i] * pow(2.0, 12);
in = clip_int16(in_float);
temp_tanh = compute_exp(in);
temp_tanh = temp_tanh * temp_tanh;//e^2x
temp_tanh = as_bfloat16(temp_tanh);
denom_inv_tanh = vector_inv(temp_tanh + 1.0);//1/(e^2x + 1)
temp_tanh = as_bfloat16(temp_tanh) - 1.0;//e^2x-1
tanh = as_bfloat16(temp_tanh) * denom_inv_tanh;
res_float = as_bfloat16(tanh * pow(2.0, 15));
res = clip_int16(res_float);
output[i] = res* pow(2.0, -15);
}
}
template<typename Dtype>
void cuda_tanh_table_lookup_aie2(const int N,
const Dtype* input,
Dtype* output,
int fragpos)
{
Dtype scale = pow(2.0, fragpos);
Dtype fuzz = 1.0 / 32768;
_tanh_table_lookup_aie2<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
fuzz,
input,
output);
}
template
void cuda_tanh_table_lookup_aie2<float>(const int N,
const float* input,
float* output,
int fragpos);
template
void cuda_tanh_table_lookup_aie2<double>(const int N,
const double* input,
double* output,
int fragpos);
template<typename Dtype>
__global__ static void _exp_appr_aie2(const int N,
const Dtype* input,
Dtype* output,
const int bit_width) {
NNDCT_KERNEL_LOOP(i, N){
float input_f;
if(input[i] < -63.0){
input_f = -63;
}
else{
input_f = input[i];
}
output[i] = compute_exp_soft(short(input_f * -1024));
}
}
template<typename Dtype>
void cuda_exp_appr_aie2(const int N,
const Dtype* input,
Dtype* output,
const int bit_width)
{
_exp_appr_aie2<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
input,
output,
bit_width);
}
template
void cuda_exp_appr_aie2<float>(const int N,
const float* input,
float* output,
const int bit_width);
template
void cuda_exp_appr_aie2<double>(const int N,
const double* input,
double* output,
const int bit_width);
template<typename Dtype>
__global__ static void _log_softmax_fast_ln(const int N,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
output[i] = fast_ln(input[i]);
}
}
template<typename Dtype>
void cuda_log_softmax_fast_ln(const int N,
const Dtype* input,
Dtype* output)
{
_log_softmax_fast_ln<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
input,
output);
}
template
void cuda_log_softmax_fast_ln<float>(const int N,
const float* input,
float* output);
template
void cuda_log_softmax_fast_ln<double>(const int N,
const double* input,
double* output);
template<typename Dtype>
__global__ static void _log_softmax_sub(const int N,
const Dtype* input,
Dtype* output,
const Dtype* sum) {
NNDCT_KERNEL_LOOP(i, N){
output[i] = as_bfloat16(as_bfloat16((float)input[i])-sum[0]);
}
}
template<typename Dtype>
void cuda_log_softmax_sub(const int N,
const Dtype* input,
Dtype* output,
const Dtype* sum)
{
_log_softmax_sub<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
input,
output,
sum);
}
template
void cuda_log_softmax_sub<float>(const int N,
const float* input,
float* output,
const float* sum);
template
void cuda_log_softmax_sub<double>(const int N,
const double* input,
double* output,
const double* sum);
// aie sqrt(x) = x*(1/sqrt(x)) with Newton iteration for 1/sqrt(x)
__device__ float _sqrt(const float x){
float x2, y, y3h, out;
int i;
x2 = x*0.5;
y = x;
i = float2int_cuda(y); // bitwise float32 to int32
i = (0x5f37 - (i >> 17)) << 16; // int32 -> int16 -> int32
y = int2float_cuda(i); // initial value: bitwise int32 to float32
// one step Newton iteration: y = y*(1.5 - (x2*y*y)) = 1.5*y - x2*y*y*y
y3h = 1.5*y; // float32
y3h = float2bfloat_cuda(y3h); // bfloat with 32 bitwidth
out = y*x2; // float32
out = float2bfloat_cuda(out); // bfloat with 32 bitwidth
out = out*y; // float32
out = float2bfloat_cuda(out); // bfloat with 32 bitwidth
out = out*y;
out = float2bfloat_cuda(out); // bfloat with 32 bitwidth
out = y3h - out; // float32: 1/sqrt(x) = 1.5*y - x2*y*y*y
out = float2bfloat_cuda(out); // bfloat with 32 bitwidth
out = x*out; // sqrt(x) = x*(1/sqrt(x))
out = float2bfloat_cuda(out); // bfloat with 32 bitwidth
return out;
}
template<typename Dtype>
__global__ static void _aie_sqrt(const int N,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
output[i] = _sqrt(input[i]);
}
}
template<typename Dtype>
void cuda_aie_sqrt(const int N,
const Dtype* input,
Dtype* output)
{
_aie_sqrt<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
input,
output);
}
template
void cuda_aie_sqrt<float>(const int N,
const float* input,
float* output);
template
void cuda_aie_sqrt<double>(const int N,
const double* input,
double* output);
/*
AIE2 isqrt, bfloat16 iteration
*/
__device__ float _isqrt(float x){
float x2, y, y2, mul2, mul, sub, threehalfs;
int i;
x2 = x*0.5;
x2 = float2bfloat_cuda(x2); // bitwise float32 to bfloat16
y = x;
i = float2short_cuda(y); // bitwise float32 to short (int16)
i = (0x5f37 - (short_downshift_onebit_cuda(i) >> 17)) << 16; // fake short
y = int2bfloat_cuda(i); // initial value: bitwise short to bfloat16
threehalfs = float2bfloat_cuda(1.5); // bfloat16
// 4-steps-Newton iteration: y = y*(1.5 - (x2*y*y))
for(int i=0; i<4; i++){
y2 = y*y;
y2 = float2bfloat_cuda(y2); // bfloat with 32 bitwidth
mul2 = x2*y2;
mul2 = float2bfloat_cuda(mul2);
sub = threehalfs - mul2;
sub = float2bfloat_cuda(sub);
mul = y*sub;
y = float2bfloat_cuda(mul);
}
return y;
}
template<typename Dtype>
__global__ static void _aie_isqrt(const int N,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
output[i] = _isqrt(input[i]);
}
}
template<typename Dtype>
void cuda_aie_isqrt(const int N,
const Dtype* input,
Dtype* output)
{
_aie_isqrt<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
input,
output);
}
template
void cuda_aie_isqrt<float>(const int N,
const float* input,
float* output);
template
void cuda_aie_isqrt<double>(const int N,
const double* input,
double* output);
/*
Layernorm isqrt AIE2, float32 iteration
*/
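// isqrt: classic fast inverse square root -- initial bit-level estimate from the
// 0x5f3759df magic constant, refined by four Newton steps y = y*(1.5 - 0.5*x*y*y),
// all in plain float32.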
__device__ float isqrt(float x){
float x2, y, threehalfs;
int i;
x2 = x*0.5;
y = x;
threehalfs = 1.5;
i = float2int_cuda(y); // bitwise float32 to int32
i = 0x5f3759df - (i >> 1);
y = int2float_cuda(i); // bitwise int32 to float32
y = y*(threehalfs - (x2*y*y)); // Newton steps
y = y*(threehalfs - (x2*y*y));
y = y*(threehalfs - (x2*y*y));
y = y*(threehalfs - (x2*y*y));
return y;
}
template<typename Dtype>
__global__ static void _layernorm_isqrt(const int N,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
output[i] = isqrt(input[i]);
}
}
template<typename Dtype>
void cuda_layernorm_isqrt(const int N,
const Dtype* input,
Dtype* output)
{
_layernorm_isqrt<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
input,
output);
}
template
void cuda_layernorm_isqrt<float>(const int N,
const float* input,
float* output);
template
void cuda_layernorm_isqrt<double>(const int N,
const double* input,
double* output);
/*
Layernorm Inv Sqrt AIE2
*/
__device__ float invsqrt(float x){
x = as_bfloat16_numpy(x);
short i;
float x1,x2;
float y1,y2,y;
x1 = as_bfloat16_numpy(x * 0.5);
x2 = as_bfloat16_numpy(x);
i = bfloat16(x2);
i = 0x5f37 - ( i >> 1 );
x2 = rbfloat(i);
y2 = as_bfloat16_numpy(x2 * 1.5);
y1 = x1*x2*x2*x2;
y1 = as_bfloat16_numpy(y1);
y = y2-y1;
y = as_bfloat16_numpy(y);
return y;
}
template<typename Dtype>
__global__ static void _layernorm_invsqrt(const int N,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
output[i] = invsqrt(input[i]);
}
}
template<typename Dtype>
void cuda_layernorm_invsqrt(const int N,
const Dtype* input,
Dtype* output)
{
_layernorm_invsqrt<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
input,
output);
}
template
void cuda_layernorm_invsqrt<float>(const int N,
const float* input,
float* output);
template
void cuda_layernorm_invsqrt<double>(const int N,
const double* input,
double* output);
/*
AIE2 Softmax
*/
template<typename Dtype>
__global__ static void _inverse_aie2(const int N,
const Dtype* input,
Dtype* output) {
NNDCT_KERNEL_LOOP(i, N){
output[i] = compute_inv(input[i]);
}
}
template<typename Dtype>
void cuda_inverse_aie2(const int N,
const Dtype* input,
Dtype* output)
{
_inverse_aie2<<<NNDCT_GET_BLOCKS(N),NNDCT_CUDA_NUM_THREADS>>>(
N,
input,
output);
}
template
void cuda_inverse_aie2<float>(const int N,
const float* input,
float* output);
template
void cuda_inverse_aie2<double>(const int N,
const double* input,
double* output);
|
0f176e6c40dce3a05d44a952175a932b9e098c7b.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdlib>
#include <chrono>
#include <random>
#include <hip/hip_runtime.h>
#include "constants_types.h"
#include "kernel.h"
int main(int argc, char* argv[]) {
if (argc != 2) {
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
const int repeat = atoi(argv[1]);
fArray *d_cur_forward;
fArray *d_next_forward;
fArray *d_emis;
tArray *d_trans;
lArray *d_like;
sArray *d_start;
dim3 dimGrid(batch);
dim3 dimBlock(states-1);
size_t forward_matrix_size = (x_dim+1)*(y_dim+1)*batch*(states-1)*sizeof(double);
size_t emissions_size = (x_dim+1)*(y_dim+1)*batch*(states-1)*sizeof(double);
size_t transitions_size = (x_dim+1)*(states-1)*states*batch*sizeof(double);
size_t start_transitions_size = batch*(states-1)*sizeof(double);
size_t likelihood_size = 2*2*(states-1)*batch*sizeof(double);
fArray *h_cur_forward = (fArray*) malloc (forward_matrix_size);
fArray *h_emis = (fArray*) malloc (emissions_size);
tArray *h_trans = (tArray*) malloc (transitions_size);
lArray *h_like = (lArray*) malloc (likelihood_size);
sArray *h_start = (sArray*) malloc (start_transitions_size);
std::default_random_engine rng (123);
std::uniform_real_distribution<double> dist (0.0, 1.0);
for (int i = 0; i < x_dim+1; i++) {
for (int j = 0; j < y_dim+1; j++) {
for (int b = 0; b < batch; b++) {
for (int s = 0; s < states-1; s++) {
h_cur_forward[i][j][b][s] = dist(rng);
h_emis[i][j][b][s] = dist(rng);
}
}
}
}
for (int i = 0; i < x_dim+1; i++) {
for (int b = 0; b < batch; b++) {
for (int s = 0; s < states-1; s++) {
for (int t = 0; t < states; t++) {
h_trans[i][b][s][t] = dist(rng);
}
}
}
}
for (int i = 0; i < batch; i++) {
for (int s = 0; s < states-1; s++) {
h_start[i][s] = dist(rng);
}
}
for (int i = 0; i < 2; i++) {
for (int j = 0; j< 2; j++) {
for (int b = 0; b < batch; b++) {
for (int s = 0; s < states-1; s++) {
h_like[i][j][b][s] = dist(rng);
}
}
}
}
hipMalloc((void**)&d_cur_forward, forward_matrix_size);
hipMemcpy(d_cur_forward, h_cur_forward, forward_matrix_size, hipMemcpyHostToDevice);
hipMalloc((void**)&d_next_forward, forward_matrix_size);
hipMalloc((void**)&d_emis, emissions_size);
hipMemcpy(d_emis, h_emis, emissions_size, hipMemcpyHostToDevice);
hipMalloc((void**)&d_trans, transitions_size);
hipMemcpy(d_trans, h_trans, transitions_size, hipMemcpyHostToDevice);
hipMalloc((void**)&d_like, likelihood_size);
hipMemcpy(d_like, h_like, likelihood_size, hipMemcpyHostToDevice);
hipMalloc((void**)&d_start, start_transitions_size);
hipMemcpy(d_start, h_start, start_transitions_size, hipMemcpyHostToDevice);
hipDeviceSynchronize();
auto t1 = std::chrono::high_resolution_clock::now();
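// Ping-pong between the two forward matrices: each kernel launch reads d_cur_forward
// and writes d_next_forward for cell (i, j), then the pointers are swapped.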
for(int count = 0; count < repeat; count++) {
for (int i = 1; i < x_dim + 1; i++) {
for (int j = 1; j < y_dim + 1; j++) {
hipLaunchKernelGGL(( pair_HMM_forward), dim3(dimGrid), dim3(dimBlock), 0, 0, i, j, d_cur_forward, d_trans, d_emis, d_like, d_start, d_next_forward);
auto t = d_cur_forward;
d_cur_forward = d_next_forward;
d_next_forward = t;
}
}
}
hipDeviceSynchronize();
auto t2 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::milli> milli = (t2 - t1);
std::cout << "Total execution time " << milli.count() << " milliseconds\n" ;
hipMemcpy(h_cur_forward, d_cur_forward, forward_matrix_size, hipMemcpyDeviceToHost);
double checkSum = 0.0;
for (int i = 0; i < x_dim+1; i++) {
for (int j = 0; j < y_dim+1; j++) {
for (int b = 0; b < batch; b++) {
for (int s = 0; s < states-1; s++) {
#ifdef DEBUG
std::cout << h_cur_forward[i][j][b][s] << std::endl;
#endif
checkSum += h_cur_forward[i][j][b][s];
}
}
}
}
std::cout << "Checksum " << checkSum << std::endl;
hipFree(d_cur_forward);
hipFree(d_next_forward);
hipFree(d_emis);
hipFree(d_trans);
hipFree(d_like);
hipFree(d_start);
free(h_cur_forward);
free(h_emis);
free(h_trans);
free(h_like);
free(h_start);
return 0;
}
| 0f176e6c40dce3a05d44a952175a932b9e098c7b.cu | #include <iostream>
#include <cstdlib>
#include <chrono>
#include <random>
#include <cuda.h>
#include "constants_types.h"
#include "kernel.h"
int main(int argc, char* argv[]) {
if (argc != 2) {
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
const int repeat = atoi(argv[1]);
fArray *d_cur_forward;
fArray *d_next_forward;
fArray *d_emis;
tArray *d_trans;
lArray *d_like;
sArray *d_start;
dim3 dimGrid(batch);
dim3 dimBlock(states-1);
size_t forward_matrix_size = (x_dim+1)*(y_dim+1)*batch*(states-1)*sizeof(double);
size_t emissions_size = (x_dim+1)*(y_dim+1)*batch*(states-1)*sizeof(double);
size_t transitions_size = (x_dim+1)*(states-1)*states*batch*sizeof(double);
size_t start_transitions_size = batch*(states-1)*sizeof(double);
size_t likelihood_size = 2*2*(states-1)*batch*sizeof(double);
fArray *h_cur_forward = (fArray*) malloc (forward_matrix_size);
fArray *h_emis = (fArray*) malloc (emissions_size);
tArray *h_trans = (tArray*) malloc (transitions_size);
lArray *h_like = (lArray*) malloc (likelihood_size);
sArray *h_start = (sArray*) malloc (start_transitions_size);
std::default_random_engine rng (123);
std::uniform_real_distribution<double> dist (0.0, 1.0);
for (int i = 0; i < x_dim+1; i++) {
for (int j = 0; j < y_dim+1; j++) {
for (int b = 0; b < batch; b++) {
for (int s = 0; s < states-1; s++) {
h_cur_forward[i][j][b][s] = dist(rng);
h_emis[i][j][b][s] = dist(rng);
}
}
}
}
for (int i = 0; i < x_dim+1; i++) {
for (int b = 0; b < batch; b++) {
for (int s = 0; s < states-1; s++) {
for (int t = 0; t < states; t++) {
h_trans[i][b][s][t] = dist(rng);
}
}
}
}
for (int i = 0; i < batch; i++) {
for (int s = 0; s < states-1; s++) {
h_start[i][s] = dist(rng);
}
}
for (int i = 0; i < 2; i++) {
for (int j = 0; j< 2; j++) {
for (int b = 0; b < batch; b++) {
for (int s = 0; s < states-1; s++) {
h_like[i][j][b][s] = dist(rng);
}
}
}
}
cudaMalloc((void**)&d_cur_forward, forward_matrix_size);
cudaMemcpy(d_cur_forward, h_cur_forward, forward_matrix_size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_next_forward, forward_matrix_size);
cudaMalloc((void**)&d_emis, emissions_size);
cudaMemcpy(d_emis, h_emis, emissions_size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_trans, transitions_size);
cudaMemcpy(d_trans, h_trans, transitions_size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_like, likelihood_size);
cudaMemcpy(d_like, h_like, likelihood_size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_start, start_transitions_size);
cudaMemcpy(d_start, h_start, start_transitions_size, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
auto t1 = std::chrono::high_resolution_clock::now();
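// Ping-pong between the two forward matrices: each kernel launch reads d_cur_forward
// and writes d_next_forward for cell (i, j), then the pointers are swapped.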
for(int count = 0; count < repeat; count++) {
for (int i = 1; i < x_dim + 1; i++) {
for (int j = 1; j < y_dim + 1; j++) {
pair_HMM_forward<<<dimGrid, dimBlock>>>(i, j, d_cur_forward, d_trans, d_emis, d_like, d_start, d_next_forward);
auto t = d_cur_forward;
d_cur_forward = d_next_forward;
d_next_forward = t;
}
}
}
cudaDeviceSynchronize();
auto t2 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::milli> milli = (t2 - t1);
std::cout << "Total execution time " << milli.count() << " milliseconds\n" ;
cudaMemcpy(h_cur_forward, d_cur_forward, forward_matrix_size, cudaMemcpyDeviceToHost);
double checkSum = 0.0;
for (int i = 0; i < x_dim+1; i++) {
for (int j = 0; j < y_dim+1; j++) {
for (int b = 0; b < batch; b++) {
for (int s = 0; s < states-1; s++) {
#ifdef DEBUG
std::cout << h_cur_forward[i][j][b][s] << std::endl;
#endif
checkSum += h_cur_forward[i][j][b][s];
}
}
}
}
std::cout << "Checksum " << checkSum << std::endl;
cudaFree(d_cur_forward);
cudaFree(d_next_forward);
cudaFree(d_emis);
cudaFree(d_trans);
cudaFree(d_like);
cudaFree(d_start);
free(h_cur_forward);
free(h_emis);
free(h_trans);
free(h_like);
free(h_start);
return 0;
}
|
98d0b6f30937cc1c8f0e80918c88b8820fef7f80.hip | // !!! This is a file automatically generated by hipify!!!
// =================================================================
//
// File: intro6.cu
// Author: Pedro Perez
// Description: This file shows some of the basic CUDA directives.
//
// Copyright (c) 2020 by Tecnologico de Monterrey.
// All Rights Reserved. May be reproduced for any non-commercial
// purpose.
//
// =================================================================
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "utils.h"
#define SIZE 1000000 //1e6
#define THREADS 256
#define BLOCKS MMAX(32, ((SIZE / THREADS) + 1))
__global__ void add(int *a, int *b, int *c) {
int i = threadIdx.x + (blockIdx.x * blockDim.x);
if (i < SIZE) {
c[i] = a[i] + b[i];
}
}
int main(int argc, char* argv[]) {
int *a, *b, *c;
int *d_a, *d_b, *d_c;
a = (int*) malloc(SIZE * sizeof(int));
fill_array(a, SIZE);
display_array("a", a);
b = (int*) malloc(SIZE * sizeof(int));
fill_array(b, SIZE);
display_array("b", b);
c = (int*) malloc(SIZE * sizeof(int));
hipMalloc((void**) &d_a, SIZE * sizeof(int));
hipMalloc((void**) &d_b, SIZE * sizeof(int));
hipMalloc((void**) &d_c, SIZE * sizeof(int));
hipMemcpy(d_a, a, SIZE * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, SIZE * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(BLOCKS), dim3(THREADS), 0, 0, d_a, d_b, d_c);
hipMemcpy(c, d_c, SIZE * sizeof(int), hipMemcpyDeviceToHost);
display_array("c", c);
hipFree(d_c);
hipFree(d_b);
hipFree(d_a);
free(c);
free(b);
free(a);
return 0;
}
| 98d0b6f30937cc1c8f0e80918c88b8820fef7f80.cu | // =================================================================
//
// File: intro6.cu
// Author: Pedro Perez
// Description: This file shows some of the basic CUDA directives.
//
// Copyright (c) 2020 by Tecnologico de Monterrey.
// All Rights Reserved. May be reproduced for any non-commercial
// purpose.
//
// =================================================================
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "utils.h"
#define SIZE 1000000 //1e6
#define THREADS 256
#define BLOCKS MMAX(32, ((SIZE / THREADS) + 1))
__global__ void add(int *a, int *b, int *c) {
int i = threadIdx.x + (blockIdx.x * blockDim.x);
if (i < SIZE) {
c[i] = a[i] + b[i];
}
}
int main(int argc, char* argv[]) {
int *a, *b, *c;
int *d_a, *d_b, *d_c;
a = (int*) malloc(SIZE * sizeof(int));
fill_array(a, SIZE);
display_array("a", a);
b = (int*) malloc(SIZE * sizeof(int));
fill_array(b, SIZE);
display_array("b", b);
c = (int*) malloc(SIZE * sizeof(int));
cudaMalloc((void**) &d_a, SIZE * sizeof(int));
cudaMalloc((void**) &d_b, SIZE * sizeof(int));
cudaMalloc((void**) &d_c, SIZE * sizeof(int));
cudaMemcpy(d_a, a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, SIZE * sizeof(int), cudaMemcpyHostToDevice);
add<<<BLOCKS, THREADS>>>(d_a, d_b, d_c);
cudaMemcpy(c, d_c, SIZE * sizeof(int), cudaMemcpyDeviceToHost);
display_array("c", c);
cudaFree(d_c);
cudaFree(d_b);
cudaFree(d_a);
free(c);
free(b);
free(a);
return 0;
}
|
eec573482315431d711d4b5a4226a12a2f80363c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "leftBoundaryKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *temperature = NULL;
hipMalloc(&temperature, XSIZE*YSIZE);
int block_size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(leftBoundaryKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, temperature, block_size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(leftBoundaryKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, temperature, block_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(leftBoundaryKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, temperature, block_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | eec573482315431d711d4b5a4226a12a2f80363c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "leftBoundaryKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *temperature = NULL;
cudaMalloc(&temperature, XSIZE*YSIZE);
int block_size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
leftBoundaryKernel<<<gridBlock,threadBlock>>>(temperature,block_size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
leftBoundaryKernel<<<gridBlock,threadBlock>>>(temperature,block_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
leftBoundaryKernel<<<gridBlock,threadBlock>>>(temperature,block_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
c82076685b27e3bbd40316a3a11c44670d7f3694.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
__global__ void unique_idx_calc_threadInx(int* input)
{
int thread_id = threadIdx.x;
printf("threadIdx: %d, value: %d \n", thread_id, input[thread_id]);
}
__global__ void unique_grid_calculation(int* input)
{
int thread_id = threadIdx.x;
int offset = blockIdx.x * blockDim.x;
int grid_id = thread_id + offset;
printf("blockIdx.x: %d, threadIdx: %d, gid: %d, value: %d \n",
blockIdx.x, thread_id, grid_id, input[grid_id]);
}
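// Added worked example: with the dim3 grid(4) / dim3 block(4) launch in the commented-out
// main below, thread 2 of block 3 computes offset = 3 * 4 = 12 and gid = 12 + 2 = 14, so it
// prints input[14] (44 in the sample h_data array).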
/*
int main()
{
int array_size = 16;
int array_byte_size = sizeof(int) * array_size;
int h_data[] = { 23, 9, 4, 53, 65, 12, 1, 33, 87, 45, 23, 12, 342, 56, 44, 99 };
for (int i = 0; i < array_size; i++)
{
printf("%d ", h_data[i]);
}
printf("\n\n");
int* d_data;
hipMalloc((void**)&d_data, array_byte_size);
hipMemcpy(d_data, h_data, array_byte_size, hipMemcpyHostToDevice);
dim3 block(4);
dim3 grid(4);
unique_grid_calculation<<<grid, block>>>(d_data);
hipDeviceSynchronize();
hipDeviceReset();
return 0;
}
*/ | c82076685b27e3bbd40316a3a11c44670d7f3694.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
__global__ void unique_idx_calc_threadInx(int* input)
{
int thread_id = threadIdx.x;
printf("threadIdx: %d, value: %d \n", thread_id, input[thread_id]);
}
__global__ void unique_grid_calculation(int* input)
{
int thread_id = threadIdx.x;
int offset = blockIdx.x * blockDim.x;
int grid_id = thread_id + offset;
printf("blockIdx.x: %d, threadIdx: %d, gid: %d, value: %d \n",
blockIdx.x, thread_id, grid_id, input[grid_id]);
}
/*
int main()
{
int array_size = 16;
int array_byte_size = sizeof(int) * array_size;
int h_data[] = { 23, 9, 4, 53, 65, 12, 1, 33, 87, 45, 23, 12, 342, 56, 44, 99 };
for (int i = 0; i < array_size; i++)
{
printf("%d ", h_data[i]);
}
printf("\n\n");
int* d_data;
cudaMalloc((void**)&d_data, array_byte_size);
cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice);
dim3 block(4);
dim3 grid(4);
unique_grid_calculation<<<grid, block>>>(d_data);
cudaDeviceSynchronize();
cudaDeviceReset();
return 0;
}
*/ |
544cd5ade84667530049902d4b4c34af2aa8265c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from ztranspose_conj.cu normal z -> c, Fri Sep 11 18:29:21 2015
@author Stan Tomov
@author Mark Gates
*/
#include "common_magma.h"
#define PRECISION_c
#if defined(PRECISION_z)
#define NX 16
#else
#define NX 32
#endif
#define NB 32
#define NY 8
// nearly same code in ctranspose.cu
// tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB.
// uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly.
// subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB
// for each subtile
// load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY
// save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY
// A += NX
// AT += NX*ldat
//
// e.g., with NB=32, NX=32, NY=8 ([sdc] precisions)
// load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 )
// save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14)
//
// e.g., with NB=32, NX=16, NY=8 (z precision)
// load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14)
// save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12)
// (AT21 AT22)
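// Added worked example (an illustration, not part of the original comment): with NB=32,
// NX=32, NY=8, thread (tx=5, ty=2) loads the conjugates of A(ibx+5, iby+2), A(ibx+5, iby+10),
// A(ibx+5, iby+18), A(ibx+5, iby+26) into sA[2][5], sA[10][5], sA[18][5], sA[26][5], and
// later stores sA[5][2], sA[5][10], ... to AT(iby+5, ibx+2), AT(iby+5, ibx+10), ... --
// tx and ty swap roles between the load and the store. The NX+1 padding of sA in the
// device function below is presumably there to avoid shared-memory bank conflicts on the
// strided reads of the store phase.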
static __device__ void
ctranspose_conj_device(
int m, int n,
const magmaFloatComplex *A, int lda,
magmaFloatComplex *AT, int ldat)
{
__shared__ magmaFloatComplex sA[NB][NX+1];
int tx = threadIdx.x;
int ty = threadIdx.y;
int ibx = blockIdx.x*NB;
int iby = blockIdx.y*NB;
int i, j;
A += ibx + tx + (iby + ty)*lda;
AT += iby + tx + (ibx + ty)*ldat;
#pragma unroll
for( int tile=0; tile < NB/NX; ++tile ) {
// load NX-by-NB subtile transposed from A into sA
i = ibx + tx + tile*NX;
j = iby + ty;
if (i < m) {
#pragma unroll
for( int j2=0; j2 < NB; j2 += NY ) {
if (j + j2 < n) {
sA[ty + j2][tx] = MAGMA_C_CNJG( A[j2*lda] );
}
}
}
__syncthreads();
// save NB-by-NX subtile from sA into AT
i = iby + tx;
j = ibx + ty + tile*NX;
#pragma unroll
for( int i2=0; i2 < NB; i2 += NX ) {
if (i + i2 < n) {
#pragma unroll
for( int j2=0; j2 < NX; j2 += NY ) {
if (j + j2 < m) {
AT[i2 + j2*ldat] = sA[tx + i2][ty + j2];
}
}
}
}
__syncthreads();
// move to next subtile
A += NX;
AT += NX*ldat;
}
}
/*
kernel wrapper to call the device function.
*/
__global__
void ctranspose_conj_kernel(
int m, int n,
const magmaFloatComplex *A, int lda,
magmaFloatComplex *AT, int ldat)
{
ctranspose_conj_device(m, n, A, lda, AT, ldat);
}
__global__
void ctranspose_conj_kernel_batched(
int m, int n,
magmaFloatComplex **dA_array, int lda,
magmaFloatComplex **dAT_array, int ldat)
{
int batchid = blockIdx.z;
ctranspose_conj_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat);
}
/**
Purpose
-------
ctranspose_conj_q copies and conjugate-transposes a matrix dA to matrix dAT.
Same as ctranspose_conj, but adds queue argument.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA COMPLEX array, dimension (LDDA,N)
The M-by-N matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= M.
@param[in]
dAT COMPLEX array, dimension (LDDAT,M)
The N-by-M matrix dAT.
@param[in]
lddat INTEGER
The leading dimension of the array dAT. LDDAT >= N.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_ctranspose_conj_q(
magma_int_t m, magma_int_t n,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_ptr dAT, magma_int_t lddat,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < m )
info = -4;
else if ( lddat < n )
info = -6;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
/* Quick return */
if ( (m == 0) || (n == 0) )
return;
dim3 threads( NX, NY );
dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ) );
hipLaunchKernelGGL(( ctranspose_conj_kernel), dim3(grid), dim3(threads), 0, queue ,
m, n, dA, ldda, dAT, lddat );
}
/**
@see magmablas_ctranspose_conj_q
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_ctranspose_conj(
magma_int_t m, magma_int_t n,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_ptr dAT, magma_int_t lddat )
{
magmablas_ctranspose_conj_q( m, n, dA, ldda, dAT, lddat, magma_stream );
}
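/*
    Illustrative usage sketch (added; not part of the original source). To conjugate-
    transpose an m-by-n device matrix dA (leading dimension ldda >= m) into the n-by-m
    matrix dAT (leading dimension lddat >= n) on the default stream:

        magmablas_ctranspose_conj( m, n, dA, ldda, dAT, lddat );

    dA and dAT are assumed to be device buffers already allocated by the caller; the
    _q variant above takes an explicit queue instead of using magma_stream.
*/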
/**
Purpose
-------
ctranspose_conj_batched_q copies and conjugate-transposes a matrix dA_array[i] to matrix dAT_array[i].
Same as ctranspose_conj_batched, but adds queue argument.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA_array
COMPLEX* array, dimension (batchCount)
array of pointers to the matrices dA, where each dA is of dimension (LDDA,N)
The M-by-N matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= M.
@param[in]
dAT_array
COMPLEX* array, dimension (batchCount)
array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M)
The N-by-M matrix dAT.
@param[in]
lddat INTEGER
The leading dimension of the array dAT. LDDAT >= N.
@param[in]
queue magma_queue_t
Queue to execute in.
@param[in]
batchCount Number of matrices in dA_array and dAT_array
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_ctranspose_conj_batched_q(
magma_int_t m, magma_int_t n,
magmaFloatComplex **dA_array, magma_int_t ldda,
magmaFloatComplex **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < m )
info = -4;
else if ( lddat < n )
info = -6;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
/* Quick return */
if ( (m == 0) || (n == 0) )
return;
dim3 threads( NX, NY );
dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ), batchCount );
hipLaunchKernelGGL(( ctranspose_conj_kernel_batched), dim3(grid), dim3(threads), 0, queue ,
m, n, dA_array, ldda, dAT_array, lddat );
}
/**
@see magmablas_ctranspose_conj_batched_q
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_ctranspose_conj_batched(
magma_int_t m, magma_int_t n,
magmaFloatComplex **dA_array, magma_int_t ldda,
magmaFloatComplex **dAT_array, magma_int_t lddat, magma_int_t batchCount )
{
magmablas_ctranspose_conj_batched_q( m, n, dA_array, ldda, dAT_array, lddat, batchCount, magma_stream );
}
| 544cd5ade84667530049902d4b4c34af2aa8265c.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from ztranspose_conj.cu normal z -> c, Fri Sep 11 18:29:21 2015
@author Stan Tomov
@author Mark Gates
*/
#include "common_magma.h"
#define PRECISION_c
#if defined(PRECISION_z)
#define NX 16
#else
#define NX 32
#endif
#define NB 32
#define NY 8
// nearly same code in ctranspose.cu
// tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB.
// uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly.
// subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB
// for each subtile
// load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY
// save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY
// A += NX
// AT += NX*ldat
//
// e.g., with NB=32, NX=32, NY=8 ([sdc] precisions)
// load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 )
// save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14)
//
// e.g., with NB=32, NX=16, NY=8 (z precision)
// load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14)
// save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12)
// (AT21 AT22)
static __device__ void
ctranspose_conj_device(
int m, int n,
const magmaFloatComplex *A, int lda,
magmaFloatComplex *AT, int ldat)
{
__shared__ magmaFloatComplex sA[NB][NX+1];
int tx = threadIdx.x;
int ty = threadIdx.y;
int ibx = blockIdx.x*NB;
int iby = blockIdx.y*NB;
int i, j;
A += ibx + tx + (iby + ty)*lda;
AT += iby + tx + (ibx + ty)*ldat;
#pragma unroll
for( int tile=0; tile < NB/NX; ++tile ) {
// load NX-by-NB subtile transposed from A into sA
i = ibx + tx + tile*NX;
j = iby + ty;
if (i < m) {
#pragma unroll
for( int j2=0; j2 < NB; j2 += NY ) {
if (j + j2 < n) {
sA[ty + j2][tx] = MAGMA_C_CNJG( A[j2*lda] );
}
}
}
__syncthreads();
// save NB-by-NX subtile from sA into AT
i = iby + tx;
j = ibx + ty + tile*NX;
#pragma unroll
for( int i2=0; i2 < NB; i2 += NX ) {
if (i + i2 < n) {
#pragma unroll
for( int j2=0; j2 < NX; j2 += NY ) {
if (j + j2 < m) {
AT[i2 + j2*ldat] = sA[tx + i2][ty + j2];
}
}
}
}
__syncthreads();
// move to next subtile
A += NX;
AT += NX*ldat;
}
}
/*
kernel wrapper to call the device function.
*/
__global__
void ctranspose_conj_kernel(
int m, int n,
const magmaFloatComplex *A, int lda,
magmaFloatComplex *AT, int ldat)
{
ctranspose_conj_device(m, n, A, lda, AT, ldat);
}
__global__
void ctranspose_conj_kernel_batched(
int m, int n,
magmaFloatComplex **dA_array, int lda,
magmaFloatComplex **dAT_array, int ldat)
{
int batchid = blockIdx.z;
ctranspose_conj_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat);
}
/**
Purpose
-------
ctranspose_conj_q copies and conjugate-transposes a matrix dA to matrix dAT.
Same as ctranspose_conj, but adds queue argument.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA COMPLEX array, dimension (LDDA,N)
The M-by-N matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= M.
@param[in]
dAT COMPLEX array, dimension (LDDAT,M)
The N-by-M matrix dAT.
@param[in]
lddat INTEGER
The leading dimension of the array dAT. LDDAT >= N.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_ctranspose_conj_q(
magma_int_t m, magma_int_t n,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_ptr dAT, magma_int_t lddat,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < m )
info = -4;
else if ( lddat < n )
info = -6;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
/* Quick return */
if ( (m == 0) || (n == 0) )
return;
dim3 threads( NX, NY );
dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ) );
ctranspose_conj_kernel<<< grid, threads, 0, queue >>>
( m, n, dA, ldda, dAT, lddat );
}
/**
@see magmablas_ctranspose_conj_q
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_ctranspose_conj(
magma_int_t m, magma_int_t n,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_ptr dAT, magma_int_t lddat )
{
magmablas_ctranspose_conj_q( m, n, dA, ldda, dAT, lddat, magma_stream );
}
/**
Purpose
-------
ctranspose_conj_batched_q copies and conjugate-transposes a matrix dA_array[i] to matrix dAT_array[i].
Same as ctranspose_conj_batched, but adds queue argument.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA_array
COMPLEX* array, dimension (batchCount)
array of pointers to the matrices dA, where each dA is of dimension (LDDA,N)
The M-by-N matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= M.
@param[in]
dAT_array
COMPLEX* array, dimension (batchCount)
array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M)
The N-by-M matrix dAT.
@param[in]
lddat INTEGER
The leading dimension of the array dAT. LDDAT >= N.
@param[in]
queue magma_queue_t
Queue to execute in.
@param[in]
batchCount Number of matrices in dA_array and dAT_array
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_ctranspose_conj_batched_q(
magma_int_t m, magma_int_t n,
magmaFloatComplex **dA_array, magma_int_t ldda,
magmaFloatComplex **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < m )
info = -4;
else if ( lddat < n )
info = -6;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
/* Quick return */
if ( (m == 0) || (n == 0) )
return;
dim3 threads( NX, NY );
dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ), batchCount );
ctranspose_conj_kernel_batched<<< grid, threads, 0, queue >>>
( m, n, dA_array, ldda, dAT_array, lddat );
}
/**
@see magmablas_ctranspose_conj_batched_q
@ingroup magma_caux2
********************************************************************/
extern "C" void
magmablas_ctranspose_conj_batched(
magma_int_t m, magma_int_t n,
magmaFloatComplex **dA_array, magma_int_t ldda,
magmaFloatComplex **dAT_array, magma_int_t lddat, magma_int_t batchCount )
{
magmablas_ctranspose_conj_batched_q( m, n, dA_array, ldda, dAT_array, lddat, batchCount, magma_stream );
}
|
b7a704d98ea100dae72bddbce6c715527ad069b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "diffusion3d_cuda_temporal_blocking.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define CUDA_SAFE_CALL(c) \
do { \
hipError_t _e = c; \
if (_e != hipSuccess) { \
fprintf(stderr, "Error: %s\n", hipGetErrorString(_e)); \
} \
} while (0)
namespace diffusion3d {
#if 0
__global__ void diffusion_kernel_temporal_blocking_1st_half(
REAL *f1, REAL *f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
int i = (blockDim.x - 2) * blockIdx.x + threadIdx.x - 1;
i = max(i, 0);
i = min(i, nx-1);
int j = (blockDim.y - 2) * blockIdx.y + threadIdx.y - 1;
j = max(j, 0);
j = min(j, ny-1);
int c = i + j * nx;
int xy = nx * ny;
for (int k = 0; k < nz; ++k) {
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
int b = (k == 0) ? c : c - xy;
int t = (k == nz-1) ? c : c + xy;
f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
c += xy;
}
return;
}
__global__ void diffusion_kernel_temporal_blocking_2nd_half(
REAL *f1, REAL *f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
int i = (blockDim.x - 2) * blockIdx.x + min(threadIdx.x, blockDim.x - 3);
i = min(i, nx-1);
int j = (blockDim.y - 2) * blockIdx.y + min(threadIdx.y, blockDim.y - 3);
j = min(j, ny-1);
int c = i + j * nx;
int xy = nx * ny;
for (int k = 0; k < nz; ++k) {
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
int b = (k == 0) ? c : c - xy;
int t = (k == nz-1) ? c : c + xy;
#if 0
if (threadIdx.x > 0 && threadIdx.x < (blockDim.x - 1) &&
threadIdx.y > 0 && threadIdx.y < (blockDim.y - 1)) {
#endif
f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
#if 0
}
#endif
c += xy;
}
return;
}
#endif
__global__ void diffusion_kernel_temporal_blocking(
REAL *f1, REAL *f2, int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb, REAL cc) {
int i, j, c, sc;
int i2, j2, c2, sc2;
extern __shared__ REAL sb[];
REAL *sb1 = sb;
REAL *sb2 = sb + blockDim.x * blockDim.y;
REAL *sb3 = sb + blockDim.x * blockDim.y * 2;
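// Added note: sb1/sb2/sb3 hold three consecutive z-planes of the first-step results; the
// pointer rotation at the end of each k iteration slides this window up in z so the second
// stencil application can reuse the planes without re-reading global memory.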
i = (blockDim.x - 2) * blockIdx.x + threadIdx.x - 1;
i = max(i, 0);
i = min(i, nx-1);
j = (blockDim.y - 2) * blockIdx.y + threadIdx.y - 1;
j = max(j, 0);
j = min(j, ny-1);
c = i + j * nx;
sc = threadIdx.x + threadIdx.y * blockDim.x;
const int xy = nx * ny;
i2 = (blockDim.x - 2) * blockIdx.x +
min(threadIdx.x, blockDim.x - 3);
i2 = min(i2, nx-1);
j2 = (blockDim.y - 2) * blockIdx.y +
min(threadIdx.y, blockDim.y - 3);
j2 = min(j2, ny-1);
c2 = i2 + j2 * nx;
sc2 = (i2 % (blockDim.x-2)) + 1 + ((j2 % (blockDim.y-2)) + 1) * blockDim.x;
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
int b = c;
int t = c + xy;
float v = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
sb2[sc] = v;
c += xy;
for (int k = 1; k < nz; ++k) {
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
int b = (k == 0) ? c : c - xy;
int t = (k == nz-1) ? c : c + xy;
float v = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
sb3[sc] = v;
c += xy;
__syncthreads();
w = (i2 == 0) ? sc2 : sc2 - 1;
e = (i2 == nx-1) ? sc2 : sc2 + 1;
n = (j2 == 0) ? sc2 : sc2 - blockDim.x;
s = (j2 == ny-1) ? sc2 : sc2 + blockDim.x;
REAL *bv = (k-1 == 0) ? sb2 + sc2 : sb1 + sc2;
REAL *tv = sb3 + sc2;
f2[c2] = cc * sb2[sc2] + cw * sb2[w] + ce * sb2[e] + cs * sb2[s]
+ cn * sb2[n] + cb * (*bv) + ct * (*tv);
c2 += xy;
__syncthreads();
REAL *sb_tmp = sb1;
sb1 = sb2;
sb2 = sb3;
sb3 = sb_tmp;
}
w = (i2 == 0) ? sc2 : sc2 - 1;
e = (i2 == nx-1) ? sc2 : sc2 + 1;
n = (j2 == 0) ? sc2 : sc2 - blockDim.x;
s = (j2 == ny-1) ? sc2 : sc2 + blockDim.x;
REAL *bv = sb1 + sc2;
REAL *tv = sb2 + sc2;
f2[c2] = cc * sb2[sc2] + cw * sb2[w] + ce * sb2[e] + cs * sb2[s]
+ cn * sb2[n] + cb * (*bv) + ct * (*tv);
return;
}
__global__ void diffusion_kernel_temporal_blocking2(
REAL *f1, REAL *f2, int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb, REAL cc) {
int c, sc;
int c2, sc2;
extern __shared__ REAL sb[];
REAL *sb1 = sb;
REAL *sb2 = sb + blockDim.x * blockDim.y;
REAL *sb3 = sb + blockDim.x * blockDim.y * 2;
const int i = min(
nx-1, max(0,
(blockDim.x - 2) * blockIdx.x + threadIdx.x - 1));
const int j =
min(ny-1,
max(0, (blockDim.y - 2) * blockIdx.y + threadIdx.y - 1));
c = i + j * nx;
sc = threadIdx.x + threadIdx.y * blockDim.x;
const int xy = nx * ny;
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
int b = c;
int t = c + xy;
float v = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
sb2[sc] = v;
c += xy;
w += xy;
e += xy;
n += xy;
s += xy;
t += xy;
const int i2 = min(nx-1, (blockDim.x - 2) * blockIdx.x +
min(threadIdx.x, blockDim.x - 3));
const int j2 = min(ny-1, (blockDim.y - 2) * blockIdx.y +
min(threadIdx.y, blockDim.y - 3));
c2 = i2 + j2 * nx;
sc2 = (i2 % (blockDim.x-2)) + 1 + ((j2 % (blockDim.y-2)) + 1) * blockDim.x;
int w2 = (i2 == 0) ? sc2 : sc2 - 1;
int e2 = (i2 == nx-1) ? sc2 : sc2 + 1;
int n2 = (j2 == 0) ? sc2 : sc2 - blockDim.x;
int s2 = (j2 == ny-1) ? sc2 : sc2 + blockDim.x;
{
float v = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
sb3[sc] = v;
c += xy;
w += xy;
e += xy;
n += xy;
s += xy;
b += xy;
t += xy;
__syncthreads();
f2[c2] = cc * sb2[sc2] + cw * sb2[w2] + ce * sb2[e2] + cs * sb2[s2]
+ cn * sb2[n2] + cb * sb2[sc2] + ct * sb3[sc2];
c2 += xy;
__syncthreads();
REAL *sb_tmp = sb1;
sb1 = sb2;
sb2 = sb3;
sb3 = sb_tmp;
}
for (int k = 2; k < nz-1; ++k) {
float v = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
sb3[sc] = v;
c += xy;
w += xy;
e += xy;
n += xy;
s += xy;
b += xy;
t += xy;
__syncthreads();
f2[c2] = cc * sb2[sc2] + cw * sb2[w2] + ce * sb2[e2] + cs * sb2[s2]
+ cn * sb2[n2] + cb * sb1[sc2] + ct * sb3[sc2];
c2 += xy;
__syncthreads();
REAL *sb_tmp = sb1;
sb1 = sb2;
sb2 = sb3;
sb3 = sb_tmp;
}
sb3[sc] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[c];
__syncthreads();
f2[c2] = cc * sb2[sc2] + cw * sb2[w2] + ce * sb2[e2] + cs * sb2[s2]
+ cn * sb2[n2] + cb * sb1[sc2] + ct * sb3[sc2];
c2 += xy;
__syncthreads();
REAL *sb_tmp = sb1;
sb1 = sb2;
sb2 = sb3;
sb3 = sb_tmp;
f2[c2] = cc * sb2[sc2] + cw * sb2[w2] + ce * sb2[e2] + cs * sb2[s2]
+ cn * sb2[n2] + cb * sb1[sc2] + ct * sb2[sc2];
return;
}
void Diffusion3DCUDATemporalBlocking::RunKernel(int count) {
int flag = 0;
size_t s = sizeof(REAL) * nx_ * ny_ * nz_;
CUDA_SAFE_CALL(hipMemcpy(f1_d_, f1_, s, hipMemcpyHostToDevice));
assert(block_x_ > 2);
assert(block_y_ > 2);
dim3 block_dim(block_x_, block_y_);
dim3 grid_dim(nx_ / (block_x_ - 2), ny_ / (block_y_ - 2), 1);
if (nx_ % (block_x_ - 2)) ++grid_dim.x;
if (ny_ % (block_y_ - 2)) ++grid_dim.y;
size_t shared_size = sizeof(REAL) * block_dim.x * block_dim.y * 3;
printf("Shared memory size: %ld bytes\n", shared_size);
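// Added note: the two-thread OpenMP region below overlaps measurement with execution --
// thread 0 samples GPU power via GetPowerGPU(&flag, 0) (which presumably polls until flag
// is set), while thread 1 launches the timed kernels between ev1_ and ev2_ and raises flag
// once the device has drained.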
#pragma omp parallel num_threads(2) shared(flag)
{
if (omp_get_thread_num() == 0)
{
power = GetPowerGPU(&flag, 0);
}
else
{
#pragma omp barrier
CUDA_SAFE_CALL(hipEventRecord(ev1_));
for (int i = 0; i < count; i += 2) {
#if 1
hipLaunchKernelGGL(( diffusion_kernel_temporal_blocking2),
dim3(grid_dim), dim3(block_dim), shared_size, 0,
f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
//CUDA_SAFE_CALL(hipGetLastError());
REAL *f_tmp = f1_d_;
f1_d_ = f2_d_;
f2_d_ = f_tmp;
#elif 0
hipLaunchKernelGGL(( diffusion_kernel_temporal_blocking),
dim3(grid_dim), dim3(block_dim), shared_size, 0,
f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
//CUDA_SAFE_CALL(hipGetLastError());
REAL *f_tmp = f1_d_;
f1_d_ = f2_d_;
f2_d_ = f_tmp;
#elif 0
hipLaunchKernelGGL(( diffusion_kernel_temporal_blocking_1st_half), dim3(grid_dim), dim3(block_dim), 0, 0,
f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
hipLaunchKernelGGL(( diffusion_kernel_temporal_blocking_2nd_half), dim3(grid_dim), dim3(block_dim), 0, 0,
f2_d_, f1_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
#elif 0
hipLaunchKernelGGL(( diffusion_kernel_temporal_blocking_1st_half), dim3(grid_dim), dim3(block_dim), 0, 0,
f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
hipLaunchKernelGGL(( diffusion_kernel_temporal_blocking_1st_half), dim3(grid_dim), dim3(block_dim), 0, 0,
f2_d_, f1_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
#else
hipLaunchKernelGGL(( diffusion_kernel_temporal_blocking_2nd_half), dim3(grid_dim), dim3(block_dim), 0, 0,
f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
hipLaunchKernelGGL(( diffusion_kernel_temporal_blocking_2nd_half), dim3(grid_dim), dim3(block_dim), 0, 0,
f2_d_, f1_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
#endif
}
CUDA_SAFE_CALL(hipEventRecord(ev2_));
CUDA_SAFE_CALL(hipDeviceSynchronize());
flag = 1;
}
}
CUDA_SAFE_CALL(hipMemcpy(f1_, f1_d_, s, hipMemcpyDeviceToHost));
return;
}
}
| b7a704d98ea100dae72bddbce6c715527ad069b3.cu | #include "diffusion3d_cuda_temporal_blocking.h"
#include <cuda.h>
#include <cuda_runtime.h>
#define CUDA_SAFE_CALL(c) \
do { \
cudaError_t _e = c; \
if (_e != cudaSuccess) { \
fprintf(stderr, "Error: %s\n", cudaGetErrorString(_e)); \
} \
} while (0)
namespace diffusion3d {
#if 0
__global__ void diffusion_kernel_temporal_blocking_1st_half(
REAL *f1, REAL *f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
int i = (blockDim.x - 2) * blockIdx.x + threadIdx.x - 1;
i = max(i, 0);
i = min(i, nx-1);
int j = (blockDim.y - 2) * blockIdx.y + threadIdx.y - 1;
j = max(j, 0);
j = min(j, ny-1);
int c = i + j * nx;
int xy = nx * ny;
for (int k = 0; k < nz; ++k) {
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
int b = (k == 0) ? c : c - xy;
int t = (k == nz-1) ? c : c + xy;
f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
c += xy;
}
return;
}
__global__ void diffusion_kernel_temporal_blocking_2nd_half(
REAL *f1, REAL *f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
int i = (blockDim.x - 2) * blockIdx.x + min(threadIdx.x, blockDim.x - 3);
i = min(i, nx-1);
int j = (blockDim.y - 2) * blockIdx.y + min(threadIdx.y, blockDim.y - 3);
j = min(j, ny-1);
int c = i + j * nx;
int xy = nx * ny;
for (int k = 0; k < nz; ++k) {
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
int b = (k == 0) ? c : c - xy;
int t = (k == nz-1) ? c : c + xy;
#if 0
if (threadIdx.x > 0 && threadIdx.x < (blockDim.x - 1) &&
threadIdx.y > 0 && threadIdx.y < (blockDim.y - 1)) {
#endif
f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
#if 0
}
#endif
c += xy;
}
return;
}
#endif
__global__ void diffusion_kernel_temporal_blocking(
REAL *f1, REAL *f2, int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb, REAL cc) {
int i, j, c, sc;
int i2, j2, c2, sc2;
extern __shared__ REAL sb[];
REAL *sb1 = sb;
REAL *sb2 = sb + blockDim.x * blockDim.y;
REAL *sb3 = sb + blockDim.x * blockDim.y * 2;
i = (blockDim.x - 2) * blockIdx.x + threadIdx.x - 1;
i = max(i, 0);
i = min(i, nx-1);
j = (blockDim.y - 2) * blockIdx.y + threadIdx.y - 1;
j = max(j, 0);
j = min(j, ny-1);
c = i + j * nx;
sc = threadIdx.x + threadIdx.y * blockDim.x;
const int xy = nx * ny;
i2 = (blockDim.x - 2) * blockIdx.x +
min(threadIdx.x, blockDim.x - 3);
i2 = min(i2, nx-1);
j2 = (blockDim.y - 2) * blockIdx.y +
min(threadIdx.y, blockDim.y - 3);
j2 = min(j2, ny-1);
c2 = i2 + j2 * nx;
sc2 = (i2 % (blockDim.x-2)) + 1 + ((j2 % (blockDim.y-2)) + 1) * blockDim.x;
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
int b = c;
int t = c + xy;
float v = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
sb2[sc] = v;
c += xy;
for (int k = 1; k < nz; ++k) {
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
int b = (k == 0) ? c : c - xy;
int t = (k == nz-1) ? c : c + xy;
float v = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
sb3[sc] = v;
c += xy;
__syncthreads();
w = (i2 == 0) ? sc2 : sc2 - 1;
e = (i2 == nx-1) ? sc2 : sc2 + 1;
n = (j2 == 0) ? sc2 : sc2 - blockDim.x;
s = (j2 == ny-1) ? sc2 : sc2 + blockDim.x;
REAL *bv = (k-1 == 0) ? sb2 + sc2 : sb1 + sc2;
REAL *tv = sb3 + sc2;
f2[c2] = cc * sb2[sc2] + cw * sb2[w] + ce * sb2[e] + cs * sb2[s]
+ cn * sb2[n] + cb * (*bv) + ct * (*tv);
c2 += xy;
__syncthreads();
REAL *sb_tmp = sb1;
sb1 = sb2;
sb2 = sb3;
sb3 = sb_tmp;
}
w = (i2 == 0) ? sc2 : sc2 - 1;
e = (i2 == nx-1) ? sc2 : sc2 + 1;
n = (j2 == 0) ? sc2 : sc2 - blockDim.x;
s = (j2 == ny-1) ? sc2 : sc2 + blockDim.x;
REAL *bv = sb1 + sc2;
REAL *tv = sb2 + sc2;
f2[c2] = cc * sb2[sc2] + cw * sb2[w] + ce * sb2[e] + cs * sb2[s]
+ cn * sb2[n] + cb * (*bv) + ct * (*tv);
return;
}
__global__ void diffusion_kernel_temporal_blocking2(
REAL *f1, REAL *f2, int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb, REAL cc) {
int c, sc;
int c2, sc2;
extern __shared__ REAL sb[];
REAL *sb1 = sb;
REAL *sb2 = sb + blockDim.x * blockDim.y;
REAL *sb3 = sb + blockDim.x * blockDim.y * 2;
const int i = min(
nx-1, max(0,
(blockDim.x - 2) * blockIdx.x + threadIdx.x - 1));
const int j =
min(ny-1,
max(0, (blockDim.y - 2) * blockIdx.y + threadIdx.y - 1));
c = i + j * nx;
sc = threadIdx.x + threadIdx.y * blockDim.x;
const int xy = nx * ny;
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
int b = c;
int t = c + xy;
float v = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
sb2[sc] = v;
c += xy;
w += xy;
e += xy;
n += xy;
s += xy;
t += xy;
const int i2 = min(nx-1, (blockDim.x - 2) * blockIdx.x +
min(threadIdx.x, blockDim.x - 3));
const int j2 = min(ny-1, (blockDim.y - 2) * blockIdx.y +
min(threadIdx.y, blockDim.y - 3));
c2 = i2 + j2 * nx;
sc2 = (i2 % (blockDim.x-2)) + 1 + ((j2 % (blockDim.y-2)) + 1) * blockDim.x;
int w2 = (i2 == 0) ? sc2 : sc2 - 1;
int e2 = (i2 == nx-1) ? sc2 : sc2 + 1;
int n2 = (j2 == 0) ? sc2 : sc2 - blockDim.x;
int s2 = (j2 == ny-1) ? sc2 : sc2 + blockDim.x;
{
float v = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
sb3[sc] = v;
c += xy;
w += xy;
e += xy;
n += xy;
s += xy;
b += xy;
t += xy;
__syncthreads();
f2[c2] = cc * sb2[sc2] + cw * sb2[w2] + ce * sb2[e2] + cs * sb2[s2]
+ cn * sb2[n2] + cb * sb2[sc2] + ct * sb3[sc2];
c2 += xy;
__syncthreads();
REAL *sb_tmp = sb1;
sb1 = sb2;
sb2 = sb3;
sb3 = sb_tmp;
}
for (int k = 2; k < nz-1; ++k) {
float v = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
sb3[sc] = v;
c += xy;
w += xy;
e += xy;
n += xy;
s += xy;
b += xy;
t += xy;
__syncthreads();
f2[c2] = cc * sb2[sc2] + cw * sb2[w2] + ce * sb2[e2] + cs * sb2[s2]
+ cn * sb2[n2] + cb * sb1[sc2] + ct * sb3[sc2];
c2 += xy;
__syncthreads();
REAL *sb_tmp = sb1;
sb1 = sb2;
sb2 = sb3;
sb3 = sb_tmp;
}
sb3[sc] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[c];
__syncthreads();
f2[c2] = cc * sb2[sc2] + cw * sb2[w2] + ce * sb2[e2] + cs * sb2[s2]
+ cn * sb2[n2] + cb * sb1[sc2] + ct * sb3[sc2];
c2 += xy;
__syncthreads();
REAL *sb_tmp = sb1;
sb1 = sb2;
sb2 = sb3;
sb3 = sb_tmp;
f2[c2] = cc * sb2[sc2] + cw * sb2[w2] + ce * sb2[e2] + cs * sb2[s2]
+ cn * sb2[n2] + cb * sb1[sc2] + ct * sb2[sc2];
return;
}
void Diffusion3DCUDATemporalBlocking::RunKernel(int count) {
int flag = 0;
size_t s = sizeof(REAL) * nx_ * ny_ * nz_;
CUDA_SAFE_CALL(cudaMemcpy(f1_d_, f1_, s, cudaMemcpyHostToDevice));
assert(block_x_ > 2);
assert(block_y_ > 2);
dim3 block_dim(block_x_, block_y_);
dim3 grid_dim(nx_ / (block_x_ - 2), ny_ / (block_y_ - 2), 1);
if (nx_ % (block_x_ - 2)) ++grid_dim.x;
if (ny_ % (block_y_ - 2)) ++grid_dim.y;
size_t shared_size = sizeof(REAL) * block_dim.x * block_dim.y * 3;
printf("Shared memory size: %ld bytes\n", shared_size);
#pragma omp parallel num_threads(2) shared(flag)
{
if (omp_get_thread_num() == 0)
{
power = GetPowerGPU(&flag, 0);
}
else
{
#pragma omp barrier
CUDA_SAFE_CALL(cudaEventRecord(ev1_));
for (int i = 0; i < count; i += 2) {
#if 1
diffusion_kernel_temporal_blocking2<<<
grid_dim, block_dim, shared_size>>>
(f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
//CUDA_SAFE_CALL(cudaGetLastError());
REAL *f_tmp = f1_d_;
f1_d_ = f2_d_;
f2_d_ = f_tmp;
#elif 0
diffusion_kernel_temporal_blocking<<<
grid_dim, block_dim, shared_size>>>
(f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
//CUDA_SAFE_CALL(cudaGetLastError());
REAL *f_tmp = f1_d_;
f1_d_ = f2_d_;
f2_d_ = f_tmp;
#elif 0
diffusion_kernel_temporal_blocking_1st_half<<<grid_dim, block_dim>>>
(f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
diffusion_kernel_temporal_blocking_2nd_half<<<grid_dim, block_dim>>>
(f2_d_, f1_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
#elif 0
diffusion_kernel_temporal_blocking_1st_half<<<grid_dim, block_dim>>>
(f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
diffusion_kernel_temporal_blocking_1st_half<<<grid_dim, block_dim>>>
(f2_d_, f1_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
#else
diffusion_kernel_temporal_blocking_2nd_half<<<grid_dim, block_dim>>>
(f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
diffusion_kernel_temporal_blocking_2nd_half<<<grid_dim, block_dim>>>
(f2_d_, f1_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
#endif
}
CUDA_SAFE_CALL(cudaEventRecord(ev2_));
CUDA_SAFE_CALL(cudaDeviceSynchronize());
flag = 1;
}
}
CUDA_SAFE_CALL(cudaMemcpy(f1_, f1_d_, s, cudaMemcpyDeviceToHost));
return;
}
}
|
c3a5e0df5f94b5846100cf824a0cb0b503fb2d90.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <torch/types.h>
#include <hipsparse.h>
#include "computeUtil.h"
__global__ void topoCacheCoarsenSPMMKernel(
int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C
) {
extern __shared__ int sh[];
int sm_offset = (threadIdx.y<<5);
int thread_idx = sm_offset+threadIdx.x;
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<m) {
int cid = (blockIdx.y<<6)+threadIdx.x;
int lb = A_indptr[rid];
int hb = A_indptr[rid+1];
int ptr = lb+threadIdx.x;
int offset;
float acc1 = sum_init();
float acc2 = sum_init();
if (blockIdx.y != gridDim.y-1) {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[(sm_offset+kk)] + cid;
acc1 = sum_reduce(acc1, B[offset]);
acc2 = sum_reduce(acc2, B[(offset+32)]);
// acc1 = sum_reduce(acc1, __ldg(B+offset));
// acc2 = sum_reduce(acc2, __ldg(B+offset+32));
}
__syncwarp();
}
offset = rid*k+cid;
C[offset] = acc1;
C[offset+32] = acc2;
}
else { // threadIdx.y==blockDim.y-1
int nout = (k-cid+31)/32;
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[(sm_offset+kk)] + cid;
if (nout>0) {
acc1 = sum_reduce(acc1, B[offset]);}
// acc1 = sum_reduce(acc1, __ldg(B+offset)); }
if (nout>1) {
acc2 = sum_reduce(acc2, B[(offset+32)]);}
// acc2 = sum_reduce(acc2, __ldg(B+offset+32));}
}
__syncwarp();
}
offset = rid*k+cid;
if (nout>0) {
C[offset] = acc1;}
if (nout>1) {
C[offset+32] = acc2;}
}
}
}
__global__ void topoCacheSPMMKernel(
int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C
) {
extern __shared__ int sh[];
int sm_offset = (threadIdx.y<<5);
int thread_idx = sm_offset + threadIdx.x;
int cid = (blockIdx.y<<5)+threadIdx.x;
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<m) {
int lb = A_indptr[rid];
int hb = A_indptr[(rid+1)];
int offset;
int ptr = lb+threadIdx.x;
float acc1 = sum_init();
if (blockIdx.y != gridDim.y-1) {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[sm_offset+kk]+cid;
acc1 = sum_reduce(acc1, B[offset]);
// acc1 = sum_reduce(acc1, __ldg(B+offset));
}
__syncwarp();
}
offset = rid*k+cid;
C[offset] = acc1;
}
else { // threadIdx.y==blockDim.y-1
int nout = (k-cid+31)/32;
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[(sm_offset+kk)] + cid;
if (nout>0) {
acc1 = sum_reduce(acc1, B[offset]);}
// acc1 = sum_reduce(acc1, __ldg(B+offset)); }
}
__syncwarp();
}
offset = rid*k+cid;
if (nout>0) {
C[offset] = acc1;}
}
}
}
__global__ void topoSimpleSPMMKernel(
int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C
) {
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<m) {
int lb = A_indptr[rid];
int hb = A_indptr[(rid+1)];
float acc1 = sum_init();
int offset;
for (int ptr=lb; ptr<hb; ptr++) {
// offset = __ldg(A_indices+ptr)*k+threadIdx.x;
// acc1 = sum_reduce(acc1, __ldg(B+offset));
offset = A_indices[ptr]*k+threadIdx.x;
acc1 = sum_reduce(acc1, B[offset]);
}
C[(rid*k+threadIdx.x)] = acc1;
}
}
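// Added dispatch note: the wrapper below picks a kernel by feature width k -- the simple
// kernel (k threads per row) for k < 32, the shared-memory cached kernel (one warp per row,
// 32 output columns per block) for 32 <= k < 64, and the coarsened kernel (two column
// accumulators per thread) for k >= 64.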
torch::Tensor spmm_cuda_no_edge_value(
torch::Tensor rowptr,
torch::Tensor colind,
torch::Tensor dense
) {
const auto m = rowptr.size(0)-1;
const auto k = dense.size(1);
auto devid = dense.device().index();
auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid);
auto out = torch::empty({m,k}, options);
if (k<32) {
const int row_per_block = 128/k;
const int n_block = (m+row_per_block-1)/row_per_block;
hipLaunchKernelGGL(( topoSimpleSPMMKernel), dim3(dim3(n_block,1,1)),dim3(dim3(k, row_per_block, 1)), 0, 0,
m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), dense.data_ptr<float>(), out.data_ptr<float>());
return out;
}
if (k<64) {
const int tile_k = (k+31)/32;
const int n_block = (m+3)/4;
hipLaunchKernelGGL(( topoCacheSPMMKernel), dim3(dim3(n_block,tile_k,1)), dim3(dim3(32,4,1)), 128*sizeof(int), 0,
m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), dense.data_ptr<float>(), out.data_ptr<float>());
return out;
}
else {
const int tile_k = (k+63)/64;
const int n_block = (m+8-1)/8;
hipLaunchKernelGGL(( topoCacheCoarsenSPMMKernel), dim3(dim3(n_block,tile_k,1)), dim3(dim3(32,8,1)), 8*32*sizeof(int), 0,
m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), dense.data_ptr<float>(), out.data_ptr<float>());
return out;
}
}
__global__ void spmm_test0(
int A_nrows, int B_ncols,
int* A_csrRowPtr, int* A_csrColInd, float* A_csrVal,
float* B_dnVal, float* C_dnVal
)
{
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<A_nrows) {
int cid = (blockIdx.y<<5)+threadIdx.x;
int lb = A_csrRowPtr[rid];
int hb = A_csrRowPtr[(rid+1)];
int offset = 0;
float acc=0;
if (blockIdx.y!=gridDim.y-1){
for (int ptr = lb; ptr<hb; ptr++) {
offset = A_csrColInd[ptr]*B_ncols+cid;
acc += A_csrVal[ptr]*B_dnVal[offset];
}
C_dnVal[(rid*B_ncols+cid)] = acc;
}
else {
for (int ptr = lb; ptr<hb; ptr++) {
if (cid<B_ncols) {
offset = A_csrColInd[ptr]*B_ncols+cid;}
acc += A_csrVal[ptr]*B_dnVal[offset];
}
if (cid<B_ncols) {
C_dnVal[(rid*B_ncols+cid)] = acc;}
}
}
}
__global__ void spmm_test1(
int A_nrows, int B_ncols,
int* A_csrRowPtr, int* A_csrColInd, float* A_csrVal,
float* B_dnVal, float* C_dnVal
)
{
extern __shared__ int sh[];
int *colInd_sh = sh;
float *val_sh = (float *)&sh[(blockDim.y<<5)];
int shmem_offset = (threadIdx.y<<5);
int thread_idx = shmem_offset+threadIdx.x;
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<A_nrows) {
int cid = (blockIdx.y<<5)+threadIdx.x;
int lb = A_csrRowPtr[rid];
int hb = A_csrRowPtr[(rid+1)];
int ptr = lb+threadIdx.x;
int offset;
float acc=0;
if (blockIdx.y != gridDim.y-1) {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
val_sh[thread_idx] = A_csrVal[ptr];
colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr];
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = colInd_sh[(shmem_offset+kk)] + cid;
acc += val_sh[(shmem_offset+kk)]*B_dnVal[offset];
}
__syncwarp();
}
C_dnVal[(rid*B_ncols+cid)] = acc;
}
else {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
val_sh[thread_idx] = A_csrVal[ptr];
colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr];
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = colInd_sh[(shmem_offset+kk)] + cid;
if (cid<B_ncols) {
acc += val_sh[(shmem_offset+kk)]*B_dnVal[offset];
}
}
__syncwarp();
}
if (cid<B_ncols) {
C_dnVal[(rid*B_ncols+cid)] = acc;
}
}
}
}
__global__ void spmm_test2(
int A_nrows, int B_ncols,
int* A_csrRowPtr, int* A_csrColInd, float* A_csrVal,
float* B_dnVal, float* C_dnVal
)
{
extern __shared__ int sh[];
int *colInd_sh = sh;
float *val_sh = (float *)&sh[(blockDim.y<<5)];
int shmem_offset = (threadIdx.y<<5);
int thread_idx = shmem_offset+threadIdx.x;
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<A_nrows) {
int cid = (blockIdx.y<<6)+threadIdx.x;
int lb = A_csrRowPtr[rid];
int hb = A_csrRowPtr[(rid+1)];
int ptr = lb+threadIdx.x;
int offset;
float acc1=0, acc2=0, val;
if (blockIdx.y != gridDim.y-1) {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
val_sh[thread_idx] = A_csrVal[ptr];
colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr];
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = colInd_sh[(shmem_offset+kk)] + cid;
val = val_sh[(shmem_offset+kk)];
acc1 += val*B_dnVal[offset];
acc2 += val*B_dnVal[offset+32];
}
__syncwarp();
}
offset = rid*B_ncols+cid;
C_dnVal[offset] = acc1;
C_dnVal[offset+32] = acc2;
}
else {
int nout = (B_ncols-cid+31)/32;
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
val_sh[thread_idx] = A_csrVal[ptr];
colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr];
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
val = val_sh[(shmem_offset+kk)];
offset = colInd_sh[(shmem_offset+kk)] + cid;
if (nout>0) {
acc1 += val*B_dnVal[offset];
}
if (nout>1) {
acc2 += val*B_dnVal[offset+32];
}
}
__syncwarp();
}
offset = rid*B_ncols+cid;
if (nout>0) {
C_dnVal[offset] = acc1;
}
if (nout>1) {
C_dnVal[(offset+32)] = acc2;
}
}
}
}
void csr2cscKernel(int m, int n, int nnz,
int *csrRowPtr, int *csrColInd, float *csrVal,
int *cscColPtr, int *cscRowInd, float *cscVal
)
{
hipsparseHandle_t handle;
checkCuSparseError(hipsparseCreate(&handle));
size_t bufferSize = 0;
void* buffer = NULL;
checkCuSparseError(hipsparseCsr2cscEx2_bufferSize(handle,
m,
n,
nnz,
csrVal,
csrRowPtr,
csrColInd,
cscVal,
cscColPtr,
cscRowInd,
HIP_R_32F,
HIPSPARSE_ACTION_SYMBOLIC,
HIPSPARSE_INDEX_BASE_ZERO,
HIPSPARSE_CSR2CSC_ALG1,
&bufferSize
));
checkCudaError(hipMalloc((void**)&buffer, bufferSize * sizeof(float)));
checkCuSparseError(hipsparseCsr2cscEx2(handle,
m,
n,
nnz,
csrVal,
csrRowPtr,
csrColInd,
cscVal,
cscColPtr,
cscRowInd,
HIP_R_32F,
HIPSPARSE_ACTION_NUMERIC,
HIPSPARSE_INDEX_BASE_ZERO,
HIPSPARSE_CSR2CSC_ALG1,
buffer
));
checkCudaError(hipFree(buffer));
checkCuSparseError(hipsparseDestroy(handle));
}
torch::Tensor spmm_cuda(
torch::Tensor rowptr,
torch::Tensor colind,
torch::Tensor values,
torch::Tensor dense
) {
const auto m = rowptr.size(0)-1;
const auto k = dense.size(1);
auto devid = dense.device().index();
auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid);
auto out = torch::empty({m,k}, options);
if (k<32) {
const int row_per_block = 128/k;
const int n_block = (m+row_per_block-1)/row_per_block;
hipLaunchKernelGGL(( spmm_test0), dim3(dim3(n_block,1,1)),dim3(dim3(k, row_per_block, 1)), 0, 0,
m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), values.data_ptr<float>(), dense.data_ptr<float>(), out.data_ptr<float>());
return out;
}
if (k<64) {
const int tile_k = (k+31)/32;
const int n_block = (m+4-1)/4;
hipLaunchKernelGGL(( spmm_test1), dim3(dim3(n_block, tile_k, 1)), dim3(dim3(32, 4, 1)), 32*4*(sizeof(int)+sizeof(float)), 0,
m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), values.data_ptr<float>(), dense.data_ptr<float>(), out.data_ptr<float>());
return out;
}
else {
const int tile_k = (k+63)/64;
const int n_block = (m+8-1)/8;
hipLaunchKernelGGL(( spmm_test2), dim3(dim3(n_block, tile_k, 1)), dim3(dim3(32, 8, 1)), 32*8*(sizeof(int)+sizeof(float)), 0,
m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), values.data_ptr<float>(), dense.data_ptr<float>(), out.data_ptr<float>());
return out;
}
}
std::vector<torch::Tensor> csr2csc_cuda(
torch::Tensor csrRowPtr,
torch::Tensor csrColInd,
torch::Tensor csrVal)
{
const auto n = csrRowPtr.size(0) - 1;
const auto nnz = csrColInd.size(0);
auto devid = csrRowPtr.device().index();
auto optionsF = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid);
auto optionsI = torch::TensorOptions().dtype(torch::kInt32).device(torch::kCUDA, devid);
auto cscColPtr = torch::empty({n + 1}, optionsI);
auto cscRowInd = torch::empty({nnz}, optionsI);
auto cscVal = torch::empty({nnz}, optionsF);
csr2cscKernel(n, n, nnz, csrRowPtr.data_ptr<int>(), csrColInd.data_ptr<int>(), csrVal.data_ptr<float>(),
cscColPtr.data_ptr<int>(), cscRowInd.data_ptr<int>(), cscVal.data_ptr<float>());
return {cscColPtr, cscRowInd, cscVal};
} | c3a5e0df5f94b5846100cf824a0cb0b503fb2d90.cu | #include <cuda.h>
#include <torch/types.h>
#include <cusparse.h>
#include "computeUtil.h"
__global__ void topoCacheCoarsenSPMMKernel(
int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C
) {
extern __shared__ int sh[];
int sm_offset = (threadIdx.y<<5);
int thread_idx = sm_offset+threadIdx.x;
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<m) {
int cid = (blockIdx.y<<6)+threadIdx.x;
int lb = A_indptr[rid];
int hb = A_indptr[rid+1];
int ptr = lb+threadIdx.x;
int offset;
float acc1 = sum_init();
float acc2 = sum_init();
if (blockIdx.y != gridDim.y-1) {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[(sm_offset+kk)] + cid;
acc1 = sum_reduce(acc1, B[offset]);
acc2 = sum_reduce(acc2, B[(offset+32)]);
// acc1 = sum_reduce(acc1, __ldg(B+offset));
// acc2 = sum_reduce(acc2, __ldg(B+offset+32));
}
__syncwarp();
}
offset = rid*k+cid;
C[offset] = acc1;
C[offset+32] = acc2;
}
else { // threadIdx.y==blockDim.y-1
int nout = (k-cid+31)/32;
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[(sm_offset+kk)] + cid;
if (nout>0) {
acc1 = sum_reduce(acc1, B[offset]);}
// acc1 = sum_reduce(acc1, __ldg(B+offset)); }
if (nout>1) {
acc2 = sum_reduce(acc2, B[(offset+32)]);}
// acc2 = sum_reduce(acc2, __ldg(B+offset+32));}
}
__syncwarp();
}
offset = rid*k+cid;
if (nout>0) {
C[offset] = acc1;}
if (nout>1) {
C[offset+32] = acc2;}
}
}
}
__global__ void topoCacheSPMMKernel(
int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C
) {
extern __shared__ int sh[];
int sm_offset = (threadIdx.y<<5);
int thread_idx = sm_offset + threadIdx.x;
int cid = (blockIdx.y<<5)+threadIdx.x;
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<m) {
int lb = A_indptr[rid];
int hb = A_indptr[(rid+1)];
int offset;
int ptr = lb+threadIdx.x;
float acc1 = sum_init();
if (blockIdx.y != gridDim.y-1) {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[sm_offset+kk]+cid;
acc1 = sum_reduce(acc1, B[offset]);
// acc1 = sum_reduce(acc1, __ldg(B+offset));
}
__syncwarp();
}
offset = rid*k+cid;
C[offset] = acc1;
}
else { // threadIdx.y==blockDim.y-1
int nout = (k-cid+31)/32;
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
sh[thread_idx] = A_indices[ptr]*k;
// sh[thread_idx] = __ldg(A_indices+ptr)*k;
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = sh[(sm_offset+kk)] + cid;
if (nout>0) {
acc1 = sum_reduce(acc1, B[offset]);}
// acc1 = sum_reduce(acc1, __ldg(B+offset)); }
}
__syncwarp();
}
offset = rid*k+cid;
if (nout>0) {
C[offset] = acc1;}
}
}
}
__global__ void topoSimpleSPMMKernel(
int m, int k, const int* A_indptr, const int* A_indices, const float* B, float* C
) {
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<m) {
int lb = A_indptr[rid];
int hb = A_indptr[(rid+1)];
float acc1 = sum_init();
int offset;
for (int ptr=lb; ptr<hb; ptr++) {
// offset = __ldg(A_indices+ptr)*k+threadIdx.x;
// acc1 = sum_reduce(acc1, __ldg(B+offset));
offset = A_indices[ptr]*k+threadIdx.x;
acc1 = sum_reduce(acc1, B[offset]);
}
C[(rid*k+threadIdx.x)] = acc1;
}
}
torch::Tensor spmm_cuda_no_edge_value(
torch::Tensor rowptr,
torch::Tensor colind,
torch::Tensor dense
) {
const auto m = rowptr.size(0)-1;
const auto k = dense.size(1);
auto devid = dense.device().index();
auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid);
auto out = torch::empty({m,k}, options);
if (k<32) {
const int row_per_block = 128/k;
const int n_block = (m+row_per_block-1)/row_per_block;
topoSimpleSPMMKernel<<< dim3(n_block,1,1),dim3(k, row_per_block, 1)>>>(
m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), dense.data_ptr<float>(), out.data_ptr<float>());
return out;
}
if (k<64) {
const int tile_k = (k+31)/32;
const int n_block = (m+3)/4;
topoCacheSPMMKernel<<< dim3(n_block,tile_k,1), dim3(32,4,1), 128*sizeof(int)>>>(
m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), dense.data_ptr<float>(), out.data_ptr<float>());
return out;
}
else {
const int tile_k = (k+63)/64;
const int n_block = (m+8-1)/8;
topoCacheCoarsenSPMMKernel<<< dim3(n_block,tile_k,1), dim3(32,8,1), 8*32*sizeof(int)>>>(
m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), dense.data_ptr<float>(), out.data_ptr<float>());
return out;
}
}
__global__ void spmm_test0(
int A_nrows, int B_ncols,
int* A_csrRowPtr, int* A_csrColInd, float* A_csrVal,
float* B_dnVal, float* C_dnVal
)
{
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<A_nrows) {
int cid = (blockIdx.y<<5)+threadIdx.x;
int lb = A_csrRowPtr[rid];
int hb = A_csrRowPtr[(rid+1)];
int offset = 0;
float acc=0;
if (blockIdx.y!=gridDim.y-1){
for (int ptr = lb; ptr<hb; ptr++) {
offset = A_csrColInd[ptr]*B_ncols+cid;
acc += A_csrVal[ptr]*B_dnVal[offset];
}
C_dnVal[(rid*B_ncols+cid)] = acc;
}
else {
for (int ptr = lb; ptr<hb; ptr++) {
if (cid<B_ncols) {
offset = A_csrColInd[ptr]*B_ncols+cid;}
acc += A_csrVal[ptr]*B_dnVal[offset];
}
if (cid<B_ncols) {
C_dnVal[(rid*B_ncols+cid)] = acc;}
}
}
}
__global__ void spmm_test1(
int A_nrows, int B_ncols,
int* A_csrRowPtr, int* A_csrColInd, float* A_csrVal,
float* B_dnVal, float* C_dnVal
)
{
extern __shared__ int sh[];
int *colInd_sh = sh;
float *val_sh = (float *)&sh[(blockDim.y<<5)];
int shmem_offset = (threadIdx.y<<5);
int thread_idx = shmem_offset+threadIdx.x;
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<A_nrows) {
int cid = (blockIdx.y<<5)+threadIdx.x;
int lb = A_csrRowPtr[rid];
int hb = A_csrRowPtr[(rid+1)];
int ptr = lb+threadIdx.x;
int offset;
float acc=0;
if (blockIdx.y != gridDim.y-1) {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
val_sh[thread_idx] = A_csrVal[ptr];
colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr];
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = colInd_sh[(shmem_offset+kk)] + cid;
acc += val_sh[(shmem_offset+kk)]*B_dnVal[offset];
}
__syncwarp();
}
C_dnVal[(rid*B_ncols+cid)] = acc;
}
else {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
val_sh[thread_idx] = A_csrVal[ptr];
colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr];
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = colInd_sh[(shmem_offset+kk)] + cid;
if (cid<B_ncols) {
acc += val_sh[(shmem_offset+kk)]*B_dnVal[offset];
}
}
__syncwarp();
}
if (cid<B_ncols) {
C_dnVal[(rid*B_ncols+cid)] = acc;
}
}
}
}
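// spmm_test2: 2x column-coarsened version of spmm_test1. Each thread accumulates
// two outputs (columns cid and cid+32), so a block covers a 64-wide tile of B;
// nout counts how many of those outputs are still inside B_ncols on the last tile.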
__global__ void spmm_test2(
int A_nrows, int B_ncols,
int* A_csrRowPtr, int* A_csrColInd, float* A_csrVal,
float* B_dnVal, float* C_dnVal
)
{
extern __shared__ int sh[];
int *colInd_sh = sh;
float *val_sh = (float *)&sh[(blockDim.y<<5)];
int shmem_offset = (threadIdx.y<<5);
int thread_idx = shmem_offset+threadIdx.x;
int rid = blockDim.y*blockIdx.x+threadIdx.y;
if (rid<A_nrows) {
int cid = (blockIdx.y<<6)+threadIdx.x;
int lb = A_csrRowPtr[rid];
int hb = A_csrRowPtr[(rid+1)];
int ptr = lb+threadIdx.x;
int offset;
float acc1=0, acc2=0, val;
if (blockIdx.y != gridDim.y-1) {
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
val_sh[thread_idx] = A_csrVal[ptr];
colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr];
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
offset = colInd_sh[(shmem_offset+kk)] + cid;
val = val_sh[(shmem_offset+kk)];
acc1 += val*B_dnVal[offset];
acc2 += val*B_dnVal[offset+32];
}
__syncwarp();
}
offset = rid*B_ncols+cid;
C_dnVal[offset] = acc1;
C_dnVal[offset+32] = acc2;
}
else {
int nout = (B_ncols-cid+31)/32;
for (int jj=lb; jj<hb; jj+=32) {
if (ptr<hb) {
val_sh[thread_idx] = A_csrVal[ptr];
colInd_sh[thread_idx] = B_ncols*A_csrColInd[ptr];
}
__syncwarp();
ptr += 32;
for (int kk=0; kk<32&&jj+kk<hb; kk++) {
val = val_sh[(shmem_offset+kk)];
offset = colInd_sh[(shmem_offset+kk)] + cid;
if (nout>0) {
acc1 += val*B_dnVal[offset];
}
if (nout>1) {
acc2 += val*B_dnVal[offset+32];
}
}
__syncwarp();
}
offset = rid*B_ncols+cid;
if (nout>0) {
C_dnVal[offset] = acc1;
}
if (nout>1) {
C_dnVal[(offset+32)] = acc2;
}
}
}
}
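// Host-side wrapper around cuSPARSE csr2cscEx2: queries the scratch-buffer size,
// allocates it, and runs the CSR -> CSC conversion (a sparse transpose) with
// 32-bit indices and float values.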
void csr2cscKernel(int m, int n, int nnz,
int *csrRowPtr, int *csrColInd, float *csrVal,
int *cscColPtr, int *cscRowInd, float *cscVal
)
{
  cusparseHandle_t handle;
  checkCuSparseError(cusparseCreate(&handle)); // the handle must be created before any cuSPARSE call
size_t bufferSize = 0;
void* buffer = NULL;
checkCuSparseError(cusparseCsr2cscEx2_bufferSize(handle,
m,
n,
nnz,
csrVal,
csrRowPtr,
csrColInd,
cscVal,
cscColPtr,
cscRowInd,
CUDA_R_32F,
                                CUSPARSE_ACTION_NUMERIC, // use the same action as the conversion call below when sizing the buffer
CUSPARSE_INDEX_BASE_ZERO,
CUSPARSE_CSR2CSC_ALG1,
&bufferSize
));
  checkCudaError(cudaMalloc((void**)&buffer, bufferSize)); // bufferSize is already reported in bytes
checkCuSparseError(cusparseCsr2cscEx2(handle,
m,
n,
nnz,
csrVal,
csrRowPtr,
csrColInd,
cscVal,
cscColPtr,
cscRowInd,
CUDA_R_32F,
CUSPARSE_ACTION_NUMERIC,
CUSPARSE_INDEX_BASE_ZERO,
CUSPARSE_CSR2CSC_ALG1,
buffer
));
  checkCudaError(cudaFree(buffer));
  checkCuSparseError(cusparseDestroy(handle));
}
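// Dispatcher for the value-weighted SpMM: selects spmm_test0/1/2 from the dense
// width k and sizes the dynamic shared memory to one int plus one float per
// thread for the cached variants.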
torch::Tensor spmm_cuda(
torch::Tensor rowptr,
torch::Tensor colind,
torch::Tensor values,
torch::Tensor dense
) {
const auto m = rowptr.size(0)-1;
const auto k = dense.size(1);
auto devid = dense.device().index();
auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid);
auto out = torch::empty({m,k}, options);
if (k<32) {
const int row_per_block = 128/k;
const int n_block = (m+row_per_block-1)/row_per_block;
spmm_test0<<<dim3(n_block,1,1),dim3(k, row_per_block, 1)>>>(
m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), values.data_ptr<float>(), dense.data_ptr<float>(), out.data_ptr<float>());
return out;
}
if (k<64) {
const int tile_k = (k+31)/32;
const int n_block = (m+4-1)/4;
spmm_test1<<<dim3(n_block, tile_k, 1), dim3(32, 4, 1), 32*4*(sizeof(int)+sizeof(float))>>> (
m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), values.data_ptr<float>(), dense.data_ptr<float>(), out.data_ptr<float>());
return out;
}
else {
const int tile_k = (k+63)/64;
const int n_block = (m+8-1)/8;
spmm_test2<<<dim3(n_block, tile_k, 1), dim3(32, 8, 1), 32*8*(sizeof(int)+sizeof(float))>>> (
m, k, rowptr.data_ptr<int>(), colind.data_ptr<int>(), values.data_ptr<float>(), dense.data_ptr<float>(), out.data_ptr<float>());
return out;
}
}
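// csr2csc_cuda: transposes a CSR matrix into CSC via csr2cscKernel. Only the
// rowptr length is known here, so the matrix is treated as square (n x n).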
std::vector<torch::Tensor> csr2csc_cuda(
torch::Tensor csrRowPtr,
torch::Tensor csrColInd,
torch::Tensor csrVal)
{
const auto n = csrRowPtr.size(0) - 1;
const auto nnz = csrColInd.size(0);
auto devid = csrRowPtr.device().index();
auto optionsF = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid);
auto optionsI = torch::TensorOptions().dtype(torch::kInt32).device(torch::kCUDA, devid);
auto cscColPtr = torch::empty({n + 1}, optionsI);
auto cscRowInd = torch::empty({nnz}, optionsI);
auto cscVal = torch::empty({nnz}, optionsF);
csr2cscKernel(n, n, nnz, csrRowPtr.data_ptr<int>(), csrColInd.data_ptr<int>(), csrVal.data_ptr<float>(),
cscColPtr.data_ptr<int>(), cscRowInd.data_ptr<int>(), cscVal.data_ptr<float>());
return {cscColPtr, cscRowInd, cscVal};
} |
cc54a8e3d085d8d6cc0cd2639b8eb3001140fb0a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* ECVL - European Computer Vision Library
* Version: 1.0.0
 * copyright (c) 2020, Università degli Studi di Modena e Reggio Emilia (UNIMORE), AImageLab
* Authors:
* Costantino Grana ([email protected])
* Federico Bolelli ([email protected])
* Michele Cancilla ([email protected])
* Laura Canalini ([email protected])
* Stefano Allegretti ([email protected])
* All rights reserved.
*/
#include "test_cuda.h"
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "ecvl/core/datatype.h"
using namespace ecvl;
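// ECVL_TUPLE expands, for every ECVL DataType, into a pair of single-thread test
// kernels (plus host launchers): one checks that a buffer copied to the GPU holds
// the pattern 50, 32, 14, 60, the other writes that pattern for copy-back tests.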
#define ECVL_TUPLE(type, ...) \
__global__ void TestCpuToGpuKernel##type(const uint8_t* data, uint8_t* res) \
{ \
using type_t = TypeInfo_t<DataType::type>; \
const type_t* cur_data = reinterpret_cast<const type_t*>(data); \
*res = 1; \
if (*(cur_data++) != 50) *res = 0; \
if (*(cur_data++) != 32) *res = 0; \
if (*(cur_data++) != 14) *res = 0; \
if (*(cur_data++) != 60) *res = 0; \
} \
\
void RunTestCpuToGpuKernel##type(const uint8_t* data, uint8_t* res) \
{ \
hipLaunchKernelGGL(( TestCpuToGpuKernel##type), dim3(1),dim3(1), 0, 0, data, res); \
} \
\
__global__ void TestGpuToCpuKernel##type(uint8_t* data) { \
using type_t = TypeInfo_t<DataType::type>; \
type_t* cur_data = reinterpret_cast<type_t*>(data); \
*(cur_data++) = 50; \
*(cur_data++) = 32; \
*(cur_data++) = 14; \
*(cur_data++) = 60; \
} \
\
void RunTestGpuToCpuKernel##type(uint8_t* data) { \
hipLaunchKernelGGL(( TestGpuToCpuKernel##type), dim3(1),dim3(1), 0, 0, data); \
}
#include "ecvl/core/datatype_existing_tuples.inc.h"
#undef ECVL_TUPLE | cc54a8e3d085d8d6cc0cd2639b8eb3001140fb0a.cu | /*
* ECVL - European Computer Vision Library
* Version: 1.0.0
 * copyright (c) 2020, Università degli Studi di Modena e Reggio Emilia (UNIMORE), AImageLab
* Authors:
* Costantino Grana ([email protected])
* Federico Bolelli ([email protected])
* Michele Cancilla ([email protected])
* Laura Canalini ([email protected])
* Stefano Allegretti ([email protected])
* All rights reserved.
*/
#include "test_cuda.h"
#include <stdint.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include "ecvl/core/datatype.h"
using namespace ecvl;
#define ECVL_TUPLE(type, ...) \
__global__ void TestCpuToGpuKernel##type(const uint8_t* data, uint8_t* res) \
{ \
using type_t = TypeInfo_t<DataType::type>; \
const type_t* cur_data = reinterpret_cast<const type_t*>(data); \
*res = 1; \
if (*(cur_data++) != 50) *res = 0; \
if (*(cur_data++) != 32) *res = 0; \
if (*(cur_data++) != 14) *res = 0; \
if (*(cur_data++) != 60) *res = 0; \
} \
\
void RunTestCpuToGpuKernel##type(const uint8_t* data, uint8_t* res) \
{ \
TestCpuToGpuKernel##type<<<1,1>>>(data, res); \
} \
\
__global__ void TestGpuToCpuKernel##type(uint8_t* data) { \
using type_t = TypeInfo_t<DataType::type>; \
type_t* cur_data = reinterpret_cast<type_t*>(data); \
*(cur_data++) = 50; \
*(cur_data++) = 32; \
*(cur_data++) = 14; \
*(cur_data++) = 60; \
} \
\
void RunTestGpuToCpuKernel##type(uint8_t* data) { \
TestGpuToCpuKernel##type<<<1,1>>>(data); \
}
#include "ecvl/core/datatype_existing_tuples.inc.h"
#undef ECVL_TUPLE |
002649df41ce0e3ee60cd23ecba8721218090304.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//scan.cu
#include "comm.h"
#include "wtime.h"
#include <stdio.h>
#include "iostream"
#define max_thd 256
#define max_block 256
graph * mygraph;
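// Per-edge triangle counting: each group of max_thd (256) threads handles one
// edge (A,B). 256 evenly spaced samples of the longer adjacency list are cached
// in shared memory; every thread then takes elements of the shorter list and
// binary-searches them, first in the cached samples and then in the narrowed
// range of the longer list in global memory. Per-thread hit counts are reduced
// by thread 0 into count[blockIdx.x].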
__global__ void block_binary_kernel
( //vertex_t* head,
//vertex_t* adj,
Edge* workload,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
//phase 1, partition
index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd;
int i = threadIdx.x% max_thd;
index_t mycount=0;
// __shared__ vertex_t cache[256];
__shared__ index_t local[max_thd];
while(tid<Ne){
// vertex_t A = head[tid];
// vertex_t B = adj[tid];
vertex_t A = workload[tid].A;
vertex_t B = workload[tid].B;
index_t m = begin[A+1]-begin[A];//degree[A];
index_t n = begin[B+1]-begin[B];//degree[B];
index_t temp;
if(m<n){
temp = A;
A = B;
B = temp;
temp = m;
m = n;
n = temp;
}
vertex_t* a = &(adj_list[begin[A]]);
vertex_t* b = &(adj_list[begin[B]]);
//initial cache
local[i]=a[i*m/max_thd];
__syncthreads();
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = max_thd;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[r];
if(X==Y){
//printf("find A %d B %d C %d\n",A,B,X);
mycount++;
bot = top + max_thd;
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/max_thd;
top = top*m/max_thd -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += max_thd;
}
tid += gridDim.x*blockDim.x/256;
__syncthreads();
}
//reduce
__syncthreads();
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
// count[blockIdx.x]+=val;
count[blockIdx.x]=val;
// if(val!=0)
// printf("+ %d\n",count[blockIdx.x]);
}
}
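// Warp-granularity variant of the same two-phase search: one edge (A,B) per
// warp, 32 cached samples of the longer list per warp in shared memory, and a
// block-wide reduction that accumulates into count[blockIdx.x].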
__global__ void warp_binary_kernel
( //vertex_t* head,
//vertex_t* adj,
Edge* workload,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
//phase 1, partition
index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns;
index_t mycount=0;
__shared__ index_t local[max_thd];
int i = threadIdx.x%32;
int p = threadIdx.x/32;
while(tid<Ne){
vertex_t A = workload[tid].A;
vertex_t B = workload[tid].B;
index_t m = begin[A+1]-begin[A];//degree[A];
index_t n = begin[B+1]-begin[B];//degree[B];
//if(i==0) printf("A %d B %d\n");
index_t temp;
if(m<n){
temp = A;
A = B;
B = temp;
temp = m;
m = n;
n = temp;
}
vertex_t* a = &(adj_list[begin[A]]);
vertex_t* b = &(adj_list[begin[B]]);
//initial cache
local[p*32+i]=a[i*m/32];
__syncthreads();
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = 32;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[p*32+r];
if(X==Y){
mycount++;
bot = top + 32;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/32;
top = top*m/32 -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += 32;
}
// tid += GPU_NUM* blockDim.x*gridDim.x/32;
tid += blockDim.x*gridDim.x/32;
__syncthreads();
}
__syncthreads();
//reduce
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
// count[blockIdx.x]=val;
count[blockIdx.x]+=val;
}
__syncthreads();
}
__global__ void init_count(index_t* count)
{
int tid = threadIdx.x;
count[tid] = 0;
}
__global__ void reduce_kernel(index_t* count)
{
index_t val = 0;
for(int i=0; i<max_block; i++){
val += count[i];
}
count[0] = val;
}
//---------------------------------------- cpu function--------------------
//------------------------------------------------------------------
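// Host-side pipeline: initDevice uploads one graph partition (adjacency list,
// row offsets) plus two edge staging buffers to the selected GPU; DeviceCompute
// copies one chunk of oriented edges to the device, runs warp_binary_kernel and
// stores the chunk's triangle count in ds_count; gpuProc iterates over
// partitions/chunks and gpuReduce releases the device memory.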
void graph::initDevice(int GPU_id,int Part_id){
//cuda memory copy of partAdj and partBegin
// hipSetDevice(GPU_id);
hipSetDevice(4);
int P=Part_id;
H_ERR(hipDeviceSynchronize() );
vertex_t* dev_adj;
index_t* dev_begin;
index_t* dev_count;
Edge* buffer0;
Edge* buffer1;
index_t EdgeCount = partEdgeCount[P];
vertex_t* Adj = partAdj[P];
index_t* Begin = partBegin[P];
H_ERR(hipMalloc(&dev_adj, EdgeCount*sizeof(vertex_t)) );
H_ERR(hipMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) );
H_ERR(hipMalloc(&dev_count, max_block*sizeof(index_t)) );
H_ERR(hipMemcpy(dev_adj, Adj, EdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) );
H_ERR(hipMemcpy(dev_begin, Begin, (vert_count+1)*sizeof(index_t), hipMemcpyHostToDevice) );
H_ERR(hipMalloc(&buffer0, BufferSize*sizeof(Edge)) );
H_ERR(hipMalloc(&buffer1, BufferSize*sizeof(Edge)) );
gdata[GPU_id].adj = dev_adj;
gdata[GPU_id].begin = dev_begin;
gdata[GPU_id].count = dev_count;
gdata[GPU_id].EdgeBuffer[0]= buffer0;
gdata[GPU_id].EdgeBuffer[1]= buffer1;
gdata[GPU_id].partition_id = P;
gdata[GPU_id].currentBuffer= 0;
hipLaunchKernelGGL(( init_count) , dim3(1),dim3(max_thd), 0, 0, dev_count);
}
void graph::DeviceCompute(int GPU_id, index_t Chunk_id){
int P = gdata[GPU_id].partition_id;
// if(ds_status[P][Chunk_id]!=0) return;
// ds_status[P][Chunk_id]=1;
// if(ds_progress[P]<Chunk_id+1) ds_progress[P] = Chunk_id+1;
//control
vertex_t* dev_adj =gdata[GPU_id].adj;
index_t* dev_begin =gdata[GPU_id].begin;
index_t* dev_count =gdata[GPU_id].count;
Edge* buffer =gdata[GPU_id].EdgeBuffer[gdata[GPU_id].currentBuffer%2];
gdata[GPU_id].currentBuffer =1-gdata[GPU_id].currentBuffer;
index_t currentBufferSize = BufferSize;
if(Chunk_id==upperEdgeCount/BufferSize){
currentBufferSize = upperEdgeCount % BufferSize;
}
hipLaunchKernelGGL(( init_count) , dim3(1),dim3(max_thd), 0, 0, dev_count);
H_ERR(hipMemcpy(buffer, &OrientedEdge[Chunk_id*BufferSize], currentBufferSize*sizeof(Edge), hipMemcpyHostToDevice) );
H_ERR(hipDeviceSynchronize() );
hipLaunchKernelGGL(( warp_binary_kernel), dim3(max_block),dim3(max_thd), 0, 0,
buffer,
dev_adj,
dev_begin,
0,
// GPU_id*256*256/32,
currentBufferSize,
dev_count
);
//write the result of this chunk back
H_ERR(hipDeviceSynchronize() );
index_t tempcount[max_block];
index_t mycount=0;
H_ERR(hipMemcpy(tempcount, dev_count, max_block*sizeof(index_t), hipMemcpyDeviceToHost));
for(int i=0; i<max_block; i++) mycount += tempcount[i];
ds_count[P][Chunk_id] = mycount;
}
void graph::gpuReduce(int GPU_id){
vertex_t* dev_adj =gdata[GPU_id].adj;
index_t* dev_begin =gdata[GPU_id].begin;
index_t* dev_count =gdata[GPU_id].count;
Edge** buffer =gdata[GPU_id].EdgeBuffer;
// H_ERR(hipDeviceSynchronize() );
// reduce_kernel <<<1,max_thd>>>(dev_count);
// H_ERR(hipMemcpy(&count[GPU_id], dev_count, sizeof(index_t), hipMemcpyDeviceToHost));
// thd_count += count[i];
// count[i] = thd_count;
H_ERR(hipFree(dev_adj) );
H_ERR(hipFree(dev_begin) );
H_ERR(hipFree(dev_count) );
H_ERR(hipFree(buffer[0]) );
H_ERR(hipFree(buffer[1]) );
// cout<<"GPU "<<GPU_id<<" finished"<<endl;
}
void graph::gpuProc(int GPU_id){
double t0 = wtime();
index_t total_count=0;
for(int P=0; P<PART_NUM; P++){
// int P = GPU_id/4;
// if(PART_NUM > 1) int P = GPU_id%PART_NUM;
initDevice(GPU_id,P);
index_t chunk_per_gpu = (ChunkNum-1)/GPU_NUM + 1;
for(index_t i=GPU_id*chunk_per_gpu; i<(GPU_id+1)*chunk_per_gpu; i++ ){
// for(index_t i=GPU_id; i<ChunkNum; i+=GPU_NUM ){
// for(index_t i=GPU_id; i<ChunkNum; i+= 8 ){
// if(i%8<6)
if(i<ChunkNum)
DeviceCompute(GPU_id,i);
}
gpuReduce(GPU_id);
total_count += count[GPU_id];
}
count[GPU_id] = total_count;
double t1 = wtime();
cout<<"GPU "<<GPU_id<<" time = "<<t1-t0<<endl;
}
| 002649df41ce0e3ee60cd23ecba8721218090304.cu | //scan.cu
#include "comm.h"
#include "wtime.h"
#include <stdio.h>
#include "iostream"
#define max_thd 256
#define max_block 256
graph * mygraph;
__global__ void block_binary_kernel
( //vertex_t* head,
//vertex_t* adj,
Edge* workload,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
//phase 1, partition
index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd;
int i = threadIdx.x% max_thd;
index_t mycount=0;
// __shared__ vertex_t cache[256];
__shared__ index_t local[max_thd];
while(tid<Ne){
// vertex_t A = head[tid];
// vertex_t B = adj[tid];
vertex_t A = workload[tid].A;
vertex_t B = workload[tid].B;
index_t m = begin[A+1]-begin[A];//degree[A];
index_t n = begin[B+1]-begin[B];//degree[B];
index_t temp;
if(m<n){
temp = A;
A = B;
B = temp;
temp = m;
m = n;
n = temp;
}
vertex_t* a = &(adj_list[begin[A]]);
vertex_t* b = &(adj_list[begin[B]]);
//initial cache
local[i]=a[i*m/max_thd];
__syncthreads();
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = max_thd;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[r];
if(X==Y){
//printf("find A %d B %d C %d\n",A,B,X);
mycount++;
bot = top + max_thd;
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/max_thd;
top = top*m/max_thd -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += max_thd;
}
tid += gridDim.x*blockDim.x/256;
__syncthreads();
}
//reduce
__syncthreads();
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
// count[blockIdx.x]+=val;
count[blockIdx.x]=val;
// if(val!=0)
// printf("+ %d\n",count[blockIdx.x]);
}
}
__global__ void warp_binary_kernel
( //vertex_t* head,
//vertex_t* adj,
Edge* workload,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
//phase 1, partition
index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns;
index_t mycount=0;
__shared__ index_t local[max_thd];
int i = threadIdx.x%32;
int p = threadIdx.x/32;
while(tid<Ne){
vertex_t A = workload[tid].A;
vertex_t B = workload[tid].B;
index_t m = begin[A+1]-begin[A];//degree[A];
index_t n = begin[B+1]-begin[B];//degree[B];
//if(i==0) printf("A %d B %d\n");
index_t temp;
if(m<n){
temp = A;
A = B;
B = temp;
temp = m;
m = n;
n = temp;
}
vertex_t* a = &(adj_list[begin[A]]);
vertex_t* b = &(adj_list[begin[B]]);
//initial cache
local[p*32+i]=a[i*m/32];
__syncthreads();
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = 32;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[p*32+r];
if(X==Y){
mycount++;
bot = top + 32;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/32;
top = top*m/32 -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
//printf("find A %d B %d C %d\n",A,B,X);
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += 32;
}
// tid += GPU_NUM* blockDim.x*gridDim.x/32;
tid += blockDim.x*gridDim.x/32;
__syncthreads();
}
__syncthreads();
//reduce
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
// count[blockIdx.x]=val;
count[blockIdx.x]+=val;
}
__syncthreads();
}
__global__ void init_count(index_t* count)
{
int tid = threadIdx.x;
count[tid] = 0;
}
__global__ void reduce_kernel(index_t* count)
{
index_t val = 0;
for(int i=0; i<max_block; i++){
val += count[i];
}
count[0] = val;
}
//---------------------------------------- cpu function--------------------
//------------------------------------------------------------------
void graph::initDevice(int GPU_id,int Part_id){
//cuda memory copy of partAdj and partBegin
// cudaSetDevice(GPU_id);
cudaSetDevice(4);
int P=Part_id;
H_ERR(cudaDeviceSynchronize() );
vertex_t* dev_adj;
index_t* dev_begin;
index_t* dev_count;
Edge* buffer0;
Edge* buffer1;
index_t EdgeCount = partEdgeCount[P];
vertex_t* Adj = partAdj[P];
index_t* Begin = partBegin[P];
H_ERR(cudaMalloc(&dev_adj, EdgeCount*sizeof(vertex_t)) );
H_ERR(cudaMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) );
H_ERR(cudaMalloc(&dev_count, max_block*sizeof(index_t)) );
H_ERR(cudaMemcpy(dev_adj, Adj, EdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) );
H_ERR(cudaMemcpy(dev_begin, Begin, (vert_count+1)*sizeof(index_t), cudaMemcpyHostToDevice) );
H_ERR(cudaMalloc(&buffer0, BufferSize*sizeof(Edge)) );
H_ERR(cudaMalloc(&buffer1, BufferSize*sizeof(Edge)) );
gdata[GPU_id].adj = dev_adj;
gdata[GPU_id].begin = dev_begin;
gdata[GPU_id].count = dev_count;
gdata[GPU_id].EdgeBuffer[0]= buffer0;
gdata[GPU_id].EdgeBuffer[1]= buffer1;
gdata[GPU_id].partition_id = P;
gdata[GPU_id].currentBuffer= 0;
init_count <<<1,max_thd>>>(dev_count);
}
void graph::DeviceCompute(int GPU_id, index_t Chunk_id){
int P = gdata[GPU_id].partition_id;
// if(ds_status[P][Chunk_id]!=0) return;
// ds_status[P][Chunk_id]=1;
// if(ds_progress[P]<Chunk_id+1) ds_progress[P] = Chunk_id+1;
//control
vertex_t* dev_adj =gdata[GPU_id].adj;
index_t* dev_begin =gdata[GPU_id].begin;
index_t* dev_count =gdata[GPU_id].count;
Edge* buffer =gdata[GPU_id].EdgeBuffer[gdata[GPU_id].currentBuffer%2];
gdata[GPU_id].currentBuffer =1-gdata[GPU_id].currentBuffer;
index_t currentBufferSize = BufferSize;
if(Chunk_id==upperEdgeCount/BufferSize){
currentBufferSize = upperEdgeCount % BufferSize;
}
init_count <<<1,max_thd>>>(dev_count);
H_ERR(cudaMemcpy(buffer, &OrientedEdge[Chunk_id*BufferSize], currentBufferSize*sizeof(Edge), cudaMemcpyHostToDevice) );
H_ERR(cudaDeviceSynchronize() );
warp_binary_kernel<<<max_block,max_thd>>>
( buffer,
dev_adj,
dev_begin,
0,
// GPU_id*256*256/32,
currentBufferSize,
dev_count
);
//write the result of this chunk back
H_ERR(cudaDeviceSynchronize() );
index_t tempcount[max_block];
index_t mycount=0;
H_ERR(cudaMemcpy(tempcount, dev_count, max_block*sizeof(index_t), cudaMemcpyDeviceToHost));
for(int i=0; i<max_block; i++) mycount += tempcount[i];
ds_count[P][Chunk_id] = mycount;
}
void graph::gpuReduce(int GPU_id){
vertex_t* dev_adj =gdata[GPU_id].adj;
index_t* dev_begin =gdata[GPU_id].begin;
index_t* dev_count =gdata[GPU_id].count;
Edge** buffer =gdata[GPU_id].EdgeBuffer;
// H_ERR(cudaDeviceSynchronize() );
// reduce_kernel <<<1,max_thd>>>(dev_count);
// H_ERR(cudaMemcpy(&count[GPU_id], dev_count, sizeof(index_t), cudaMemcpyDeviceToHost));
// thd_count += count[i];
// count[i] = thd_count;
H_ERR(cudaFree(dev_adj) );
H_ERR(cudaFree(dev_begin) );
H_ERR(cudaFree(dev_count) );
H_ERR(cudaFree(buffer[0]) );
H_ERR(cudaFree(buffer[1]) );
// cout<<"GPU "<<GPU_id<<" finished"<<endl;
}
void graph::gpuProc(int GPU_id){
double t0 = wtime();
index_t total_count=0;
for(int P=0; P<PART_NUM; P++){
// int P = GPU_id/4;
// if(PART_NUM > 1) int P = GPU_id%PART_NUM;
initDevice(GPU_id,P);
index_t chunk_per_gpu = (ChunkNum-1)/GPU_NUM + 1;
for(index_t i=GPU_id*chunk_per_gpu; i<(GPU_id+1)*chunk_per_gpu; i++ ){
// for(index_t i=GPU_id; i<ChunkNum; i+=GPU_NUM ){
// for(index_t i=GPU_id; i<ChunkNum; i+= 8 ){
// if(i%8<6)
if(i<ChunkNum)
DeviceCompute(GPU_id,i);
}
gpuReduce(GPU_id);
total_count += count[GPU_id];
}
count[GPU_id] = total_count;
double t1 = wtime();
cout<<"GPU "<<GPU_id<<" time = "<<t1-t0<<endl;
}
|
387fa693c59352b5a4b590ff43f72c2f01b14255.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Basic CUDA based triangle mesh path tracer.
* For background info, see http://raytracey.blogspot.co.nz/2015/12/gpu-path-tracing-tutorial-2-interactive.html
* Based on CUDA ray tracing code from http://cg.alexandra.dk/?p=278
* Copyright (C) 2015 Sam Lapere
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\include\cuda.h"
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\include\math_functions.h"
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\include\hip/hip_vector_types.h"
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\include\vector_functions.h"
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\include\device_launch_parameters.h"
#include "cutil_math.h" // required for float3 vector math
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\extras\CUPTI\include\GL\glew.h"
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\extras\CUPTI\include\GL\glut.h"
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\include\cuda_runtime.h"
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\include\cuda_gl_interop.h"
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\include\hiprand/hiprand.h"
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\include\hiprand/hiprand_kernel.h"
#define M_PI 3.14159265359f
#define width 1024 // screenwidth
#define height 576 // screenheight
#define samps 1 // samples per pixel per pass
int total_number_of_triangles = 0;
int frames = 0;
// scene bounding box
float3 scene_aabbox_min;
float3 scene_aabbox_max;
// the scene triangles are stored in a 1D CUDA texture of float4 for memory alignment
// store two edges instead of vertices
// each triangle is stored as three float4s: (float4 first_vertex, float4 edge1, float4 edge2)
texture<float4, 1, hipReadModeElementType> triangle_texture;
// hardcoded camera position
__device__ float3 firstcamorig = { 50, 52, 295.6 };
// OpenGL vertex buffer object for real-time viewport
GLuint vbo;
void *d_vbo_buffer = NULL;
struct Ray {
float3 orig; // ray origin
float3 dir; // ray direction
__device__ Ray(float3 o_, float3 d_) : orig(o_), dir(d_) {}
};
enum Refl_t { DIFF, SPEC, REFR }; // material types, used in radiance(), only DIFF used here
// SPHERES
struct Sphere {
float rad; // radius
float3 pos, emi, col; // position, emission, color
Refl_t refl; // reflection type (DIFFuse, SPECular, REFRactive)
__device__ float intersect(const Ray &r) const { // returns distance, 0 if nohit
// Ray/sphere intersection
// Quadratic formula required to solve ax^2 + bx + c = 0
// Solution x = (-b +- sqrt(b*b - 4ac)) / 2a
// Solve t^2*d.d + 2*t*(o-p).d + (o-p).(o-p)-R^2 = 0
float3 op = pos - r.orig; //
float t, epsilon = 0.01f;
float b = dot(op, r.dir);
float disc = b*b - dot(op, op) + rad*rad; // discriminant
if (disc<0) return 0; else disc = sqrtf(disc);
return (t = b - disc)>epsilon ? t : ((t = b + disc)>epsilon ? t : 0);
}
};
// TRIANGLES
// the classic ray triangle intersection: http://www.cs.virginia.edu/~gfx/Courses/2003/ImageSynthesis/papers/Acceleration/Fast%20MinimumStorage%20RayTriangle%20Intersection.pdf
// for an explanation see http://www.scratchapixel.com/lessons/3d-basic-rendering/ray-tracing-rendering-a-triangle/moller-trumbore-ray-triangle-intersection
__device__ float RayTriangleIntersection(const Ray &r,
const float3 &v0,
const float3 &edge1,
const float3 &edge2)
{
float3 tvec = r.orig - v0;
float3 pvec = cross(r.dir, edge2);
float det = dot(edge1, pvec);
det = __fdividef(1.0f, det); // CUDA intrinsic function
float u = dot(tvec, pvec) * det;
if (u < 0.0f || u > 1.0f)
return -1.0f;
float3 qvec = cross(tvec, edge1);
float v = dot(r.dir, qvec) * det;
if (v < 0.0f || (u + v) > 1.0f)
return -1.0f;
return dot(edge2, qvec) * det;
}
__device__ float3 getTriangleNormal(const int triangleIndex){
float4 edge1 = tex1Dfetch(triangle_texture, triangleIndex * 3 + 1);
float4 edge2 = tex1Dfetch(triangle_texture, triangleIndex * 3 + 2);
// cross product of two triangle edges yields a vector orthogonal to triangle plane
float3 trinormal = cross(make_float3(edge1.x, edge1.y, edge1.z), make_float3(edge2.x, edge2.y, edge2.z));
trinormal = normalize(trinormal);
return trinormal;
}
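// Brute-force mesh intersection: loop over every triangle stored in the 1D
// texture (no acceleration structure), keeping the closest hit distance,
// triangle index and geometry type.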
__device__ void intersectAllTriangles(const Ray& r, float& t_scene, int& triangle_id, const int number_of_triangles, int& geomtype){
for (int i = 0; i < number_of_triangles; i++)
{
// the triangles are packed into the 1D texture using three consecutive float4 structs for each triangle,
// first float4 contains the first vertex, second float4 contains the first precomputed edge, third float4 contains second precomputed edge like this:
		// (float4(vertex.x,vertex.y,vertex.z, 0), float4 (edge1.x,edge1.y,edge1.z,0), float4 (edge2.x,edge2.y,edge2.z,0))
// i is triangle index, each triangle represented by 3 float4s in triangle_texture
float4 v0 = tex1Dfetch(triangle_texture, i * 3);
float4 edge1 = tex1Dfetch(triangle_texture, i * 3 + 1);
float4 edge2 = tex1Dfetch(triangle_texture, i * 3 + 2);
// intersect ray with reconstructed triangle
float t = RayTriangleIntersection(r,
make_float3(v0.x, v0.y, v0.z),
make_float3(edge1.x, edge1.y, edge1.z),
make_float3(edge2.x, edge2.y, edge2.z));
// keep track of closest distance and closest triangle
// if ray/tri intersection finds an intersection point that is closer than closest intersection found so far
if (t < t_scene && t > 0.001)
{
t_scene = t;
triangle_id = i;
geomtype = 3;
}
}
}
// AXIS ALIGNED BOXES
// helper functions
inline __device__ float3 minf3(float3 a, float3 b){ return make_float3(a.x < b.x ? a.x : b.x, a.y < b.y ? a.y : b.y, a.z < b.z ? a.z : b.z); }
inline __device__ float3 maxf3(float3 a, float3 b){ return make_float3(a.x > b.x ? a.x : b.x, a.y > b.y ? a.y : b.y, a.z > b.z ? a.z : b.z); }
inline __device__ float minf1(float a, float b){ return a < b ? a : b; }
inline __device__ float maxf1(float a, float b){ return a > b ? a : b; }
struct Box {
float3 min; // minimum bounds
float3 max; // maximum bounds
float3 emi; // emission
float3 col; // colour
Refl_t refl; // material type
// ray/box intersection
// for theoretical background of the algorithm see
// http://www.scratchapixel.com/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes/ray-box-intersection
// optimised code from http://www.gamedev.net/topic/495636-raybox-collision-intersection-point/
__device__ float intersect(const Ray &r) const {
float epsilon = 0.001f; // required to prevent self intersection
float3 tmin = (min - r.orig) / r.dir;
float3 tmax = (max - r.orig) / r.dir;
float3 real_min = minf3(tmin, tmax);
float3 real_max = maxf3(tmin, tmax);
float minmax = minf1(minf1(real_max.x, real_max.y), real_max.z);
float maxmin = maxf1(maxf1(real_min.x, real_min.y), real_min.z);
if (minmax >= maxmin) { return maxmin > epsilon ? maxmin : 0; }
else return 0;
}
// calculate normal for point on axis aligned box
__device__ float3 Box::normalAt(float3 &point) {
float3 normal = make_float3(0.f, 0.f, 0.f);
float min_distance = 1e8;
float distance;
float epsilon = 0.001f;
if (fabs(min.x - point.x) < epsilon) normal = make_float3(-1, 0, 0);
else if (fabs(max.x - point.x) < epsilon) normal = make_float3(1, 0, 0);
else if (fabs(min.y - point.y) < epsilon) normal = make_float3(0, -1, 0);
else if (fabs(max.y - point.y) < epsilon) normal = make_float3(0, 1, 0);
else if (fabs(min.z - point.z) < epsilon) normal = make_float3(0, 0, -1);
else normal = make_float3(0, 0, 1);
return normal;
}
};
// scene: 9 spheres forming a Cornell box
// small enough to fit in constant GPU memory
__constant__ Sphere spheres[] = {
// FORMAT: { float radius, float3 position, float3 emission, float3 colour, Refl_t material }
// cornell box
//{ 1e5f, { 1e5f + 1.0f, 40.8f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { 0.75f, 0.25f, 0.25f }, DIFF }, //Left 1e5f
//{ 1e5f, { -1e5f + 99.0f, 40.8f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { .25f, .25f, .75f }, DIFF }, //Right
//{ 1e5f, { 50.0f, 40.8f, 1e5f }, { 0.0f, 0.0f, 0.0f }, { .75f, .75f, .75f }, DIFF }, //Back
//{ 1e5f, { 50.0f, 40.8f, -1e5f + 600.0f }, { 0.0f, 0.0f, 0.0f }, { 0.00f, 0.00f, 0.00f }, DIFF }, //Front
//{ 1e5f, { 50.0f, -1e5f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { .75f, .75f, .75f }, DIFF }, //Bottom
//{ 1e5f, { 50.0f, -1e5f + 81.6f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { .75f, .75f, .75f }, DIFF }, //Top
//{ 16.5f, { 27.0f, 16.5f, 47.0f }, { 0.0f, 0.0f, 0.0f }, { 0.99f, 0.99f, 0.99f }, SPEC }, // small sphere 1
//{ 16.5f, { 73.0f, 16.5f, 78.0f }, { 0.0f, 0.f, .0f }, { 0.09f, 0.49f, 0.3f }, REFR }, // small sphere 2
//{ 600.0f, { 50.0f, 681.6f - .5f, 81.6f }, { 3.0f, 2.5f, 2.0f }, { 0.0f, 0.0f, 0.0f }, DIFF } // Light 12, 10 ,8
//outdoor scene: radius, position, emission, color, material
//{ 1600, { 3000.0f, 10, 6000 }, { 37, 34, 30 }, { 0.f, 0.f, 0.f }, DIFF }, // 37, 34, 30 // sun
//{ 1560, { 3500.0f, 0, 7000 }, { 50, 25, 2.5 }, { 0.f, 0.f, 0.f }, DIFF }, // 150, 75, 7.5 // sun 2
{ 10000, { 50.0f, 40.8f, -1060 }, { 0.0003, 0.01, 0.15 }, { 0.175f, 0.175f, 0.25f }, DIFF }, // sky
{ 100000, { 50.0f, -100000, 0 }, { 0.0, 0.0, 0 }, { 0.8f, 0.2f, 0.f }, DIFF }, // ground
{ 110000, { 50.0f, -110048.5, 0 }, { 3.6, 2.0, 0.2 }, { 0.f, 0.f, 0.f }, DIFF }, // horizon brightener
{ 4e4, { 50.0f, -4e4 - 30, -3000 }, { 0, 0, 0 }, { 0.2f, 0.2f, 0.2f }, DIFF }, // mountains
{ 82.5, { 30.0f, 180.5, 42 }, { 16, 12, 6 }, { .6f, .6f, 0.6f }, DIFF }, // small sphere 1
{ 12, { 115.0f, 10, 105 }, { 0.0, 0.0, 0.0 }, { 0.9f, 0.9f, 0.9f }, REFR }, // small sphere 2
{ 22, { 65.0f, 22, 24 }, { 0, 0, 0 }, { 0.9f, 0.9f, 0.9f }, SPEC }, // small sphere 3
};
__constant__ Box boxes[] = {
// FORMAT: { float3 minbounds, float3 maxbounds, float3 emission, float3 colour, Refl_t }
{ { 5.0f, 0.0f, 70.0f }, { 45.0f, 11.0f, 115.0f }, { .0f, .0f, 0.0f }, { 0.5f, 0.5f, 0.5f }, DIFF },
{ { 85.0f, 0.0f, 95.0f }, { 95.0f, 20.0f, 105.0f }, { .0f, .0f, 0.0f }, { 0.5f, 0.5f, 0.5f }, DIFF },
{ { 75.0f, 20.0f, 85.0f }, { 105.0f, 22.0f, 115.0f }, { .0f, .0f, 0.0f }, { 0.5f, 0.5f, 0.5f }, DIFF },
};
__device__ inline bool intersect_scene(const Ray &r, float &t, int &sphere_id, int &box_id, int& triangle_id, const int number_of_triangles, int &geomtype, const float3& bbmin, const float3& bbmax){
float tmin = 1e20;
float tmax = -1e20;
float d = 1e21;
float k = 1e21;
float q = 1e21;
float inf = t = 1e20;
// SPHERES
// intersect all spheres in the scene
float numspheres = sizeof(spheres) / sizeof(Sphere);
for (int i = int(numspheres); i--;) // for all spheres in scene
// keep track of distance from origin to closest intersection point
if ((d = spheres[i].intersect(r)) && d < t){ t = d; sphere_id = i; geomtype = 1; }
// BOXES
// intersect all boxes in the scene
float numboxes = sizeof(boxes) / sizeof(Box);
for (int i = int(numboxes); i--;) // for all boxes in scene
if ((k = boxes[i].intersect(r)) && k < t){ t = k; box_id = i; geomtype = 2; }
// TRIANGLES
Box scene_bbox; // bounding box around triangle meshes
scene_bbox.min = bbmin;
scene_bbox.max = bbmax;
// if ray hits bounding box of triangle meshes, intersect ray with all triangles
if (scene_bbox.intersect(r)){
intersectAllTriangles(r, t, triangle_id, number_of_triangles, geomtype);
}
// t is distance to closest intersection of ray with all primitives in the scene (spheres, boxes and triangles)
return t<inf;
}
// hash function to calculate new seed for each frame
// see http://www.reedbeta.com/blog/2013/01/12/quick-and-easy-gpu-random-numbers-in-d3d11/
uint WangHash(uint a) {
a = (a ^ 61) ^ (a >> 16);
a = a + (a << 3);
a = a ^ (a >> 4);
a = a * 0x27d4eb2d;
a = a ^ (a >> 15);
return a;
}
// radiance function
// compute path bounces in scene and accumulate returned color from each path segment
__device__ float3 radiance(Ray &r, hiprandState_t *randstate, const int totaltris, const float3& scene_aabb_min, const float3& scene_aabb_max){ // returns ray color
// colour mask
float3 mask = make_float3(1.0f, 1.0f, 1.0f);
// accumulated colour
float3 accucolor = make_float3(0.0f, 0.0f, 0.0f);
for (int bounces = 0; bounces < 5; bounces++){ // iteration up to 4 bounces (instead of recursion in CPU code)
// reset scene intersection function parameters
float t = 100000; // distance to intersection
int sphere_id = -1;
int box_id = -1; // index of intersected sphere
int triangle_id = -1;
int geomtype = -1;
float3 f; // primitive colour
float3 emit; // primitive emission colour
float3 x; // intersection point
float3 n; // normal
float3 nl; // oriented normal
float3 d; // ray direction of next path segment
Refl_t refltype;
// intersect ray with scene
// intersect_scene keeps track of closest intersected primitive and distance to closest intersection point
if (!intersect_scene(r, t, sphere_id, box_id, triangle_id, totaltris, geomtype, scene_aabb_min, scene_aabb_max))
return make_float3(0.0f, 0.0f, 0.0f); // if miss, return black
// else: we've got a hit with a scene primitive
// determine geometry type of primitive: sphere/box/triangle
// if sphere:
if (geomtype == 1){
Sphere &sphere = spheres[sphere_id]; // hit object with closest intersection
x = r.orig + r.dir*t; // intersection point on object
n = normalize(x - sphere.pos); // normal
nl = dot(n, r.dir) < 0 ? n : n * -1; // correctly oriented normal
f = sphere.col; // object colour
refltype = sphere.refl;
emit = sphere.emi; // object emission
accucolor += (mask * emit);
}
// if box:
if (geomtype == 2){
Box &box = boxes[box_id];
x = r.orig + r.dir*t; // intersection point on object
n = normalize(box.normalAt(x)); // normal
nl = dot(n, r.dir) < 0 ? n : n * -1; // correctly oriented normal
f = box.col; // box colour
refltype = box.refl;
emit = box.emi; // box emission
accucolor += (mask * emit);
}
// if triangle:
if (geomtype == 3){
int tri_index = triangle_id;
x = r.orig + r.dir*t; // intersection point
n = normalize(getTriangleNormal(tri_index)); // normal
nl = dot(n, r.dir) < 0 ? n : n * -1; // correctly oriented normal
// colour, refltype and emit value are hardcoded and apply to all triangles
// no per triangle material support yet
f = make_float3(0.9f, 0.4f, 0.1f); // triangle colour
refltype = REFR;
emit = make_float3(0.0f, 0.0f, 0.0f);
accucolor += (mask * emit);
}
// SHADING: diffuse, specular or refractive
// ideal diffuse reflection (see "Realistic Ray Tracing", P. Shirley)
if (refltype == DIFF){
// create 2 random numbers
float r1 = 2 * M_PI * hiprand_uniform(randstate);
float r2 = hiprand_uniform(randstate);
float r2s = sqrtf(r2);
// compute orthonormal coordinate frame uvw with hitpoint as origin
float3 w = nl;
float3 u = normalize(cross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w));
float3 v = cross(w, u);
// compute cosine weighted random ray direction on hemisphere
d = normalize(u*cos(r1)*r2s + v*sin(r1)*r2s + w*sqrtf(1 - r2));
// offset origin next path segment to prevent self intersection
x += nl * 0.03;
// multiply mask with colour of object
mask *= f;
}
// ideal specular reflection (mirror)
if (refltype == SPEC){
			// compute reflected ray direction (mirror reflection about the surface normal)
d = r.dir - 2.0f * n * dot(n, r.dir);
// offset origin next path segment to prevent self intersection
x += nl * 0.01f;
// multiply mask with colour of object
mask *= f;
}
// ideal refraction (based on smallpt code by Kevin Beason)
if (refltype == REFR){
bool into = dot(n, nl) > 0; // is ray entering or leaving refractive material?
float nc = 1.0f; // Index of Refraction air
float nt = 1.5f; // Index of Refraction glass/water
float nnt = into ? nc / nt : nt / nc; // IOR ratio of refractive materials
float ddn = dot(r.dir, nl);
float cos2t = 1.0f - nnt*nnt * (1.f - ddn*ddn);
if (cos2t < 0.0f) // total internal reflection
{
d = reflect(r.dir, n); //d = r.dir - 2.0f * n * dot(n, r.dir);
x += nl * 0.01f;
}
else // cos2t > 0
{
// compute direction of transmission ray
float3 tdir = normalize(r.dir * nnt - n * ((into ? 1 : -1) * (ddn*nnt + sqrtf(cos2t))));
				float R0 = ((nt - nc)*(nt - nc)) / ((nt + nc)*(nt + nc)); // Schlick reflectance at normal incidence; parentheses so the full (nt+nc)^2 divides
float c = 1.f - (into ? -ddn : dot(tdir, n));
float Re = R0 + (1.f - R0) * c * c * c * c * c;
float Tr = 1 - Re; // Transmission
float P = .25f + .5f * Re;
float RP = Re / P;
float TP = Tr / (1.f - P);
// randomly choose reflection or transmission ray
if (hiprand_uniform(randstate) < 0.25) // reflection ray
{
mask *= RP;
d = reflect(r.dir, n);
x += nl * 0.02f;
}
else // transmission ray
{
mask *= TP;
d = tdir; //r = Ray(x, tdir);
x += nl * 0.0005f; // epsilon must be small to avoid artefacts
}
}
}
// set up origin and direction of next path segment
r.orig = x;
r.dir = d;
}
// add radiance up to a certain ray depth
// return accumulated ray colour after all bounces are computed
return accucolor;
}
// required to convert colour to a format that OpenGL can display
union Colour // 4 bytes = 4 chars = 1 float
{
float c;
uchar4 components;
};
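// Progressive rendering kernel: one thread per pixel traces 'samps' primary
// rays per pass, adds the returned radiance to the accumulation buffer,
// averages over the frame count, gamma-corrects the result and packs
// (x, y, packed RGBA) into the OpenGL VBO for point rendering.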
__global__ void render_kernel(float3 *output, float3* accumbuffer, const int numtriangles, int framenumber, uint hashedframenumber, float3 scene_bbmin, float3 scene_bbmax){ // float3 *gputexdata1, int *texoffsets
// assign a CUDA thread to every pixel by using the threadIndex
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// global threadId, see richiesams blogspot
int threadId = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
// create random number generator, see RichieSams blogspot
hiprandState_t randState; // state of the random number generator, to prevent repetition
hiprand_init(hashedframenumber + threadId, 0, 0, &randState);
Ray cam(firstcamorig, normalize(make_float3(0, -0.042612, -1)));
float3 cx = make_float3(width * .5135 / height, 0.0f, 0.0f); // ray direction offset along X-axis
float3 cy = normalize(cross(cx, cam.dir)) * .5135; // ray dir offset along Y-axis, .5135 is FOV angle
float3 pixelcol; // final pixel color
int i = (height - y - 1)*width + x; // pixel index
pixelcol = make_float3(0.0f, 0.0f, 0.0f); // reset to zero for every pixel
for (int s = 0; s < samps; s++){
// compute primary ray direction
float3 d = cx*((.25 + x) / width - .5) + cy*((.25 + y) / height - .5) + cam.dir;
// normalize primary ray direction
d = normalize(d);
// add accumulated colour from path bounces
pixelcol += radiance(Ray(cam.orig + d * 40, d), &randState, numtriangles, scene_bbmin, scene_bbmax)*(1. / samps);
} // Camera rays are pushed ^^^^^ forward to start in interior
// add pixel colour to accumulation buffer (accumulates all samples)
accumbuffer[i] += pixelcol;
// averaged colour: divide colour by the number of calculated frames so far
float3 tempcol = accumbuffer[i] / framenumber;
Colour fcolour;
float3 colour = make_float3(clamp(tempcol.x, 0.0f, 1.0f), clamp(tempcol.y, 0.0f, 1.0f), clamp(tempcol.z, 0.0f, 1.0f));
// convert from 96-bit to 24-bit colour + perform gamma correction
fcolour.components = make_uchar4((unsigned char)(powf(colour.x, 1 / 2.2f) * 255), (unsigned char)(powf(colour.y, 1 / 2.2f) * 255), (unsigned char)(powf(colour.z, 1 / 2.2f) * 255), 1);
// store pixel coordinates and pixelcolour in OpenGL readable outputbuffer
output[i] = make_float3(x, y, fcolour.c);
}
void Timer(int obsolete) {
glutPostRedisplay();
glutTimerFunc(30, Timer, 0);
}
__device__ float timer = 0.0f;
inline float clamp(float x){ return x<0 ? 0 : x>1 ? 1 : x; }
//inline int toInt(float x){ return int(pow(clamp(x), 1 / 2.2) * 255 + .5); } // RGB float in range [0,1] to int in range [0, 255]
// buffer for accumulating samples over several frames
float3* accumulatebuffer;
// output buffer
float3 *dptr;
void disp(void)
{
frames++;
hipDeviceSynchronize();
// map vertex buffer object for acces by CUDA
hipGLMapBufferObject__((void**)&dptr, vbo);
//clear all pixels:
glClear(GL_COLOR_BUFFER_BIT);
// RAY TRACING:
// dim3 grid(WINDOW / block.x, WINDOW / block.y, 1);
// dim3 CUDA specific syntax, block and grid are required to schedule CUDA threads over streaming multiprocessors
dim3 block(16, 16, 1);
dim3 grid(width / block.x, height / block.y, 1);
// launch CUDA path tracing kernel, pass in a hashed seed based on number of frames
render_kernel << < grid, block >> >(dptr, accumulatebuffer, total_number_of_triangles, frames, WangHash(frames), scene_aabbox_max, scene_aabbox_min); // launches CUDA render kernel from the host
hipDeviceSynchronize();
// unmap buffer
hipGLUnmapBufferObject(vbo);
//glFlush();
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexPointer(2, GL_FLOAT, 12, 0);
glColorPointer(4, GL_UNSIGNED_BYTE, 12, (GLvoid*)8);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glDrawArrays(GL_POINTS, 0, width * height);
glDisableClientState(GL_VERTEX_ARRAY);
glutSwapBuffers();
//glutPostRedisplay();
}
// load triangle data in a CUDA texture
extern "C"
{
void bindTriangles(float *dev_triangle_p, unsigned int number_of_triangles)
{
triangle_texture.normalized = false; // access with normalized texture coordinates
		triangle_texture.filterMode = hipFilterModePoint; // point sampling, so no interpolation between texels
triangle_texture.addressMode[0] = hipAddressModeWrap; // wrap texture coordinates
size_t size = sizeof(float4)*number_of_triangles * 3;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float4>();
hipBindTexture(0, triangle_texture, dev_triangle_p, channelDesc, size);
}
}
// helpers to load triangle data
struct TriangleFace
{
int v[3]; // vertex indices
};
struct TriangleMesh
{
std::vector<float3> verts;
std::vector<TriangleFace> faces;
float3 bounding_box[2];
};
TriangleMesh mesh1;
TriangleMesh mesh2;
float *dev_triangle_p; // the cuda device pointer that points to the uploaded triangles
void loadObj(const std::string filename, TriangleMesh &mesh); // forward declaration
// 1. load triangle mesh data from obj files
// 2. copy data to CPU memory (into vector<float4> triangles)
// 3. copy to CUDA global memory (allocated with dev_triangle_p pointer)
// 4. copy to CUDA texture memory with bindtriangles()
void initCUDAmemoryTriMesh()
{
loadObj("data/bunny.obj", mesh1);
loadObj("data/bunny.obj", mesh2);
// scalefactor and offset to position/scale triangle meshes
float scalefactor1 = 200;
float scalefactor2 = 300; // 300
float3 offset1 = make_float3(90, 22, 100);// (30, -2, 80);
float3 offset2 = make_float3(30, -2, 80);
std::vector<float4> triangles;
for (unsigned int i = 0; i < mesh1.faces.size(); i++)
{
// make a local copy of the triangle vertices
float3 v0 = mesh1.verts[mesh1.faces[i].v[0] - 1];
float3 v1 = mesh1.verts[mesh1.faces[i].v[1] - 1];
float3 v2 = mesh1.verts[mesh1.faces[i].v[2] - 1];
// scale
v0 *= scalefactor1;
v1 *= scalefactor1;
v2 *= scalefactor1;
// translate
v0 += offset1;
v1 += offset1;
v2 += offset1;
// store triangle data as float4
// store two edges per triangle instead of vertices, to save some calculations in the
// ray triangle intersection test
triangles.push_back(make_float4(v0.x, v0.y, v0.z, 0));
triangles.push_back(make_float4(v1.x - v0.x, v1.y - v0.y, v1.z - v0.z, 0));
triangles.push_back(make_float4(v2.x - v0.x, v2.y - v0.y, v2.z - v0.z, 0));
}
// compute bounding box of this mesh
mesh1.bounding_box[0] *= scalefactor1; mesh1.bounding_box[0] += offset1;
mesh1.bounding_box[1] *= scalefactor1; mesh1.bounding_box[1] += offset1;
for (unsigned int i = 0; i < mesh2.faces.size(); i++)
{
float3 v0 = mesh2.verts[mesh2.faces[i].v[0] - 1];
float3 v1 = mesh2.verts[mesh2.faces[i].v[1] - 1];
float3 v2 = mesh2.verts[mesh2.faces[i].v[2] - 1];
v0 *= scalefactor2;
v1 *= scalefactor2;
v2 *= scalefactor2;
v0 += offset2;
v1 += offset2;
v2 += offset2;
triangles.push_back(make_float4(v0.x, v0.y, v0.z, 0));
triangles.push_back(make_float4(v1.x - v0.x, v1.y - v0.y, v1.z - v0.z, 1));
triangles.push_back(make_float4(v2.x - v0.x, v2.y - v0.y, v2.z - v0.z, 0));
}
mesh2.bounding_box[0] *= scalefactor2; mesh2.bounding_box[0] += offset2;
mesh2.bounding_box[1] *= scalefactor2; mesh2.bounding_box[1] += offset2;
std::cout << "total number of triangles check:" << mesh1.faces.size() + mesh2.faces.size() << " == " << triangles.size() / 3 << std::endl;
// calculate total number of triangles in the scene
size_t triangle_size = triangles.size() * sizeof(float4);
int total_num_triangles = triangles.size() / 3;
total_number_of_triangles = total_num_triangles;
if (triangle_size > 0)
{
// allocate memory for the triangle meshes on the GPU
hipMalloc((void **)&dev_triangle_p, triangle_size);
// copy triangle data to GPU
hipMemcpy(dev_triangle_p, &triangles[0], triangle_size, hipMemcpyHostToDevice);
// load triangle data into a CUDA texture
bindTriangles(dev_triangle_p, total_num_triangles);
}
// compute scene bounding box by merging bounding boxes of individual meshes
scene_aabbox_min = mesh2.bounding_box[0];
scene_aabbox_max = mesh2.bounding_box[1];
scene_aabbox_min = fminf(scene_aabbox_min, mesh1.bounding_box[0]);
scene_aabbox_max = fmaxf(scene_aabbox_max, mesh1.bounding_box[1]);
}
// read triangle data from obj file
void loadObj(const std::string filename, TriangleMesh &mesh)
{
std::ifstream in(filename.c_str());
if (!in.good())
{
std::cout << "ERROR: loading obj:(" << filename << ") file not found or not good" << "\n";
system("PAUSE");
exit(0);
}
char buffer[256], str[255];
float f1, f2, f3;
while (!in.getline(buffer, 255).eof())
{
buffer[255] = '\0';
sscanf_s(buffer, "%s", str, 255);
// reading a vertex
if (buffer[0] == 'v' && (buffer[1] == ' ' || buffer[1] == 32)){
if (sscanf(buffer, "v %f %f %f", &f1, &f2, &f3) == 3){
mesh.verts.push_back(make_float3(f1, f2, f3));
}
else{
std::cout << "ERROR: vertex not in wanted format in OBJLoader" << "\n";
exit(-1);
}
}
// reading faceMtls
else if (buffer[0] == 'f' && (buffer[1] == ' ' || buffer[1] == 32))
{
TriangleFace f;
int nt = sscanf(buffer, "f %d %d %d", &f.v[0], &f.v[1], &f.v[2]);
if (nt != 3){
std::cout << "ERROR: I don't know the format of that FaceMtl" << "\n";
exit(-1);
}
mesh.faces.push_back(f);
}
}
// calculate the bounding box of the mesh
mesh.bounding_box[0] = make_float3(1000000, 1000000, 1000000);
mesh.bounding_box[1] = make_float3(-1000000, -1000000, -1000000);
for (unsigned int i = 0; i < mesh.verts.size(); i++)
{
//update min and max value
mesh.bounding_box[0] = fminf(mesh.verts[i], mesh.bounding_box[0]);
mesh.bounding_box[1] = fmaxf(mesh.verts[i], mesh.bounding_box[1]);
}
std::cout << "obj file loaded: number of faces:" << mesh.faces.size() << " number of vertices:" << mesh.verts.size() << std::endl;
std::cout << "obj bounding box: min:(" << mesh.bounding_box[0].x << "," << mesh.bounding_box[0].y << "," << mesh.bounding_box[0].z << ") max:"
<< mesh.bounding_box[1].x << "," << mesh.bounding_box[1].y << "," << mesh.bounding_box[1].z << ")" << std::endl;
}
void createVBO(GLuint* vbo)
{
//create vertex buffer object
glGenBuffers(1, vbo);
glBindBuffer(GL_ARRAY_BUFFER, *vbo);
//initialize VBO
unsigned int size = width * height * sizeof(float3); // 3 floats
glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
//register VBO with CUDA
hipGLRegisterBufferObject(*vbo);
}
int main(int argc, char** argv){
	// allocate memory for the accumulation buffer on the GPU
hipMalloc(&accumulatebuffer, width * height * sizeof(float3));
// load triangle meshes in CUDA memory
initCUDAmemoryTriMesh();
// init glut for OpenGL viewport
glutInit(&argc, argv);
// specify the display mode to be RGB and single buffering
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
// specify the initial window position
glutInitWindowPosition(100, 100);
// specify the initial window size
glutInitWindowSize(width, height);
// create the window and set title
glutCreateWindow("Basic triangle mesh path tracer in CUDA");
// init OpenGL
glClearColor(0.0, 0.0, 0.0, 0.0);
glMatrixMode(GL_PROJECTION);
gluOrtho2D(0.0, width, 0.0, height);
fprintf(stderr, "OpenGL initialized \n");
// register callback function to display graphics:
glutDisplayFunc(disp);
glewInit();
if (!glewIsSupported("GL_VERSION_2_0 ")) {
fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing.");
fflush(stderr);
exit(0);
}
fprintf(stderr, "glew initialized \n");
// call Timer():
Timer(0);
//create VBO (vertex buffer object)
createVBO(&vbo);
fprintf(stderr, "VBO created \n");
// enter the main loop and process events
fprintf(stderr, "Entering glutMainLoop... \n");
glutMainLoop();
// free CUDA memory on exit
hipFree(accumulatebuffer);
hipFree(dev_triangle_p);
hipFree(dptr);
}
| 387fa693c59352b5a4b590ff43f72c2f01b14255.cu | /*
* Basic CUDA based triangle mesh path tracer.
* For background info, see http://raytracey.blogspot.co.nz/2015/12/gpu-path-tracing-tutorial-2-interactive.html
* Based on CUDA ray tracing code from http://cg.alexandra.dk/?p=278
* Copyright (C) 2015 Sam Lapere
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\include\cuda.h"
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\include\math_functions.h"
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\include\vector_types.h"
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\include\vector_functions.h"
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\include\device_launch_parameters.h"
#include "cutil_math.h" // required for float3 vector math
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\extras\CUPTI\include\GL\glew.h"
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\extras\CUPTI\include\GL\glut.h"
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\include\cuda_runtime.h"
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\include\cuda_gl_interop.h"
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\include\curand.h"
#include "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v7.5\include\curand_kernel.h"
#define M_PI 3.14159265359f
#define width 1024 // screenwidth
#define height 576 // screenheight
#define samps 1 // samples per pixel per pass
int total_number_of_triangles = 0;
int frames = 0;
// scene bounding box
float3 scene_aabbox_min;
float3 scene_aabbox_max;
// the scene triangles are stored in a 1D CUDA texture of float4 for memory alignment
// store two edges instead of vertices
// each triangle is stored as three float4s: (float4 first_vertex, float4 edge1, float4 edge2)
texture<float4, 1, cudaReadModeElementType> triangle_texture;
// hardcoded camera position
__device__ float3 firstcamorig = { 50, 52, 295.6 };
// OpenGL vertex buffer object for real-time viewport
GLuint vbo;
void *d_vbo_buffer = NULL;
struct Ray {
float3 orig; // ray origin
float3 dir; // ray direction
__device__ Ray(float3 o_, float3 d_) : orig(o_), dir(d_) {}
};
enum Refl_t { DIFF, SPEC, REFR }; // material types, used in radiance(), only DIFF used here
// SPHERES
struct Sphere {
float rad; // radius
float3 pos, emi, col; // position, emission, color
Refl_t refl; // reflection type (DIFFuse, SPECular, REFRactive)
__device__ float intersect(const Ray &r) const { // returns distance, 0 if nohit
// Ray/sphere intersection
// Quadratic formula required to solve ax^2 + bx + c = 0
// Solution x = (-b +- sqrt(b*b - 4ac)) / 2a
// Solve t^2*d.d + 2*t*(o-p).d + (o-p).(o-p)-R^2 = 0
float3 op = pos - r.orig; //
float t, epsilon = 0.01f;
float b = dot(op, r.dir);
float disc = b*b - dot(op, op) + rad*rad; // discriminant
if (disc<0) return 0; else disc = sqrtf(disc);
return (t = b - disc)>epsilon ? t : ((t = b + disc)>epsilon ? t : 0);
}
};
// TRIANGLES
// the classic ray triangle intersection: http://www.cs.virginia.edu/~gfx/Courses/2003/ImageSynthesis/papers/Acceleration/Fast%20MinimumStorage%20RayTriangle%20Intersection.pdf
// for an explanation see http://www.scratchapixel.com/lessons/3d-basic-rendering/ray-tracing-rendering-a-triangle/moller-trumbore-ray-triangle-intersection
__device__ float RayTriangleIntersection(const Ray &r,
const float3 &v0,
const float3 &edge1,
const float3 &edge2)
{
float3 tvec = r.orig - v0;
float3 pvec = cross(r.dir, edge2);
float det = dot(edge1, pvec);
det = __fdividef(1.0f, det); // CUDA intrinsic function
float u = dot(tvec, pvec) * det;
if (u < 0.0f || u > 1.0f)
return -1.0f;
float3 qvec = cross(tvec, edge1);
float v = dot(r.dir, qvec) * det;
if (v < 0.0f || (u + v) > 1.0f)
return -1.0f;
return dot(edge2, qvec) * det;
}
__device__ float3 getTriangleNormal(const int triangleIndex){
float4 edge1 = tex1Dfetch(triangle_texture, triangleIndex * 3 + 1);
float4 edge2 = tex1Dfetch(triangle_texture, triangleIndex * 3 + 2);
// cross product of two triangle edges yields a vector orthogonal to triangle plane
float3 trinormal = cross(make_float3(edge1.x, edge1.y, edge1.z), make_float3(edge2.x, edge2.y, edge2.z));
trinormal = normalize(trinormal);
return trinormal;
}
__device__ void intersectAllTriangles(const Ray& r, float& t_scene, int& triangle_id, const int number_of_triangles, int& geomtype){
for (int i = 0; i < number_of_triangles; i++)
{
// the triangles are packed into the 1D texture using three consecutive float4 structs for each triangle,
// first float4 contains the first vertex, second float4 contains the first precomputed edge, third float4 contains second precomputed edge like this:
// (float4(vertex.x,vertex.y,vertex.z, 0), float4 (edge1.x,edge1.y,edge1.z,0),float4 (edge2.x,edge2.y,edge2.z,0))

// i is triangle index, each triangle represented by 3 float4s in triangle_texture
float4 v0 = tex1Dfetch(triangle_texture, i * 3);
float4 edge1 = tex1Dfetch(triangle_texture, i * 3 + 1);
float4 edge2 = tex1Dfetch(triangle_texture, i * 3 + 2);
// intersect ray with reconstructed triangle
float t = RayTriangleIntersection(r,
make_float3(v0.x, v0.y, v0.z),
make_float3(edge1.x, edge1.y, edge1.z),
make_float3(edge2.x, edge2.y, edge2.z));
// keep track of closest distance and closest triangle
// if ray/tri intersection finds an intersection point that is closer than closest intersection found so far
if (t < t_scene && t > 0.001)
{
t_scene = t;
triangle_id = i;
geomtype = 3;
}
}
}
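// performance note: this is a brute-force O(number_of_triangles) loop per ray;
// a BVH or similar acceleration structure would be the usual next step for larger meshes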
// AXIS ALIGNED BOXES
// helper functions
inline __device__ float3 minf3(float3 a, float3 b){ return make_float3(a.x < b.x ? a.x : b.x, a.y < b.y ? a.y : b.y, a.z < b.z ? a.z : b.z); }
inline __device__ float3 maxf3(float3 a, float3 b){ return make_float3(a.x > b.x ? a.x : b.x, a.y > b.y ? a.y : b.y, a.z > b.z ? a.z : b.z); }
inline __device__ float minf1(float a, float b){ return a < b ? a : b; }
inline __device__ float maxf1(float a, float b){ return a > b ? a : b; }
struct Box {
float3 min; // minimum bounds
float3 max; // maximum bounds
float3 emi; // emission
float3 col; // colour
Refl_t refl; // material type
// ray/box intersection
// for theoretical background of the algorithm see
// http://www.scratchapixel.com/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes/ray-box-intersection
// optimised code from http://www.gamedev.net/topic/495636-raybox-collision-intersection-point/
__device__ float intersect(const Ray &r) const {
float epsilon = 0.001f; // required to prevent self intersection
float3 tmin = (min - r.orig) / r.dir;
float3 tmax = (max - r.orig) / r.dir;
float3 real_min = minf3(tmin, tmax);
float3 real_max = maxf3(tmin, tmax);
float minmax = minf1(minf1(real_max.x, real_max.y), real_max.z);
float maxmin = maxf1(maxf1(real_min.x, real_min.y), real_min.z);
if (minmax >= maxmin) { return maxmin > epsilon ? maxmin : 0; }
else return 0;
}
// calculate normal for point on axis aligned box
__device__ float3 normalAt(float3 &point) { // extra "Box::" qualification is invalid inside the class definition
float3 normal = make_float3(0.f, 0.f, 0.f);
float min_distance = 1e8;
float distance;
float epsilon = 0.001f;
if (fabs(min.x - point.x) < epsilon) normal = make_float3(-1, 0, 0);
else if (fabs(max.x - point.x) < epsilon) normal = make_float3(1, 0, 0);
else if (fabs(min.y - point.y) < epsilon) normal = make_float3(0, -1, 0);
else if (fabs(max.y - point.y) < epsilon) normal = make_float3(0, 1, 0);
else if (fabs(min.z - point.z) < epsilon) normal = make_float3(0, 0, -1);
else normal = make_float3(0, 0, 1);
return normal;
}
};
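// note: normalAt assumes the query point lies on the box surface (within epsilon of
// one of the six faces); if no face matches it falls through to the +z normal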
// scene: 9 spheres forming a Cornell box
// small enough to fit in constant GPU memory
__constant__ Sphere spheres[] = {
// FORMAT: { float radius, float3 position, float3 emission, float3 colour, Refl_t material }
// cornell box
//{ 1e5f, { 1e5f + 1.0f, 40.8f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { 0.75f, 0.25f, 0.25f }, DIFF }, //Left 1e5f
//{ 1e5f, { -1e5f + 99.0f, 40.8f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { .25f, .25f, .75f }, DIFF }, //Right
//{ 1e5f, { 50.0f, 40.8f, 1e5f }, { 0.0f, 0.0f, 0.0f }, { .75f, .75f, .75f }, DIFF }, //Back
//{ 1e5f, { 50.0f, 40.8f, -1e5f + 600.0f }, { 0.0f, 0.0f, 0.0f }, { 0.00f, 0.00f, 0.00f }, DIFF }, //Front
//{ 1e5f, { 50.0f, -1e5f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { .75f, .75f, .75f }, DIFF }, //Bottom
//{ 1e5f, { 50.0f, -1e5f + 81.6f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { .75f, .75f, .75f }, DIFF }, //Top
//{ 16.5f, { 27.0f, 16.5f, 47.0f }, { 0.0f, 0.0f, 0.0f }, { 0.99f, 0.99f, 0.99f }, SPEC }, // small sphere 1
//{ 16.5f, { 73.0f, 16.5f, 78.0f }, { 0.0f, 0.f, .0f }, { 0.09f, 0.49f, 0.3f }, REFR }, // small sphere 2
//{ 600.0f, { 50.0f, 681.6f - .5f, 81.6f }, { 3.0f, 2.5f, 2.0f }, { 0.0f, 0.0f, 0.0f }, DIFF } // Light 12, 10 ,8
//outdoor scene: radius, position, emission, color, material
//{ 1600, { 3000.0f, 10, 6000 }, { 37, 34, 30 }, { 0.f, 0.f, 0.f }, DIFF }, // 37, 34, 30 // sun
//{ 1560, { 3500.0f, 0, 7000 }, { 50, 25, 2.5 }, { 0.f, 0.f, 0.f }, DIFF }, // 150, 75, 7.5 // sun 2
{ 10000, { 50.0f, 40.8f, -1060 }, { 0.0003, 0.01, 0.15 }, { 0.175f, 0.175f, 0.25f }, DIFF }, // sky
{ 100000, { 50.0f, -100000, 0 }, { 0.0, 0.0, 0 }, { 0.8f, 0.2f, 0.f }, DIFF }, // ground
{ 110000, { 50.0f, -110048.5, 0 }, { 3.6, 2.0, 0.2 }, { 0.f, 0.f, 0.f }, DIFF }, // horizon brightener
{ 4e4, { 50.0f, -4e4 - 30, -3000 }, { 0, 0, 0 }, { 0.2f, 0.2f, 0.2f }, DIFF }, // mountains
{ 82.5, { 30.0f, 180.5, 42 }, { 16, 12, 6 }, { .6f, .6f, 0.6f }, DIFF }, // small sphere 1
{ 12, { 115.0f, 10, 105 }, { 0.0, 0.0, 0.0 }, { 0.9f, 0.9f, 0.9f }, REFR }, // small sphere 2
{ 22, { 65.0f, 22, 24 }, { 0, 0, 0 }, { 0.9f, 0.9f, 0.9f }, SPEC }, // small sphere 3
};
__constant__ Box boxes[] = {
// FORMAT: { float3 minbounds, float3 maxbounds, float3 emission, float3 colour, Refl_t }
{ { 5.0f, 0.0f, 70.0f }, { 45.0f, 11.0f, 115.0f }, { .0f, .0f, 0.0f }, { 0.5f, 0.5f, 0.5f }, DIFF },
{ { 85.0f, 0.0f, 95.0f }, { 95.0f, 20.0f, 105.0f }, { .0f, .0f, 0.0f }, { 0.5f, 0.5f, 0.5f }, DIFF },
{ { 75.0f, 20.0f, 85.0f }, { 105.0f, 22.0f, 115.0f }, { .0f, .0f, 0.0f }, { 0.5f, 0.5f, 0.5f }, DIFF },
};
__device__ inline bool intersect_scene(const Ray &r, float &t, int &sphere_id, int &box_id, int& triangle_id, const int number_of_triangles, int &geomtype, const float3& bbmin, const float3& bbmax){
float tmin = 1e20;
float tmax = -1e20;
float d = 1e21;
float k = 1e21;
float q = 1e21;
float inf = t = 1e20;
// SPHERES
// intersect all spheres in the scene
float numspheres = sizeof(spheres) / sizeof(Sphere);
for (int i = int(numspheres); i--;) // for all spheres in scene
// keep track of distance from origin to closest intersection point
if ((d = spheres[i].intersect(r)) && d < t){ t = d; sphere_id = i; geomtype = 1; }
// BOXES
// intersect all boxes in the scene
float numboxes = sizeof(boxes) / sizeof(Box);
for (int i = int(numboxes); i--;) // for all boxes in scene
if ((k = boxes[i].intersect(r)) && k < t){ t = k; box_id = i; geomtype = 2; }
// TRIANGLES
Box scene_bbox; // bounding box around triangle meshes
scene_bbox.min = bbmin;
scene_bbox.max = bbmax;
// if ray hits bounding box of triangle meshes, intersect ray with all triangles
if (scene_bbox.intersect(r)){
intersectAllTriangles(r, t, triangle_id, number_of_triangles, geomtype);
}
// t is distance to closest intersection of ray with all primitives in the scene (spheres, boxes and triangles)
return t<inf;
}
// hash function to calculate new seed for each frame
// see http://www.reedbeta.com/blog/2013/01/12/quick-and-easy-gpu-random-numbers-in-d3d11/
uint WangHash(uint a) {
a = (a ^ 61) ^ (a >> 16);
a = a + (a << 3);
a = a ^ (a >> 4);
a = a * 0x27d4eb2d;
a = a ^ (a >> 15);
return a;
}
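// hashing the frame counter decorrelates the curand seeds used on consecutive frames,
// so the accumulation buffer keeps converging instead of replaying the same
// pseudo-random sequence every pass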
// radiance function
// compute path bounces in scene and accumulate returned color from each path segment
__device__ float3 radiance(Ray &r, curandState *randstate, const int totaltris, const float3& scene_aabb_min, const float3& scene_aabb_max){ // returns ray color
// colour mask
float3 mask = make_float3(1.0f, 1.0f, 1.0f);
// accumulated colour
float3 accucolor = make_float3(0.0f, 0.0f, 0.0f);
for (int bounces = 0; bounces < 5; bounces++){ // iteration up to 4 bounces (instead of recursion in CPU code)
// reset scene intersection function parameters
float t = 100000; // distance to intersection
int sphere_id = -1;
int box_id = -1; // index of intersected sphere
int triangle_id = -1;
int geomtype = -1;
float3 f; // primitive colour
float3 emit; // primitive emission colour
float3 x; // intersection point
float3 n; // normal
float3 nl; // oriented normal
float3 d; // ray direction of next path segment
Refl_t refltype;
// intersect ray with scene
// intersect_scene keeps track of closest intersected primitive and distance to closest intersection point
if (!intersect_scene(r, t, sphere_id, box_id, triangle_id, totaltris, geomtype, scene_aabb_min, scene_aabb_max))
return make_float3(0.0f, 0.0f, 0.0f); // if miss, return black
// else: we've got a hit with a scene primitive
// determine geometry type of primitive: sphere/box/triangle
// if sphere:
if (geomtype == 1){
Sphere &sphere = spheres[sphere_id]; // hit object with closest intersection
x = r.orig + r.dir*t; // intersection point on object
n = normalize(x - sphere.pos); // normal
nl = dot(n, r.dir) < 0 ? n : n * -1; // correctly oriented normal
f = sphere.col; // object colour
refltype = sphere.refl;
emit = sphere.emi; // object emission
accucolor += (mask * emit);
}
// if box:
if (geomtype == 2){
Box &box = boxes[box_id];
x = r.orig + r.dir*t; // intersection point on object
n = normalize(box.normalAt(x)); // normal
nl = dot(n, r.dir) < 0 ? n : n * -1; // correctly oriented normal
f = box.col; // box colour
refltype = box.refl;
emit = box.emi; // box emission
accucolor += (mask * emit);
}
// if triangle:
if (geomtype == 3){
int tri_index = triangle_id;
x = r.orig + r.dir*t; // intersection point
n = normalize(getTriangleNormal(tri_index)); // normal
nl = dot(n, r.dir) < 0 ? n : n * -1; // correctly oriented normal
// colour, refltype and emit value are hardcoded and apply to all triangles
// no per triangle material support yet
f = make_float3(0.9f, 0.4f, 0.1f); // triangle colour
refltype = REFR;
emit = make_float3(0.0f, 0.0f, 0.0f);
accucolor += (mask * emit);
}
// SHADING: diffuse, specular or refractive
// ideal diffuse reflection (see "Realistic Ray Tracing", P. Shirley)
if (refltype == DIFF){
// create 2 random numbers
float r1 = 2 * M_PI * curand_uniform(randstate);
float r2 = curand_uniform(randstate);
float r2s = sqrtf(r2);
// compute orthonormal coordinate frame uvw with hitpoint as origin
float3 w = nl;
float3 u = normalize(cross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w));
float3 v = cross(w, u);
// compute cosine weighted random ray direction on hemisphere
d = normalize(u*cos(r1)*r2s + v*sin(r1)*r2s + w*sqrtf(1 - r2));
// offset origin next path segment to prevent self intersection
x += nl * 0.03;
// multiply mask with colour of object
mask *= f;
}
// ideal specular reflection (mirror)
if (refltype == SPEC){
// compute reflected ray direction (law of reflection)
d = r.dir - 2.0f * n * dot(n, r.dir);
// offset origin next path segment to prevent self intersection
x += nl * 0.01f;
// multiply mask with colour of object
mask *= f;
}
// ideal refraction (based on smallpt code by Kevin Beason)
if (refltype == REFR){
bool into = dot(n, nl) > 0; // is ray entering or leaving refractive material?
float nc = 1.0f; // Index of Refraction air
float nt = 1.5f; // Index of Refraction glass/water
float nnt = into ? nc / nt : nt / nc; // IOR ratio of refractive materials
float ddn = dot(r.dir, nl);
float cos2t = 1.0f - nnt*nnt * (1.f - ddn*ddn);
if (cos2t < 0.0f) // total internal reflection
{
d = reflect(r.dir, n); //d = r.dir - 2.0f * n * dot(n, r.dir);
x += nl * 0.01f;
}
else // cos2t > 0
{
// compute direction of transmission ray
float3 tdir = normalize(r.dir * nnt - n * ((into ? 1 : -1) * (ddn*nnt + sqrtf(cos2t))));
float R0 = ((nt - nc)*(nt - nc)) / ((nt + nc)*(nt + nc)); // Schlick reflectance at normal incidence
float c = 1.f - (into ? -ddn : dot(tdir, n));
float Re = R0 + (1.f - R0) * c * c * c * c * c;
float Tr = 1 - Re; // Transmission
float P = .25f + .5f * Re;
float RP = Re / P;
float TP = Tr / (1.f - P);
// randomly choose reflection or transmission ray
if (curand_uniform(randstate) < P) // reflection ray, chosen with probability P
{
mask *= RP;
d = reflect(r.dir, n);
x += nl * 0.02f;
}
else // transmission ray
{
mask *= TP;
d = tdir; //r = Ray(x, tdir);
x += nl * 0.0005f; // epsilon must be small to avoid artefacts
}
}
}
// set up origin and direction of next path segment
r.orig = x;
r.dir = d;
}
// add radiance up to a certain ray depth
// return accumulated ray colour after all bounces are computed
return accucolor;
}
// required to convert colour to a format that OpenGL can display
union Colour // 4 bytes = 4 chars = 1 float
{
float c;
uchar4 components;
};
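// the float member carries the raw bit pattern of the packed uchar4 (RGBA);
// each VBO entry written by render_kernel is 12 bytes: two floats for the pixel
// position followed by this packed colour, matching the glVertexPointer /
// glColorPointer strides set up in disp()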
__global__ void render_kernel(float3 *output, float3* accumbuffer, const int numtriangles, int framenumber, uint hashedframenumber, float3 scene_bbmin, float3 scene_bbmax){ // float3 *gputexdata1, int *texoffsets
// assign a CUDA thread to every pixel by using the threadIndex
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
// global threadId, see richiesams blogspot
int threadId = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
// create random number generator, see RichieSams blogspot
curandState randState; // state of the random number generator, to prevent repetition
curand_init(hashedframenumber + threadId, 0, 0, &randState);
Ray cam(firstcamorig, normalize(make_float3(0, -0.042612, -1)));
float3 cx = make_float3(width * .5135 / height, 0.0f, 0.0f); // ray direction offset along X-axis
float3 cy = normalize(cross(cx, cam.dir)) * .5135; // ray dir offset along Y-axis, .5135 is FOV angle
float3 pixelcol; // final pixel color
int i = (height - y - 1)*width + x; // pixel index
pixelcol = make_float3(0.0f, 0.0f, 0.0f); // reset to zero for every pixel
for (int s = 0; s < samps; s++){
// compute primary ray direction
float3 d = cx*((.25 + x) / width - .5) + cy*((.25 + y) / height - .5) + cam.dir;
// normalize primary ray direction
d = normalize(d);
// add accumulated colour from path bounces
pixelcol += radiance(Ray(cam.orig + d * 40, d), &randState, numtriangles, scene_bbmin, scene_bbmax)*(1. / samps);
} // Camera rays are pushed ^^^^^ forward to start in interior
// add pixel colour to accumulation buffer (accumulates all samples)
accumbuffer[i] += pixelcol;
// averaged colour: divide colour by the number of calculated frames so far
float3 tempcol = accumbuffer[i] / framenumber;
Colour fcolour;
float3 colour = make_float3(clamp(tempcol.x, 0.0f, 1.0f), clamp(tempcol.y, 0.0f, 1.0f), clamp(tempcol.z, 0.0f, 1.0f));
// convert from 96-bit to 24-bit colour + perform gamma correction
fcolour.components = make_uchar4((unsigned char)(powf(colour.x, 1 / 2.2f) * 255), (unsigned char)(powf(colour.y, 1 / 2.2f) * 255), (unsigned char)(powf(colour.z, 1 / 2.2f) * 255), 1);
// store pixel coordinates and pixelcolour in OpenGL readable outputbuffer
output[i] = make_float3(x, y, fcolour.c);
}
void Timer(int obsolete) {
glutPostRedisplay();
glutTimerFunc(30, Timer, 0);
}
__device__ float timer = 0.0f;
inline float clamp(float x){ return x<0 ? 0 : x>1 ? 1 : x; }
//inline int toInt(float x){ return int(pow(clamp(x), 1 / 2.2) * 255 + .5); } // RGB float in range [0,1] to int in range [0, 255]
// buffer for accumulating samples over several frames
float3* accumulatebuffer;
// output buffer
float3 *dptr;
void disp(void)
{
frames++;
cudaThreadSynchronize();
// map vertex buffer object for acces by CUDA
cudaGLMapBufferObject((void**)&dptr, vbo);
//clear all pixels:
glClear(GL_COLOR_BUFFER_BIT);
// RAY TRACING:
// dim3 grid(WINDOW / block.x, WINDOW / block.y, 1);
// dim3 CUDA specific syntax, block and grid are required to schedule CUDA threads over streaming multiprocessors
dim3 block(16, 16, 1);
dim3 grid(width / block.x, height / block.y, 1);
// launch CUDA path tracing kernel, pass in a hashed seed based on number of frames
render_kernel << < grid, block >> >(dptr, accumulatebuffer, total_number_of_triangles, frames, WangHash(frames), scene_aabbox_min, scene_aabbox_max); // launches CUDA render kernel from the host
cudaThreadSynchronize();
// unmap buffer
cudaGLUnmapBufferObject(vbo);
//glFlush();
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexPointer(2, GL_FLOAT, 12, 0);
glColorPointer(4, GL_UNSIGNED_BYTE, 12, (GLvoid*)8);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glDrawArrays(GL_POINTS, 0, width * height);
glDisableClientState(GL_VERTEX_ARRAY);
glutSwapBuffers();
//glutPostRedisplay();
}
// load triangle data in a CUDA texture
extern "C"
{
void bindTriangles(float *dev_triangle_p, unsigned int number_of_triangles)
{
triangle_texture.normalized = false; // access with normalized texture coordinates
triangle_texture.filterMode = cudaFilterModePoint; // Point mode, so no
triangle_texture.addressMode[0] = cudaAddressModeWrap; // wrap texture coordinates
size_t size = sizeof(float4)*number_of_triangles * 3;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float4>();
cudaBindTexture(0, triangle_texture, dev_triangle_p, channelDesc, size);
}
}
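// note: texture references such as triangle_texture are deprecated in recent CUDA
// releases (removed in CUDA 12); cudaTextureObject_t would be the modern replacement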
// helpers to load triangle data
struct TriangleFace
{
int v[3]; // vertex indices
};
struct TriangleMesh
{
std::vector<float3> verts;
std::vector<TriangleFace> faces;
float3 bounding_box[2];
};
TriangleMesh mesh1;
TriangleMesh mesh2;
float *dev_triangle_p; // the cuda device pointer that points to the uploaded triangles
void loadObj(const std::string filename, TriangleMesh &mesh); // forward declaration
// 1. load triangle mesh data from obj files
// 2. copy data to CPU memory (into vector<float4> triangles)
// 3. copy to CUDA global memory (allocated with dev_triangle_p pointer)
// 4. copy to CUDA texture memory with bindtriangles()
void initCUDAmemoryTriMesh()
{
loadObj("data/bunny.obj", mesh1);
loadObj("data/bunny.obj", mesh2);
// scalefactor and offset to position/scale triangle meshes
float scalefactor1 = 200;
float scalefactor2 = 300; // 300
float3 offset1 = make_float3(90, 22, 100);// (30, -2, 80);
float3 offset2 = make_float3(30, -2, 80);
std::vector<float4> triangles;
for (unsigned int i = 0; i < mesh1.faces.size(); i++)
{
// make a local copy of the triangle vertices
float3 v0 = mesh1.verts[mesh1.faces[i].v[0] - 1];
float3 v1 = mesh1.verts[mesh1.faces[i].v[1] - 1];
float3 v2 = mesh1.verts[mesh1.faces[i].v[2] - 1];
// scale
v0 *= scalefactor1;
v1 *= scalefactor1;
v2 *= scalefactor1;
// translate
v0 += offset1;
v1 += offset1;
v2 += offset1;
// store triangle data as float4
// store two edges per triangle instead of vertices, to save some calculations in the
// ray triangle intersection test
triangles.push_back(make_float4(v0.x, v0.y, v0.z, 0));
triangles.push_back(make_float4(v1.x - v0.x, v1.y - v0.y, v1.z - v0.z, 0));
triangles.push_back(make_float4(v2.x - v0.x, v2.y - v0.y, v2.z - v0.z, 0));
}
// compute bounding box of this mesh
mesh1.bounding_box[0] *= scalefactor1; mesh1.bounding_box[0] += offset1;
mesh1.bounding_box[1] *= scalefactor1; mesh1.bounding_box[1] += offset1;
for (unsigned int i = 0; i < mesh2.faces.size(); i++)
{
float3 v0 = mesh2.verts[mesh2.faces[i].v[0] - 1];
float3 v1 = mesh2.verts[mesh2.faces[i].v[1] - 1];
float3 v2 = mesh2.verts[mesh2.faces[i].v[2] - 1];
v0 *= scalefactor2;
v1 *= scalefactor2;
v2 *= scalefactor2;
v0 += offset2;
v1 += offset2;
v2 += offset2;
triangles.push_back(make_float4(v0.x, v0.y, v0.z, 0));
triangles.push_back(make_float4(v1.x - v0.x, v1.y - v0.y, v1.z - v0.z, 1));
triangles.push_back(make_float4(v2.x - v0.x, v2.y - v0.y, v2.z - v0.z, 0));
}
mesh2.bounding_box[0] *= scalefactor2; mesh2.bounding_box[0] += offset2;
mesh2.bounding_box[1] *= scalefactor2; mesh2.bounding_box[1] += offset2;
std::cout << "total number of triangles check:" << mesh1.faces.size() + mesh2.faces.size() << " == " << triangles.size() / 3 << std::endl;
// calculate total number of triangles in the scene
size_t triangle_size = triangles.size() * sizeof(float4);
int total_num_triangles = triangles.size() / 3;
total_number_of_triangles = total_num_triangles;
if (triangle_size > 0)
{
// allocate memory for the triangle meshes on the GPU
cudaMalloc((void **)&dev_triangle_p, triangle_size);
// copy triangle data to GPU
cudaMemcpy(dev_triangle_p, &triangles[0], triangle_size, cudaMemcpyHostToDevice);
// load triangle data into a CUDA texture
bindTriangles(dev_triangle_p, total_num_triangles);
}
// compute scene bounding box by merging bounding boxes of individual meshes
scene_aabbox_min = mesh2.bounding_box[0];
scene_aabbox_max = mesh2.bounding_box[1];
scene_aabbox_min = fminf(scene_aabbox_min, mesh1.bounding_box[0]);
scene_aabbox_max = fmaxf(scene_aabbox_max, mesh1.bounding_box[1]);
}
// read triangle data from obj file
void loadObj(const std::string filename, TriangleMesh &mesh)
{
std::ifstream in(filename.c_str());
if (!in.good())
{
std::cout << "ERROR: loading obj:(" << filename << ") file not found or not good" << "\n";
system("PAUSE");
exit(0);
}
char buffer[256], str[255];
float f1, f2, f3;
while (!in.getline(buffer, 255).eof())
{
buffer[255] = '\0';
sscanf_s(buffer, "%s", str, 255);
// reading a vertex
if (buffer[0] == 'v' && (buffer[1] == ' ' || buffer[1] == 32)){
if (sscanf(buffer, "v %f %f %f", &f1, &f2, &f3) == 3){
mesh.verts.push_back(make_float3(f1, f2, f3));
}
else{
std::cout << "ERROR: vertex not in wanted format in OBJLoader" << "\n";
exit(-1);
}
}
// reading faceMtls
else if (buffer[0] == 'f' && (buffer[1] == ' ' || buffer[1] == 32))
{
TriangleFace f;
int nt = sscanf(buffer, "f %d %d %d", &f.v[0], &f.v[1], &f.v[2]);
if (nt != 3){
std::cout << "ERROR: I don't know the format of that FaceMtl" << "\n";
exit(-1);
}
mesh.faces.push_back(f);
}
}
// calculate the bounding box of the mesh
mesh.bounding_box[0] = make_float3(1000000, 1000000, 1000000);
mesh.bounding_box[1] = make_float3(-1000000, -1000000, -1000000);
for (unsigned int i = 0; i < mesh.verts.size(); i++)
{
//update min and max value
mesh.bounding_box[0] = fminf(mesh.verts[i], mesh.bounding_box[0]);
mesh.bounding_box[1] = fmaxf(mesh.verts[i], mesh.bounding_box[1]);
}
std::cout << "obj file loaded: number of faces:" << mesh.faces.size() << " number of vertices:" << mesh.verts.size() << std::endl;
std::cout << "obj bounding box: min:(" << mesh.bounding_box[0].x << "," << mesh.bounding_box[0].y << "," << mesh.bounding_box[0].z << ") max:"
<< mesh.bounding_box[1].x << "," << mesh.bounding_box[1].y << "," << mesh.bounding_box[1].z << ")" << std::endl;
}
void createVBO(GLuint* vbo)
{
//create vertex buffer object
glGenBuffers(1, vbo);
glBindBuffer(GL_ARRAY_BUFFER, *vbo);
//initialize VBO
unsigned int size = width * height * sizeof(float3); // 3 floats
glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
//register VBO with CUDA
cudaGLRegisterBufferObject(*vbo);
}
int main(int argc, char** argv){
// allocate memmory for the accumulation buffer on the GPU
cudaMalloc(&accumulatebuffer, width * height * sizeof(float3));
// load triangle meshes in CUDA memory
initCUDAmemoryTriMesh();
// init glut for OpenGL viewport
glutInit(&argc, argv);
// specify the display mode to be RGB and double buffering
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
// specify the initial window position
glutInitWindowPosition(100, 100);
// specify the initial window size
glutInitWindowSize(width, height);
// create the window and set title
glutCreateWindow("Basic triangle mesh path tracer in CUDA");
// init OpenGL
glClearColor(0.0, 0.0, 0.0, 0.0);
glMatrixMode(GL_PROJECTION);
gluOrtho2D(0.0, width, 0.0, height);
fprintf(stderr, "OpenGL initialized \n");
// register callback function to display graphics:
glutDisplayFunc(disp);
glewInit();
if (!glewIsSupported("GL_VERSION_2_0 ")) {
fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing.");
fflush(stderr);
exit(0);
}
fprintf(stderr, "glew initialized \n");
// call Timer():
Timer(0);
//create VBO (vertex buffer object)
createVBO(&vbo);
fprintf(stderr, "VBO created \n");
// enter the main loop and process events
fprintf(stderr, "Entering glutMainLoop... \n");
glutMainLoop();
// free CUDA memory on exit
cudaFree(accumulatebuffer);
cudaFree(dev_triangle_p);
cudaFree(dptr);
}
|
09afe316969f3f9e12161c525920d84fa5dc4aa2.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void checkIndex()
{
printf("ThreadIdx:(%d %d %d) blockIdx:(%d %d %d) blockDim:(%d %d %d) "
"gridDim:(%d %d %d)\n", threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y,
blockDim.z, gridDim.x, gridDim.y, gridDim.z);
}
int main(int argc, char **argv)
{
// define total data element
int nElem = 6;
// define grid and block structure
dim3 block(3);
dim3 grid((nElem+block.x-1) / block.x);
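// with nElem = 6 and block.x = 3 this rounds up to grid.x = (6 + 3 - 1) / 3 = 2,
// i.e. 2 blocks of 3 threads covering all 6 elements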
// check grid and block dimension from host side
printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);
// check grid and block dimension from device side
hipLaunchKernelGGL(( checkIndex) , dim3(grid), dim3(block), 0, 0, );
// reset device before leaving
hipDeviceReset();
return 0;
}
| 09afe316969f3f9e12161c525920d84fa5dc4aa2.cu | #include <cuda_runtime.h>
#include <stdio.h>
__global__ void checkIndex()
{
printf("ThreadIdx:(%d %d %d) blockIdx:(%d %d %d) blockDim:(%d %d %d) "
"gridDim:(%d %d %d)\n", threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y,
blockDim.z, gridDim.x, gridDim.y, gridDim.z);
}
int main(int argc, char **argv)
{
// define total data element
int nElem = 6;
// define grid and block structure
dim3 block(3);
dim3 grid((nElem+block.x-1) / block.x);
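// with nElem = 6 and block.x = 3 this rounds up to grid.x = (6 + 3 - 1) / 3 = 2,
// i.e. 2 blocks of 3 threads covering all 6 elements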
// check grid and block dimension from host side
printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);
// check grid and block dimension from device side
checkIndex <<<grid, block>>> ();
// reset device before leaving
cudaDeviceReset();
return 0;
}
|
1830bcaeacb3678c4ce545c09a78b7719d822a54.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO ORC reader class implementation
**/
#include "reader_impl.hpp"
#include "timezone.h"
#include <io/comp/gpuinflate.h>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_buffer.hpp>
#include <algorithm>
#include <array>
namespace cudf {
namespace io {
namespace detail {
namespace orc {
// Import functionality that's independent of legacy code
using namespace cudf::io::orc;
using namespace cudf::io;
namespace {
/**
* @brief Function that translates ORC data kind to cuDF type enum
**/
constexpr type_id to_type_id(const orc::SchemaType &schema,
bool use_np_dtypes,
type_id timestamp_type_id,
bool decimals_as_float)
{
switch (schema.kind) {
case orc::BOOLEAN: return type_id::BOOL8;
case orc::BYTE: return type_id::INT8;
case orc::SHORT: return type_id::INT16;
case orc::INT: return type_id::INT32;
case orc::LONG: return type_id::INT64;
case orc::FLOAT: return type_id::FLOAT32;
case orc::DOUBLE: return type_id::FLOAT64;
case orc::STRING:
case orc::BINARY:
case orc::VARCHAR:
case orc::CHAR:
// Variable-length types can all be mapped to STRING
return type_id::STRING;
case orc::TIMESTAMP:
return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
: type_id::TIMESTAMP_NANOSECONDS;
case orc::DATE:
// There isn't a (DAYS -> np.dtype) mapping
return (use_np_dtypes) ? type_id::TIMESTAMP_MILLISECONDS : type_id::TIMESTAMP_DAYS;
case orc::DECIMAL:
// There isn't an arbitrary-precision type in cuDF, so map as float or int
return (decimals_as_float) ? type_id::FLOAT64 : type_id::INT64;
default: break;
}
return type_id::EMPTY;
}
/**
* @brief Function that translates cuDF time unit to ORC clock frequency
**/
constexpr int32_t to_clockrate(type_id timestamp_type_id)
{
switch (timestamp_type_id) {
case type_id::TIMESTAMP_SECONDS: return 1;
case type_id::TIMESTAMP_MILLISECONDS: return 1000;
case type_id::TIMESTAMP_MICROSECONDS: return 1000000;
case type_id::TIMESTAMP_NANOSECONDS: return 1000000000;
default: return 0;
}
}
constexpr std::pair<gpu::StreamIndexType, uint32_t> get_index_type_and_pos(
const orc::StreamKind kind, uint32_t skip_count, bool non_child)
{
switch (kind) {
case orc::DATA:
skip_count += 1;
skip_count |= (skip_count & 0xff) << 8;
return std::make_pair(gpu::CI_DATA, skip_count);
case orc::LENGTH:
case orc::SECONDARY:
skip_count += 1;
skip_count |= (skip_count & 0xff) << 16;
return std::make_pair(gpu::CI_DATA2, skip_count);
case orc::DICTIONARY_DATA: return std::make_pair(gpu::CI_DICTIONARY, skip_count);
case orc::PRESENT:
skip_count += (non_child ? 1 : 0);
return std::make_pair(gpu::CI_PRESENT, skip_count);
case orc::ROW_INDEX: return std::make_pair(gpu::CI_INDEX, skip_count);
default:
// Skip this stream as it's not strictly required
return std::make_pair(gpu::CI_NUM_STREAMS, 0);
}
}
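// the skip_count returned here keeps a small per-column counter in the low byte and
// mirrors it into bits 8-15 (DATA) or 16-23 (LENGTH/SECONDARY); gather_stream_info
// stores it in ColumnDesc::skip_count as temporary index-ordering bookkeeping
// (see the NOTE there)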
} // namespace
/**
* @brief A helper class for ORC file metadata. Provides some additional
* convenience methods for initializing and accessing metadata.
**/
class metadata {
using OrcStripeInfo = std::pair<const StripeInformation *, const StripeFooter *>;
public:
explicit metadata(datasource *const src) : source(src)
{
const auto len = source->size();
const auto max_ps_size = ::min(len, static_cast<size_t>(256));
// Read uncompressed postscript section (max 255 bytes + 1 byte for length)
auto buffer = source->host_read(len - max_ps_size, max_ps_size);
const size_t ps_length = buffer->data()[max_ps_size - 1];
const uint8_t *ps_data = &buffer->data()[max_ps_size - ps_length - 1];
ProtobufReader pb;
pb.init(ps_data, ps_length);
CUDF_EXPECTS(pb.read(&ps, ps_length), "Cannot read postscript");
CUDF_EXPECTS(ps.footerLength + ps_length < len, "Invalid footer length");
// If compression is used, all the rest of the metadata is compressed
// If no compressed is used, the decompressor is simply a pass-through
decompressor = std::make_unique<OrcDecompressor>(ps.compression, ps.compressionBlockSize);
// Read compressed filefooter section
buffer = source->host_read(len - ps_length - 1 - ps.footerLength, ps.footerLength);
size_t ff_length = 0;
auto ff_data = decompressor->Decompress(buffer->data(), ps.footerLength, &ff_length);
pb.init(ff_data, ff_length);
CUDF_EXPECTS(pb.read(&ff, ff_length), "Cannot read filefooter");
CUDF_EXPECTS(get_num_columns() > 0, "No columns found");
}
/**
* @brief Filters and reads the info of only a selection of stripes
*
* @param[in] stripe Index of the stripe to select
* @param[in] max_stripe_count Number of stripes to select for stripe-based selection
* @param[in] stripe_indices Indices of individual stripes [max_stripe_count]
* @param[in] row_start Starting row of the selection
* @param[in,out] row_count Total number of rows selected
*
* @return List of stripe info and total number of selected rows
**/
auto select_stripes(size_type stripe,
size_type max_stripe_count,
const size_type *stripe_indices,
size_type &row_start,
size_type &row_count)
{
std::vector<OrcStripeInfo> selection;
if (stripe_indices) {
size_t stripe_rows = 0;
for (auto i = 0; i < max_stripe_count; i++) {
auto stripe_idx = stripe_indices[i];
CUDF_EXPECTS(stripe_idx >= 0 && stripe_idx < get_num_stripes(), "Invalid stripe index");
selection.emplace_back(&ff.stripes[stripe_idx], nullptr);
stripe_rows += ff.stripes[stripe_idx].numberOfRows;
}
row_count = static_cast<size_type>(stripe_rows);
} else if (stripe != -1) {
CUDF_EXPECTS(stripe < get_num_stripes(), "Non-existent stripe");
size_t stripe_rows = 0;
do {
if (row_count >= 0 && stripe_rows >= (size_t)row_count) { break; }
selection.emplace_back(&ff.stripes[stripe], nullptr);
stripe_rows += ff.stripes[stripe].numberOfRows;
} while (--max_stripe_count > 0 && ++stripe < get_num_stripes());
row_count = (row_count < 0) ? static_cast<size_type>(stripe_rows)
: ::min(row_count, static_cast<size_type>(stripe_rows));
} else {
row_start = ::max(row_start, 0);
if (row_count < 0) {
row_count = static_cast<size_type>(
std::min<size_t>(get_total_rows(), std::numeric_limits<size_type>::max()));
}
CUDF_EXPECTS(row_count >= 0, "Invalid row count");
CUDF_EXPECTS(static_cast<size_t>(row_start) <= get_total_rows(), "Invalid row start");
size_type stripe_skip_rows = 0;
for (size_t i = 0, count = 0; i < ff.stripes.size(); ++i) {
count += ff.stripes[i].numberOfRows;
if (count > static_cast<size_t>(row_start)) {
if (selection.size() == 0) {
stripe_skip_rows =
static_cast<size_type>(row_start - (count - ff.stripes[i].numberOfRows));
}
selection.emplace_back(&ff.stripes[i], nullptr);
}
if (count >= static_cast<size_t>(row_start) + static_cast<size_t>(row_count)) { break; }
}
row_start = stripe_skip_rows;
}
// Read each stripe's stripefooter metadata
if (not selection.empty()) {
orc::ProtobufReader pb;
stripefooters.resize(selection.size());
for (size_t i = 0; i < selection.size(); ++i) {
const auto stripe = selection[i].first;
const auto sf_comp_offset = stripe->offset + stripe->indexLength + stripe->dataLength;
const auto sf_comp_length = stripe->footerLength;
CUDF_EXPECTS(sf_comp_offset + sf_comp_length < source->size(),
"Invalid stripe information");
const auto buffer = source->host_read(sf_comp_offset, sf_comp_length);
size_t sf_length = 0;
auto sf_data = decompressor->Decompress(buffer->data(), sf_comp_length, &sf_length);
pb.init(sf_data, sf_length);
CUDF_EXPECTS(pb.read(&stripefooters[i], sf_length), "Cannot read stripefooter");
selection[i].second = &stripefooters[i];
}
}
return selection;
}
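// three selection modes are supported above: an explicit list of stripe indices,
// a contiguous run starting at 'stripe' bounded by max_stripe_count/row_count,
// or (by default) a row range [row_start, row_start + row_count) mapped onto whole stripes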
/**
* @brief Filters and reduces down to a selection of columns
*
* @param[in] use_names List of column names to select
* @param[out] has_timestamp_column Whether there is a orc::TIMESTAMP column
*
* @return List of ORC column indexes
**/
auto select_columns(std::vector<std::string> use_names, bool &has_timestamp_column)
{
std::vector<int> selection;
if (not use_names.empty()) {
int index = 0;
for (const auto &use_name : use_names) {
for (int i = 0; i < get_num_columns(); ++i, ++index) {
if (index >= get_num_columns()) { index = 0; }
if (ff.GetColumnName(index) == use_name) {
selection.emplace_back(index);
if (ff.types[index].kind == orc::TIMESTAMP) { has_timestamp_column = true; }
index++;
break;
}
}
}
} else {
// For now, only select all leaf nodes
for (int i = 0; i < get_num_columns(); ++i) {
if (ff.types[i].subtypes.size() == 0) {
selection.emplace_back(i);
if (ff.types[i].kind == orc::TIMESTAMP) { has_timestamp_column = true; }
}
}
}
CUDF_EXPECTS(selection.size() > 0, "Filtered out all columns");
return selection;
}
inline size_t get_total_rows() const { return ff.numberOfRows; }
inline int get_num_stripes() const { return ff.stripes.size(); }
inline int get_num_columns() const { return ff.types.size(); }
inline int get_row_index_stride() const { return ff.rowIndexStride; }
public:
PostScript ps;
FileFooter ff;
std::vector<StripeFooter> stripefooters;
std::unique_ptr<OrcDecompressor> decompressor;
private:
datasource *const source;
};
namespace {
/**
* @brief Struct that maps ORC streams to columns
**/
struct orc_stream_info {
orc_stream_info() = default;
explicit orc_stream_info(
uint64_t offset_, size_t dst_pos_, uint32_t length_, uint32_t gdf_idx_, uint32_t stripe_idx_)
: offset(offset_),
dst_pos(dst_pos_),
length(length_),
gdf_idx(gdf_idx_),
stripe_idx(stripe_idx_)
{
}
uint64_t offset; // offset in file
size_t dst_pos; // offset in memory relative to start of compressed stripe data
uint32_t length; // length in file
uint32_t gdf_idx; // column index
uint32_t stripe_idx; // stripe index
};
/**
* @brief Function that populates column descriptors stream/chunk
**/
size_t gather_stream_info(const size_t stripe_index,
const orc::StripeInformation *stripeinfo,
const orc::StripeFooter *stripefooter,
const std::vector<int> &orc2gdf,
const std::vector<int> &gdf2orc,
const std::vector<orc::SchemaType> types,
bool use_index,
size_t *num_dictionary_entries,
hostdevice_vector<gpu::ColumnDesc> &chunks,
std::vector<orc_stream_info> &stream_info)
{
const auto num_columns = gdf2orc.size();
uint64_t src_offset = 0;
uint64_t dst_offset = 0;
for (const auto &stream : stripefooter->streams) {
if (stream.column >= orc2gdf.size()) {
dst_offset += stream.length;
continue;
}
auto col = orc2gdf[stream.column];
if (col == -1) {
// A struct-type column has no data itself, but rather child columns
// for each of its fields. There is only a PRESENT stream, which
// needs to be included for the reader.
const auto schema_type = types[stream.column];
if (schema_type.subtypes.size() != 0) {
if (schema_type.kind == orc::STRUCT && stream.kind == orc::PRESENT) {
for (const auto &idx : schema_type.subtypes) {
auto child_idx = (idx < orc2gdf.size()) ? orc2gdf[idx] : -1;
if (child_idx >= 0) {
col = child_idx;
auto &chunk = chunks[stripe_index * num_columns + col];
chunk.strm_id[gpu::CI_PRESENT] = stream_info.size();
chunk.strm_len[gpu::CI_PRESENT] = stream.length;
}
}
}
}
}
if (col != -1) {
if (src_offset >= stripeinfo->indexLength || use_index) {
// NOTE: skip_count field is temporarily used to track index ordering
auto &chunk = chunks[stripe_index * num_columns + col];
const auto idx =
get_index_type_and_pos(stream.kind, chunk.skip_count, col == orc2gdf[stream.column]);
if (idx.first < gpu::CI_NUM_STREAMS) {
chunk.strm_id[idx.first] = stream_info.size();
chunk.strm_len[idx.first] = stream.length;
chunk.skip_count = idx.second;
if (idx.first == gpu::CI_DICTIONARY) {
chunk.dictionary_start = *num_dictionary_entries;
chunk.dict_len = stripefooter->columns[stream.column].dictionarySize;
*num_dictionary_entries += stripefooter->columns[stream.column].dictionarySize;
}
}
}
stream_info.emplace_back(
stripeinfo->offset + src_offset, dst_offset, stream.length, col, stripe_index);
dst_offset += stream.length;
}
src_offset += stream.length;
}
return dst_offset;
}
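// the returned dst_offset is the total number of bytes of stream data to read for
// this stripe; the caller uses it to size the device buffer holding the stripe's streams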
} // namespace
rmm::device_buffer reader::impl::decompress_stripe_data(
hostdevice_vector<gpu::ColumnDesc> &chunks,
const std::vector<rmm::device_buffer> &stripe_data,
const OrcDecompressor *decompressor,
std::vector<orc_stream_info> &stream_info,
size_t num_stripes,
rmm::device_vector<gpu::RowGroup> &row_groups,
size_t row_index_stride,
hipStream_t stream)
{
// Parse the columns' compressed info
hostdevice_vector<gpu::CompressedStreamInfo> compinfo(0, stream_info.size(), stream);
for (const auto &info : stream_info) {
compinfo.insert(gpu::CompressedStreamInfo(
static_cast<const uint8_t *>(stripe_data[info.stripe_idx].data()) + info.dst_pos,
info.length));
}
CUDA_TRY(hipMemcpyAsync(compinfo.device_ptr(),
compinfo.host_ptr(),
compinfo.memory_size(),
hipMemcpyHostToDevice,
stream));
CUDA_TRY(gpu::ParseCompressedStripeData(compinfo.device_ptr(),
compinfo.size(),
decompressor->GetBlockSize(),
decompressor->GetLog2MaxCompressionRatio(),
stream));
CUDA_TRY(hipMemcpyAsync(compinfo.host_ptr(),
compinfo.device_ptr(),
compinfo.memory_size(),
hipMemcpyDeviceToHost,
stream));
CUDA_TRY(hipStreamSynchronize(stream));
// Count the exact number of compressed blocks
size_t num_compressed_blocks = 0;
size_t num_uncompressed_blocks = 0;
size_t total_decomp_size = 0;
for (size_t i = 0; i < compinfo.size(); ++i) {
num_compressed_blocks += compinfo[i].num_compressed_blocks;
num_uncompressed_blocks += compinfo[i].num_uncompressed_blocks;
total_decomp_size += compinfo[i].max_uncompressed_size;
}
CUDF_EXPECTS(total_decomp_size > 0, "No decompressible data found");
rmm::device_buffer decomp_data(total_decomp_size, stream);
rmm::device_vector<gpu_inflate_input_s> inflate_in(num_compressed_blocks +
num_uncompressed_blocks);
rmm::device_vector<gpu_inflate_status_s> inflate_out(num_compressed_blocks);
// Parse again to populate the decompression input/output buffers
size_t decomp_offset = 0;
uint32_t start_pos = 0;
uint32_t start_pos_uncomp = (uint32_t)num_compressed_blocks;
for (size_t i = 0; i < compinfo.size(); ++i) {
auto dst_base = static_cast<uint8_t *>(decomp_data.data());
compinfo[i].uncompressed_data = dst_base + decomp_offset;
compinfo[i].decctl = inflate_in.data().get() + start_pos;
compinfo[i].decstatus = inflate_out.data().get() + start_pos;
compinfo[i].copyctl = inflate_in.data().get() + start_pos_uncomp;
stream_info[i].dst_pos = decomp_offset;
decomp_offset += compinfo[i].max_uncompressed_size;
start_pos += compinfo[i].num_compressed_blocks;
start_pos_uncomp += compinfo[i].num_uncompressed_blocks;
}
CUDA_TRY(hipMemcpyAsync(compinfo.device_ptr(),
compinfo.host_ptr(),
compinfo.memory_size(),
hipMemcpyHostToDevice,
stream));
CUDA_TRY(gpu::ParseCompressedStripeData(compinfo.device_ptr(),
compinfo.size(),
decompressor->GetBlockSize(),
decompressor->GetLog2MaxCompressionRatio(),
stream));
// Dispatch batches of blocks to decompress
if (num_compressed_blocks > 0) {
switch (decompressor->GetKind()) {
case orc::ZLIB:
CUDA_TRY(gpuinflate(
inflate_in.data().get(), inflate_out.data().get(), num_compressed_blocks, 0, stream));
break;
case orc::SNAPPY:
CUDA_TRY(gpu_unsnap(
inflate_in.data().get(), inflate_out.data().get(), num_compressed_blocks, stream));
break;
default: CUDF_EXPECTS(false, "Unexpected decompression dispatch"); break;
}
}
if (num_uncompressed_blocks > 0) {
CUDA_TRY(gpu_copy_uncompressed_blocks(
inflate_in.data().get() + num_compressed_blocks, num_uncompressed_blocks, stream));
}
CUDA_TRY(gpu::PostDecompressionReassemble(compinfo.device_ptr(), compinfo.size(), stream));
// Update the stream information with the updated uncompressed info
// TBD: We could update the value from the information we already
// have in stream_info[], but using the gpu results also updates
// max_uncompressed_size to the actual uncompressed size, or zero if
// decompression failed.
CUDA_TRY(hipMemcpyAsync(compinfo.host_ptr(),
compinfo.device_ptr(),
compinfo.memory_size(),
hipMemcpyDeviceToHost,
stream));
CUDA_TRY(hipStreamSynchronize(stream));
const size_t num_columns = chunks.size() / num_stripes;
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
auto &chunk = chunks[i * num_columns + j];
for (int k = 0; k < gpu::CI_NUM_STREAMS; ++k) {
if (chunk.strm_len[k] > 0 && chunk.strm_id[k] < compinfo.size()) {
chunk.streams[k] = compinfo[chunk.strm_id[k]].uncompressed_data;
chunk.strm_len[k] = compinfo[chunk.strm_id[k]].max_uncompressed_size;
}
}
}
}
if (not row_groups.empty()) {
CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(),
chunks.host_ptr(),
chunks.memory_size(),
hipMemcpyHostToDevice,
stream));
CUDA_TRY(gpu::ParseRowGroupIndex(row_groups.data().get(),
compinfo.device_ptr(),
chunks.device_ptr(),
num_columns,
num_stripes,
row_groups.size() / num_columns,
row_index_stride,
stream));
}
return decomp_data;
}
void reader::impl::decode_stream_data(hostdevice_vector<gpu::ColumnDesc> &chunks,
size_t num_dicts,
size_t skip_rows,
size_t num_rows,
const std::vector<int64_t> &timezone_table,
const rmm::device_vector<gpu::RowGroup> &row_groups,
size_t row_index_stride,
std::vector<column_buffer> &out_buffers,
hipStream_t stream)
{
const auto num_columns = out_buffers.size();
const auto num_stripes = chunks.size() / out_buffers.size();
// Update chunks with pointers to column data
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
auto &chunk = chunks[i * num_columns + j];
chunk.column_data_base = out_buffers[j].data();
chunk.valid_map_base = out_buffers[j].null_mask();
}
}
// Allocate global dictionary for deserializing
rmm::device_vector<gpu::DictionaryEntry> global_dict(num_dicts);
// Allocate timezone transition table timestamp conversion
rmm::device_vector<int64_t> tz_table = timezone_table;
CUDA_TRY(hipMemcpyAsync(
chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), hipMemcpyHostToDevice, stream));
CUDA_TRY(gpu::DecodeNullsAndStringDictionaries(chunks.device_ptr(),
global_dict.data().get(),
num_columns,
num_stripes,
num_rows,
skip_rows,
stream));
CUDA_TRY(gpu::DecodeOrcColumnData(chunks.device_ptr(),
global_dict.data().get(),
num_columns,
num_stripes,
num_rows,
skip_rows,
tz_table.data().get(),
tz_table.size(),
row_groups.data().get(),
row_groups.size() / num_columns,
row_index_stride,
stream));
CUDA_TRY(hipMemcpyAsync(
chunks.host_ptr(), chunks.device_ptr(), chunks.memory_size(), hipMemcpyDeviceToHost, stream));
CUDA_TRY(hipStreamSynchronize(stream));
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
out_buffers[j].null_count() += chunks[i * num_columns + j].null_count;
}
}
}
reader::impl::impl(std::unique_ptr<datasource> source,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _source(std::move(source)), _mr(mr)
{
// Open and parse the source dataset metadata
_metadata = std::make_unique<metadata>(_source.get());
// Select only columns required by the options
_selected_columns = _metadata->select_columns(options.columns, _has_timestamp_column);
// Override output timestamp resolution if requested
if (options.timestamp_type.id() != EMPTY) { _timestamp_type = options.timestamp_type; }
// Enable or disable attempt to use row index for parsing
_use_index = options.use_index;
// Enable or disable the conversion to numpy-compatible dtypes
_use_np_dtypes = options.use_np_dtypes;
// Control decimals conversion (float64 or int64 with optional scale)
_decimals_as_float = options.decimals_as_float;
_decimals_as_int_scale = options.forced_decimals_scale;
}
table_with_metadata reader::impl::read(size_type skip_rows,
size_type num_rows,
size_type stripe,
size_type max_stripe_count,
const size_type *stripe_indices,
hipStream_t stream)
{
std::vector<std::unique_ptr<column>> out_columns;
table_metadata out_metadata;
// Select only stripes required (aka row groups)
const auto selected_stripes =
_metadata->select_stripes(stripe, max_stripe_count, stripe_indices, skip_rows, num_rows);
// Association between each ORC column and its cudf::column
std::vector<int32_t> orc_col_map(_metadata->get_num_columns(), -1);
// Get a list of column data types
std::vector<data_type> column_types;
for (const auto &col : _selected_columns) {
auto col_type = to_type_id(
_metadata->ff.types[col], _use_np_dtypes, _timestamp_type.id(), _decimals_as_float);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
column_types.emplace_back(col_type);
// Map each ORC column to its column
orc_col_map[col] = column_types.size() - 1;
}
// If no rows or stripes to read, return empty columns
if (num_rows <= 0 || selected_stripes.size() == 0) {
std::transform(column_types.cbegin(),
column_types.cend(),
std::back_inserter(out_columns),
[](auto const &dtype) { return make_empty_column(dtype); });
} else {
const auto num_columns = _selected_columns.size();
const auto num_chunks = selected_stripes.size() * num_columns;
hostdevice_vector<gpu::ColumnDesc> chunks(num_chunks, stream);
memset(chunks.host_ptr(), 0, chunks.memory_size());
const bool use_index =
(_use_index == true) &&
// Only use if we don't have much work with complete columns & stripes
// TODO: Consider nrows, gpu, and tune the threshold
(num_rows > _metadata->get_row_index_stride() && !(_metadata->get_row_index_stride() & 7) &&
_metadata->get_row_index_stride() > 0 && num_columns * selected_stripes.size() < 8 * 128) &&
// Only use if first row is aligned to a stripe boundary
// TODO: Fix logic to handle unaligned rows
(skip_rows == 0);
// Logically view streams as columns
std::vector<orc_stream_info> stream_info;
// Tracker for eventually deallocating compressed and uncompressed data
std::vector<rmm::device_buffer> stripe_data;
size_t stripe_start_row = 0;
size_t num_dict_entries = 0;
size_t num_rowgroups = 0;
for (size_t i = 0; i < selected_stripes.size(); ++i) {
const auto stripe_info = selected_stripes[i].first;
const auto stripe_footer = selected_stripes[i].second;
auto stream_count = stream_info.size();
const auto total_data_size = gather_stream_info(i,
stripe_info,
stripe_footer,
orc_col_map,
_selected_columns,
_metadata->ff.types,
use_index,
&num_dict_entries,
chunks,
stream_info);
CUDF_EXPECTS(total_data_size > 0, "Expected streams data within stripe");
stripe_data.emplace_back(total_data_size, stream);
auto dst_base = static_cast<uint8_t *>(stripe_data.back().data());
// Coalesce consecutive streams into one read
while (stream_count < stream_info.size()) {
const auto d_dst = dst_base + stream_info[stream_count].dst_pos;
const auto offset = stream_info[stream_count].offset;
auto len = stream_info[stream_count].length;
stream_count++;
while (stream_count < stream_info.size() &&
stream_info[stream_count].offset == offset + len) {
len += stream_info[stream_count].length;
stream_count++;
}
const auto buffer = _source->host_read(offset, len);
CUDA_TRY(hipMemcpyAsync(d_dst, buffer->data(), len, hipMemcpyHostToDevice, stream));
CUDA_TRY(hipStreamSynchronize(stream));
}
// Update chunks to reference streams pointers
for (size_t j = 0; j < num_columns; j++) {
auto &chunk = chunks[i * num_columns + j];
chunk.start_row = stripe_start_row;
chunk.num_rows = stripe_info->numberOfRows;
chunk.encoding_kind = stripe_footer->columns[_selected_columns[j]].kind;
chunk.type_kind = _metadata->ff.types[_selected_columns[j]].kind;
if (_decimals_as_float) {
chunk.decimal_scale =
_metadata->ff.types[_selected_columns[j]].scale | ORC_DECIMAL2FLOAT64_SCALE;
} else if (_decimals_as_int_scale < 0) {
chunk.decimal_scale = _metadata->ff.types[_selected_columns[j]].scale;
} else {
chunk.decimal_scale = _decimals_as_int_scale;
}
chunk.rowgroup_id = num_rowgroups;
chunk.dtype_len = (column_types[j].id() == type_id::STRING)
? sizeof(std::pair<const char *, size_t>)
: cudf::size_of(column_types[j]);
if (chunk.type_kind == orc::TIMESTAMP) {
chunk.ts_clock_rate = to_clockrate(_timestamp_type.id());
}
for (int k = 0; k < gpu::CI_NUM_STREAMS; k++) {
if (chunk.strm_len[k] > 0) {
chunk.streams[k] = dst_base + stream_info[chunk.strm_id[k]].dst_pos;
}
}
}
stripe_start_row += stripe_info->numberOfRows;
if (use_index) {
num_rowgroups += (stripe_info->numberOfRows + _metadata->get_row_index_stride() - 1) /
_metadata->get_row_index_stride();
}
}
// Process dataset chunk pages into output columns
if (stripe_data.size() != 0) {
// Setup row group descriptors if using indexes
rmm::device_vector<gpu::RowGroup> row_groups(num_rowgroups * num_columns);
if (_metadata->ps.compression != orc::NONE) {
auto decomp_data = decompress_stripe_data(chunks,
stripe_data,
_metadata->decompressor.get(),
stream_info,
selected_stripes.size(),
row_groups,
_metadata->get_row_index_stride(),
stream);
stripe_data.clear();
stripe_data.push_back(std::move(decomp_data));
} else {
if (not row_groups.empty()) {
CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(),
chunks.host_ptr(),
chunks.memory_size(),
hipMemcpyHostToDevice,
stream));
CUDA_TRY(gpu::ParseRowGroupIndex(row_groups.data().get(),
nullptr,
chunks.device_ptr(),
num_columns,
selected_stripes.size(),
num_rowgroups,
_metadata->get_row_index_stride(),
stream));
}
}
// Setup table for converting timestamp columns from local to UTC time
std::vector<int64_t> tz_table;
if (_has_timestamp_column) {
CUDF_EXPECTS(
BuildTimezoneTransitionTable(tz_table, selected_stripes[0].second->writerTimezone),
"Cannot setup timezone LUT");
}
std::vector<column_buffer> out_buffers;
for (size_t i = 0; i < column_types.size(); ++i) {
bool is_nullable = false;
for (size_t j = 0; j < selected_stripes.size(); ++j) {
if (chunks[j * num_columns + i].strm_len[gpu::CI_PRESENT] != 0) {
is_nullable = true;
break;
}
}
out_buffers.emplace_back(column_types[i], num_rows, is_nullable, stream, _mr);
}
decode_stream_data(chunks,
num_dict_entries,
skip_rows,
num_rows,
tz_table,
row_groups,
_metadata->get_row_index_stride(),
out_buffers,
stream);
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(
make_column(column_types[i], num_rows, out_buffers[i], stream, _mr));
}
}
}
// Return column names (must match order of returned columns)
out_metadata.column_names.resize(_selected_columns.size());
for (size_t i = 0; i < _selected_columns.size(); i++) {
out_metadata.column_names[i] = _metadata->ff.GetColumnName(_selected_columns[i]);
}
// Return user metadata
for (const auto &kv : _metadata->ff.metadata) {
out_metadata.user_data.insert({kv.name, kv.value});
}
return {std::make_unique<table>(std::move(out_columns)), std::move(out_metadata)};
}
// Forward to implementation
reader::reader(std::string filepath,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(datasource::create(filepath), options, mr))
{
}
// Forward to implementation
reader::reader(std::unique_ptr<cudf::io::datasource> source,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(std::move(source), options, mr))
{
}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read_all(hipStream_t stream)
{
return _impl->read(0, -1, -1, -1, nullptr, stream);
}
// Forward to implementation
table_with_metadata reader::read_stripe(size_type stripe,
size_type stripe_count,
hipStream_t stream)
{
return _impl->read(0, -1, stripe, stripe_count, nullptr, stream);
}
// Forward to implementation
table_with_metadata reader::read_stripes(const std::vector<size_type> &stripe_list,
hipStream_t stream)
{
return _impl->read(
0, -1, -1, static_cast<size_type>(stripe_list.size()), stripe_list.data(), stream);
}
// Forward to implementation
table_with_metadata reader::read_rows(size_type skip_rows, size_type num_rows, hipStream_t stream)
{
return _impl->read(skip_rows, (num_rows != 0) ? num_rows : -1, -1, -1, nullptr, stream);
}
} // namespace orc
} // namespace detail
} // namespace io
} // namespace cudf
| 1830bcaeacb3678c4ce545c09a78b7719d822a54.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO ORC reader class implementation
**/
#include "reader_impl.hpp"
#include "timezone.h"
#include <io/comp/gpuinflate.h>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_buffer.hpp>
#include <algorithm>
#include <array>
namespace cudf {
namespace io {
namespace detail {
namespace orc {
// Import functionality that's independent of legacy code
using namespace cudf::io::orc;
using namespace cudf::io;
namespace {
/**
* @brief Function that translates ORC data kind to cuDF type enum
**/
constexpr type_id to_type_id(const orc::SchemaType &schema,
bool use_np_dtypes,
type_id timestamp_type_id,
bool decimals_as_float)
{
switch (schema.kind) {
case orc::BOOLEAN: return type_id::BOOL8;
case orc::BYTE: return type_id::INT8;
case orc::SHORT: return type_id::INT16;
case orc::INT: return type_id::INT32;
case orc::LONG: return type_id::INT64;
case orc::FLOAT: return type_id::FLOAT32;
case orc::DOUBLE: return type_id::FLOAT64;
case orc::STRING:
case orc::BINARY:
case orc::VARCHAR:
case orc::CHAR:
// Variable-length types can all be mapped to STRING
return type_id::STRING;
case orc::TIMESTAMP:
return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
: type_id::TIMESTAMP_NANOSECONDS;
case orc::DATE:
// There isn't a (DAYS -> np.dtype) mapping
return (use_np_dtypes) ? type_id::TIMESTAMP_MILLISECONDS : type_id::TIMESTAMP_DAYS;
case orc::DECIMAL:
// There isn't an arbitrary-precision type in cuDF, so map as float or int
return (decimals_as_float) ? type_id::FLOAT64 : type_id::INT64;
default: break;
}
return type_id::EMPTY;
}
/**
* @brief Function that translates cuDF time unit to ORC clock frequency
**/
constexpr int32_t to_clockrate(type_id timestamp_type_id)
{
switch (timestamp_type_id) {
case type_id::TIMESTAMP_SECONDS: return 1;
case type_id::TIMESTAMP_MILLISECONDS: return 1000;
case type_id::TIMESTAMP_MICROSECONDS: return 1000000;
case type_id::TIMESTAMP_NANOSECONDS: return 1000000000;
default: return 0;
}
}
constexpr std::pair<gpu::StreamIndexType, uint32_t> get_index_type_and_pos(
const orc::StreamKind kind, uint32_t skip_count, bool non_child)
{
switch (kind) {
case orc::DATA:
skip_count += 1;
skip_count |= (skip_count & 0xff) << 8;
return std::make_pair(gpu::CI_DATA, skip_count);
case orc::LENGTH:
case orc::SECONDARY:
skip_count += 1;
skip_count |= (skip_count & 0xff) << 16;
return std::make_pair(gpu::CI_DATA2, skip_count);
case orc::DICTIONARY_DATA: return std::make_pair(gpu::CI_DICTIONARY, skip_count);
case orc::PRESENT:
skip_count += (non_child ? 1 : 0);
return std::make_pair(gpu::CI_PRESENT, skip_count);
case orc::ROW_INDEX: return std::make_pair(gpu::CI_INDEX, skip_count);
default:
// Skip this stream as it's not strictly required
return std::make_pair(gpu::CI_NUM_STREAMS, 0);
}
}
} // namespace
/**
* @brief A helper class for ORC file metadata. Provides some additional
* convenience methods for initializing and accessing metadata.
**/
class metadata {
using OrcStripeInfo = std::pair<const StripeInformation *, const StripeFooter *>;
public:
explicit metadata(datasource *const src) : source(src)
{
const auto len = source->size();
const auto max_ps_size = std::min(len, static_cast<size_t>(256));
// Read uncompressed postscript section (max 255 bytes + 1 byte for length)
auto buffer = source->host_read(len - max_ps_size, max_ps_size);
const size_t ps_length = buffer->data()[max_ps_size - 1];
const uint8_t *ps_data = &buffer->data()[max_ps_size - ps_length - 1];
ProtobufReader pb;
pb.init(ps_data, ps_length);
CUDF_EXPECTS(pb.read(&ps, ps_length), "Cannot read postscript");
CUDF_EXPECTS(ps.footerLength + ps_length < len, "Invalid footer length");
// If compression is used, all the rest of the metadata is compressed
// If no compression is used, the decompressor is simply a pass-through
decompressor = std::make_unique<OrcDecompressor>(ps.compression, ps.compressionBlockSize);
// Read compressed filefooter section
buffer = source->host_read(len - ps_length - 1 - ps.footerLength, ps.footerLength);
size_t ff_length = 0;
auto ff_data = decompressor->Decompress(buffer->data(), ps.footerLength, &ff_length);
pb.init(ff_data, ff_length);
CUDF_EXPECTS(pb.read(&ff, ff_length), "Cannot read filefooter");
CUDF_EXPECTS(get_num_columns() > 0, "No columns found");
}
/**
* @brief Filters and reads the info of only a selection of stripes
*
* @param[in] stripe Index of the stripe to select
* @param[in] max_stripe_count Number of stripes to select for stripe-based selection
* @param[in] stripe_indices Indices of individual stripes [max_stripe_count]
* @param[in] row_start Starting row of the selection
* @param[in,out] row_count Total number of rows selected
*
* @return List of stripe info and total number of selected rows
**/
auto select_stripes(size_type stripe,
size_type max_stripe_count,
const size_type *stripe_indices,
size_type &row_start,
size_type &row_count)
{
std::vector<OrcStripeInfo> selection;
if (stripe_indices) {
size_t stripe_rows = 0;
for (auto i = 0; i < max_stripe_count; i++) {
auto stripe_idx = stripe_indices[i];
CUDF_EXPECTS(stripe_idx >= 0 && stripe_idx < get_num_stripes(), "Invalid stripe index");
selection.emplace_back(&ff.stripes[stripe_idx], nullptr);
stripe_rows += ff.stripes[stripe_idx].numberOfRows;
}
row_count = static_cast<size_type>(stripe_rows);
} else if (stripe != -1) {
CUDF_EXPECTS(stripe < get_num_stripes(), "Non-existent stripe");
size_t stripe_rows = 0;
do {
if (row_count >= 0 && stripe_rows >= (size_t)row_count) { break; }
selection.emplace_back(&ff.stripes[stripe], nullptr);
stripe_rows += ff.stripes[stripe].numberOfRows;
} while (--max_stripe_count > 0 && ++stripe < get_num_stripes());
row_count = (row_count < 0) ? static_cast<size_type>(stripe_rows)
: std::min(row_count, static_cast<size_type>(stripe_rows));
} else {
row_start = std::max(row_start, 0);
if (row_count < 0) {
row_count = static_cast<size_type>(
std::min<size_t>(get_total_rows(), std::numeric_limits<size_type>::max()));
}
CUDF_EXPECTS(row_count >= 0, "Invalid row count");
CUDF_EXPECTS(static_cast<size_t>(row_start) <= get_total_rows(), "Invalid row start");
size_type stripe_skip_rows = 0;
for (size_t i = 0, count = 0; i < ff.stripes.size(); ++i) {
count += ff.stripes[i].numberOfRows;
if (count > static_cast<size_t>(row_start)) {
if (selection.size() == 0) {
stripe_skip_rows =
static_cast<size_type>(row_start - (count - ff.stripes[i].numberOfRows));
}
selection.emplace_back(&ff.stripes[i], nullptr);
}
if (count >= static_cast<size_t>(row_start) + static_cast<size_t>(row_count)) { break; }
}
row_start = stripe_skip_rows;
}
// Read each stripe's stripefooter metadata
if (not selection.empty()) {
orc::ProtobufReader pb;
stripefooters.resize(selection.size());
for (size_t i = 0; i < selection.size(); ++i) {
const auto stripe = selection[i].first;
const auto sf_comp_offset = stripe->offset + stripe->indexLength + stripe->dataLength;
const auto sf_comp_length = stripe->footerLength;
CUDF_EXPECTS(sf_comp_offset + sf_comp_length < source->size(),
"Invalid stripe information");
const auto buffer = source->host_read(sf_comp_offset, sf_comp_length);
size_t sf_length = 0;
auto sf_data = decompressor->Decompress(buffer->data(), sf_comp_length, &sf_length);
pb.init(sf_data, sf_length);
CUDF_EXPECTS(pb.read(&stripefooters[i], sf_length), "Cannot read stripefooter");
selection[i].second = &stripefooters[i];
}
}
return selection;
}
/**
* @brief Filters and reduces down to a selection of columns
*
* @param[in] use_names List of column names to select
* @param[out] has_timestamp_column Whether there is a orc::TIMESTAMP column
*
* @return List of ORC column indexes
**/
auto select_columns(std::vector<std::string> use_names, bool &has_timestamp_column)
{
std::vector<int> selection;
if (not use_names.empty()) {
int index = 0;
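// 'index' persists across the requested names: each lookup resumes just after
// the previous match and wraps around, so each name costs at most one full scan.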
for (const auto &use_name : use_names) {
for (int i = 0; i < get_num_columns(); ++i, ++index) {
if (index >= get_num_columns()) { index = 0; }
if (ff.GetColumnName(index) == use_name) {
selection.emplace_back(index);
if (ff.types[index].kind == orc::TIMESTAMP) { has_timestamp_column = true; }
index++;
break;
}
}
}
} else {
// For now, only select all leaf nodes
for (int i = 0; i < get_num_columns(); ++i) {
if (ff.types[i].subtypes.size() == 0) {
selection.emplace_back(i);
if (ff.types[i].kind == orc::TIMESTAMP) { has_timestamp_column = true; }
}
}
}
CUDF_EXPECTS(selection.size() > 0, "Filtered out all columns");
return selection;
}
inline size_t get_total_rows() const { return ff.numberOfRows; }
inline int get_num_stripes() const { return ff.stripes.size(); }
inline int get_num_columns() const { return ff.types.size(); }
inline int get_row_index_stride() const { return ff.rowIndexStride; }
public:
PostScript ps;
FileFooter ff;
std::vector<StripeFooter> stripefooters;
std::unique_ptr<OrcDecompressor> decompressor;
private:
datasource *const source;
};
namespace {
/**
* @brief Struct that maps ORC streams to columns
**/
struct orc_stream_info {
orc_stream_info() = default;
explicit orc_stream_info(
uint64_t offset_, size_t dst_pos_, uint32_t length_, uint32_t gdf_idx_, uint32_t stripe_idx_)
: offset(offset_),
dst_pos(dst_pos_),
length(length_),
gdf_idx(gdf_idx_),
stripe_idx(stripe_idx_)
{
}
uint64_t offset; // offset in file
size_t dst_pos; // offset in memory relative to start of compressed stripe data
uint32_t length; // length in file
uint32_t gdf_idx; // column index
uint32_t stripe_idx; // stripe index
};
/**
 * @brief Function that populates each column descriptor's stream/chunk information
**/
size_t gather_stream_info(const size_t stripe_index,
const orc::StripeInformation *stripeinfo,
const orc::StripeFooter *stripefooter,
const std::vector<int> &orc2gdf,
const std::vector<int> &gdf2orc,
const std::vector<orc::SchemaType> types,
bool use_index,
size_t *num_dictionary_entries,
hostdevice_vector<gpu::ColumnDesc> &chunks,
std::vector<orc_stream_info> &stream_info)
{
const auto num_columns = gdf2orc.size();
uint64_t src_offset = 0;
uint64_t dst_offset = 0;
for (const auto &stream : stripefooter->streams) {
if (stream.column >= orc2gdf.size()) {
dst_offset += stream.length;
continue;
}
auto col = orc2gdf[stream.column];
if (col == -1) {
// A struct-type column has no data itself, but rather child columns
// for each of its fields. There is only a PRESENT stream, which
// needs to be included for the reader.
const auto schema_type = types[stream.column];
if (schema_type.subtypes.size() != 0) {
if (schema_type.kind == orc::STRUCT && stream.kind == orc::PRESENT) {
for (const auto &idx : schema_type.subtypes) {
auto child_idx = (idx < orc2gdf.size()) ? orc2gdf[idx] : -1;
if (child_idx >= 0) {
col = child_idx;
auto &chunk = chunks[stripe_index * num_columns + col];
chunk.strm_id[gpu::CI_PRESENT] = stream_info.size();
chunk.strm_len[gpu::CI_PRESENT] = stream.length;
}
}
}
}
}
if (col != -1) {
if (src_offset >= stripeinfo->indexLength || use_index) {
// NOTE: skip_count field is temporarily used to track index ordering
auto &chunk = chunks[stripe_index * num_columns + col];
const auto idx =
get_index_type_and_pos(stream.kind, chunk.skip_count, col == orc2gdf[stream.column]);
if (idx.first < gpu::CI_NUM_STREAMS) {
chunk.strm_id[idx.first] = stream_info.size();
chunk.strm_len[idx.first] = stream.length;
chunk.skip_count = idx.second;
if (idx.first == gpu::CI_DICTIONARY) {
chunk.dictionary_start = *num_dictionary_entries;
chunk.dict_len = stripefooter->columns[stream.column].dictionarySize;
*num_dictionary_entries += stripefooter->columns[stream.column].dictionarySize;
}
}
}
stream_info.emplace_back(
stripeinfo->offset + src_offset, dst_offset, stream.length, col, stripe_index);
dst_offset += stream.length;
}
src_offset += stream.length;
}
return dst_offset;
}
} // namespace
rmm::device_buffer reader::impl::decompress_stripe_data(
hostdevice_vector<gpu::ColumnDesc> &chunks,
const std::vector<rmm::device_buffer> &stripe_data,
const OrcDecompressor *decompressor,
std::vector<orc_stream_info> &stream_info,
size_t num_stripes,
rmm::device_vector<gpu::RowGroup> &row_groups,
size_t row_index_stride,
cudaStream_t stream)
{
// Parse the columns' compressed info
hostdevice_vector<gpu::CompressedStreamInfo> compinfo(0, stream_info.size(), stream);
for (const auto &info : stream_info) {
compinfo.insert(gpu::CompressedStreamInfo(
static_cast<const uint8_t *>(stripe_data[info.stripe_idx].data()) + info.dst_pos,
info.length));
}
CUDA_TRY(cudaMemcpyAsync(compinfo.device_ptr(),
compinfo.host_ptr(),
compinfo.memory_size(),
cudaMemcpyHostToDevice,
stream));
CUDA_TRY(gpu::ParseCompressedStripeData(compinfo.device_ptr(),
compinfo.size(),
decompressor->GetBlockSize(),
decompressor->GetLog2MaxCompressionRatio(),
stream));
CUDA_TRY(cudaMemcpyAsync(compinfo.host_ptr(),
compinfo.device_ptr(),
compinfo.memory_size(),
cudaMemcpyDeviceToHost,
stream));
CUDA_TRY(cudaStreamSynchronize(stream));
// Count the exact number of compressed blocks
size_t num_compressed_blocks = 0;
size_t num_uncompressed_blocks = 0;
size_t total_decomp_size = 0;
for (size_t i = 0; i < compinfo.size(); ++i) {
num_compressed_blocks += compinfo[i].num_compressed_blocks;
num_uncompressed_blocks += compinfo[i].num_uncompressed_blocks;
total_decomp_size += compinfo[i].max_uncompressed_size;
}
CUDF_EXPECTS(total_decomp_size > 0, "No decompressible data found");
rmm::device_buffer decomp_data(total_decomp_size, stream);
rmm::device_vector<gpu_inflate_input_s> inflate_in(num_compressed_blocks +
num_uncompressed_blocks);
rmm::device_vector<gpu_inflate_status_s> inflate_out(num_compressed_blocks);
// Parse again to populate the decompression input/output buffers
size_t decomp_offset = 0;
uint32_t start_pos = 0;
uint32_t start_pos_uncomp = (uint32_t)num_compressed_blocks;
for (size_t i = 0; i < compinfo.size(); ++i) {
auto dst_base = static_cast<uint8_t *>(decomp_data.data());
compinfo[i].uncompressed_data = dst_base + decomp_offset;
compinfo[i].decctl = inflate_in.data().get() + start_pos;
compinfo[i].decstatus = inflate_out.data().get() + start_pos;
compinfo[i].copyctl = inflate_in.data().get() + start_pos_uncomp;
stream_info[i].dst_pos = decomp_offset;
decomp_offset += compinfo[i].max_uncompressed_size;
start_pos += compinfo[i].num_compressed_blocks;
start_pos_uncomp += compinfo[i].num_uncompressed_blocks;
}
CUDA_TRY(cudaMemcpyAsync(compinfo.device_ptr(),
compinfo.host_ptr(),
compinfo.memory_size(),
cudaMemcpyHostToDevice,
stream));
CUDA_TRY(gpu::ParseCompressedStripeData(compinfo.device_ptr(),
compinfo.size(),
decompressor->GetBlockSize(),
decompressor->GetLog2MaxCompressionRatio(),
stream));
// Dispatch batches of blocks to decompress
if (num_compressed_blocks > 0) {
switch (decompressor->GetKind()) {
case orc::ZLIB:
CUDA_TRY(gpuinflate(
inflate_in.data().get(), inflate_out.data().get(), num_compressed_blocks, 0, stream));
break;
case orc::SNAPPY:
CUDA_TRY(gpu_unsnap(
inflate_in.data().get(), inflate_out.data().get(), num_compressed_blocks, stream));
break;
default: CUDF_EXPECTS(false, "Unexpected decompression dispatch"); break;
}
}
if (num_uncompressed_blocks > 0) {
CUDA_TRY(gpu_copy_uncompressed_blocks(
inflate_in.data().get() + num_compressed_blocks, num_uncompressed_blocks, stream));
}
CUDA_TRY(gpu::PostDecompressionReassemble(compinfo.device_ptr(), compinfo.size(), stream));
// Update the stream information with the updated uncompressed info
// TBD: We could update the value from the information we already
// have in stream_info[], but using the gpu results also updates
// max_uncompressed_size to the actual uncompressed size, or zero if
// decompression failed.
CUDA_TRY(cudaMemcpyAsync(compinfo.host_ptr(),
compinfo.device_ptr(),
compinfo.memory_size(),
cudaMemcpyDeviceToHost,
stream));
CUDA_TRY(cudaStreamSynchronize(stream));
const size_t num_columns = chunks.size() / num_stripes;
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
auto &chunk = chunks[i * num_columns + j];
for (int k = 0; k < gpu::CI_NUM_STREAMS; ++k) {
if (chunk.strm_len[k] > 0 && chunk.strm_id[k] < compinfo.size()) {
chunk.streams[k] = compinfo[chunk.strm_id[k]].uncompressed_data;
chunk.strm_len[k] = compinfo[chunk.strm_id[k]].max_uncompressed_size;
}
}
}
}
if (not row_groups.empty()) {
CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(),
chunks.host_ptr(),
chunks.memory_size(),
cudaMemcpyHostToDevice,
stream));
CUDA_TRY(gpu::ParseRowGroupIndex(row_groups.data().get(),
compinfo.device_ptr(),
chunks.device_ptr(),
num_columns,
num_stripes,
row_groups.size() / num_columns,
row_index_stride,
stream));
}
return decomp_data;
}
void reader::impl::decode_stream_data(hostdevice_vector<gpu::ColumnDesc> &chunks,
size_t num_dicts,
size_t skip_rows,
size_t num_rows,
const std::vector<int64_t> &timezone_table,
const rmm::device_vector<gpu::RowGroup> &row_groups,
size_t row_index_stride,
std::vector<column_buffer> &out_buffers,
cudaStream_t stream)
{
const auto num_columns = out_buffers.size();
const auto num_stripes = chunks.size() / out_buffers.size();
// Update chunks with pointers to column data
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
auto &chunk = chunks[i * num_columns + j];
chunk.column_data_base = out_buffers[j].data();
chunk.valid_map_base = out_buffers[j].null_mask();
}
}
// Allocate global dictionary for deserializing
rmm::device_vector<gpu::DictionaryEntry> global_dict(num_dicts);
// Allocate timezone transition table for timestamp conversion
rmm::device_vector<int64_t> tz_table = timezone_table;
CUDA_TRY(cudaMemcpyAsync(
chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), cudaMemcpyHostToDevice, stream));
CUDA_TRY(gpu::DecodeNullsAndStringDictionaries(chunks.device_ptr(),
global_dict.data().get(),
num_columns,
num_stripes,
num_rows,
skip_rows,
stream));
CUDA_TRY(gpu::DecodeOrcColumnData(chunks.device_ptr(),
global_dict.data().get(),
num_columns,
num_stripes,
num_rows,
skip_rows,
tz_table.data().get(),
tz_table.size(),
row_groups.data().get(),
row_groups.size() / num_columns,
row_index_stride,
stream));
CUDA_TRY(cudaMemcpyAsync(
chunks.host_ptr(), chunks.device_ptr(), chunks.memory_size(), cudaMemcpyDeviceToHost, stream));
CUDA_TRY(cudaStreamSynchronize(stream));
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
out_buffers[j].null_count() += chunks[i * num_columns + j].null_count;
}
}
}
reader::impl::impl(std::unique_ptr<datasource> source,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _source(std::move(source)), _mr(mr)
{
// Open and parse the source dataset metadata
_metadata = std::make_unique<metadata>(_source.get());
// Select only columns required by the options
_selected_columns = _metadata->select_columns(options.columns, _has_timestamp_column);
// Override output timestamp resolution if requested
if (options.timestamp_type.id() != EMPTY) { _timestamp_type = options.timestamp_type; }
// Enable or disable attempt to use row index for parsing
_use_index = options.use_index;
// Enable or disable the conversion to numpy-compatible dtypes
_use_np_dtypes = options.use_np_dtypes;
// Control decimals conversion (float64 or int64 with optional scale)
_decimals_as_float = options.decimals_as_float;
_decimals_as_int_scale = options.forced_decimals_scale;
}
table_with_metadata reader::impl::read(size_type skip_rows,
size_type num_rows,
size_type stripe,
size_type max_stripe_count,
const size_type *stripe_indices,
cudaStream_t stream)
{
std::vector<std::unique_ptr<column>> out_columns;
table_metadata out_metadata;
// Select only stripes required (aka row groups)
const auto selected_stripes =
_metadata->select_stripes(stripe, max_stripe_count, stripe_indices, skip_rows, num_rows);
// Association between each ORC column and its cudf::column
std::vector<int32_t> orc_col_map(_metadata->get_num_columns(), -1);
// Get a list of column data types
std::vector<data_type> column_types;
for (const auto &col : _selected_columns) {
auto col_type = to_type_id(
_metadata->ff.types[col], _use_np_dtypes, _timestamp_type.id(), _decimals_as_float);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
column_types.emplace_back(col_type);
// Map each ORC column to its cudf column
orc_col_map[col] = column_types.size() - 1;
}
// If no rows or stripes to read, return empty columns
if (num_rows <= 0 || selected_stripes.size() == 0) {
std::transform(column_types.cbegin(),
column_types.cend(),
std::back_inserter(out_columns),
[](auto const &dtype) { return make_empty_column(dtype); });
} else {
const auto num_columns = _selected_columns.size();
const auto num_chunks = selected_stripes.size() * num_columns;
hostdevice_vector<gpu::ColumnDesc> chunks(num_chunks, stream);
memset(chunks.host_ptr(), 0, chunks.memory_size());
const bool use_index =
(_use_index == true) &&
// Only use if we don't have much work with complete columns & stripes
// TODO: Consider nrows, gpu, and tune the threshold
(num_rows > _metadata->get_row_index_stride() && !(_metadata->get_row_index_stride() & 7) &&
_metadata->get_row_index_stride() > 0 && num_columns * selected_stripes.size() < 8 * 128) &&
// Only use if first row is aligned to a stripe boundary
// TODO: Fix logic to handle unaligned rows
(skip_rows == 0);
// Logically view streams as columns
std::vector<orc_stream_info> stream_info;
// Tracker for eventually deallocating compressed and uncompressed data
std::vector<rmm::device_buffer> stripe_data;
size_t stripe_start_row = 0;
size_t num_dict_entries = 0;
size_t num_rowgroups = 0;
for (size_t i = 0; i < selected_stripes.size(); ++i) {
const auto stripe_info = selected_stripes[i].first;
const auto stripe_footer = selected_stripes[i].second;
auto stream_count = stream_info.size();
const auto total_data_size = gather_stream_info(i,
stripe_info,
stripe_footer,
orc_col_map,
_selected_columns,
_metadata->ff.types,
use_index,
&num_dict_entries,
chunks,
stream_info);
CUDF_EXPECTS(total_data_size > 0, "Expected streams data within stripe");
stripe_data.emplace_back(total_data_size, stream);
auto dst_base = static_cast<uint8_t *>(stripe_data.back().data());
// Coalesce consecutive streams into one read
while (stream_count < stream_info.size()) {
const auto d_dst = dst_base + stream_info[stream_count].dst_pos;
const auto offset = stream_info[stream_count].offset;
auto len = stream_info[stream_count].length;
stream_count++;
while (stream_count < stream_info.size() &&
stream_info[stream_count].offset == offset + len) {
len += stream_info[stream_count].length;
stream_count++;
}
const auto buffer = _source->host_read(offset, len);
CUDA_TRY(cudaMemcpyAsync(d_dst, buffer->data(), len, cudaMemcpyHostToDevice, stream));
CUDA_TRY(cudaStreamSynchronize(stream));
}
// Update chunks to reference streams pointers
for (size_t j = 0; j < num_columns; j++) {
auto &chunk = chunks[i * num_columns + j];
chunk.start_row = stripe_start_row;
chunk.num_rows = stripe_info->numberOfRows;
chunk.encoding_kind = stripe_footer->columns[_selected_columns[j]].kind;
chunk.type_kind = _metadata->ff.types[_selected_columns[j]].kind;
if (_decimals_as_float) {
chunk.decimal_scale =
_metadata->ff.types[_selected_columns[j]].scale | ORC_DECIMAL2FLOAT64_SCALE;
} else if (_decimals_as_int_scale < 0) {
chunk.decimal_scale = _metadata->ff.types[_selected_columns[j]].scale;
} else {
chunk.decimal_scale = _decimals_as_int_scale;
}
chunk.rowgroup_id = num_rowgroups;
chunk.dtype_len = (column_types[j].id() == type_id::STRING)
? sizeof(std::pair<const char *, size_t>)
: cudf::size_of(column_types[j]);
if (chunk.type_kind == orc::TIMESTAMP) {
chunk.ts_clock_rate = to_clockrate(_timestamp_type.id());
}
for (int k = 0; k < gpu::CI_NUM_STREAMS; k++) {
if (chunk.strm_len[k] > 0) {
chunk.streams[k] = dst_base + stream_info[chunk.strm_id[k]].dst_pos;
}
}
}
stripe_start_row += stripe_info->numberOfRows;
if (use_index) {
num_rowgroups += (stripe_info->numberOfRows + _metadata->get_row_index_stride() - 1) /
_metadata->get_row_index_stride();
}
}
// Process dataset chunk pages into output columns
if (stripe_data.size() != 0) {
// Setup row group descriptors if using indexes
rmm::device_vector<gpu::RowGroup> row_groups(num_rowgroups * num_columns);
if (_metadata->ps.compression != orc::NONE) {
auto decomp_data = decompress_stripe_data(chunks,
stripe_data,
_metadata->decompressor.get(),
stream_info,
selected_stripes.size(),
row_groups,
_metadata->get_row_index_stride(),
stream);
stripe_data.clear();
stripe_data.push_back(std::move(decomp_data));
} else {
if (not row_groups.empty()) {
CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(),
chunks.host_ptr(),
chunks.memory_size(),
cudaMemcpyHostToDevice,
stream));
CUDA_TRY(gpu::ParseRowGroupIndex(row_groups.data().get(),
nullptr,
chunks.device_ptr(),
num_columns,
selected_stripes.size(),
num_rowgroups,
_metadata->get_row_index_stride(),
stream));
}
}
// Setup table for converting timestamp columns from local to UTC time
std::vector<int64_t> tz_table;
if (_has_timestamp_column) {
CUDF_EXPECTS(
BuildTimezoneTransitionTable(tz_table, selected_stripes[0].second->writerTimezone),
"Cannot setup timezone LUT");
}
std::vector<column_buffer> out_buffers;
for (size_t i = 0; i < column_types.size(); ++i) {
bool is_nullable = false;
for (size_t j = 0; j < selected_stripes.size(); ++j) {
if (chunks[j * num_columns + i].strm_len[gpu::CI_PRESENT] != 0) {
is_nullable = true;
break;
}
}
out_buffers.emplace_back(column_types[i], num_rows, is_nullable, stream, _mr);
}
decode_stream_data(chunks,
num_dict_entries,
skip_rows,
num_rows,
tz_table,
row_groups,
_metadata->get_row_index_stride(),
out_buffers,
stream);
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(
make_column(column_types[i], num_rows, out_buffers[i], stream, _mr));
}
}
}
// Return column names (must match order of returned columns)
out_metadata.column_names.resize(_selected_columns.size());
for (size_t i = 0; i < _selected_columns.size(); i++) {
out_metadata.column_names[i] = _metadata->ff.GetColumnName(_selected_columns[i]);
}
// Return user metadata
for (const auto &kv : _metadata->ff.metadata) {
out_metadata.user_data.insert({kv.name, kv.value});
}
return {std::make_unique<table>(std::move(out_columns)), std::move(out_metadata)};
}
// Forward to implementation
reader::reader(std::string filepath,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(datasource::create(filepath), options, mr))
{
}
// Forward to implementation
reader::reader(std::unique_ptr<cudf::io::datasource> source,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(std::move(source), options, mr))
{
}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read_all(cudaStream_t stream)
{
return _impl->read(0, -1, -1, -1, nullptr, stream);
}
// Forward to implementation
table_with_metadata reader::read_stripe(size_type stripe,
size_type stripe_count,
cudaStream_t stream)
{
return _impl->read(0, -1, stripe, stripe_count, nullptr, stream);
}
// Forward to implementation
table_with_metadata reader::read_stripes(const std::vector<size_type> &stripe_list,
cudaStream_t stream)
{
return _impl->read(
0, -1, -1, static_cast<size_type>(stripe_list.size()), stripe_list.data(), stream);
}
// Forward to implementation
table_with_metadata reader::read_rows(size_type skip_rows, size_type num_rows, cudaStream_t stream)
{
return _impl->read(skip_rows, (num_rows != 0) ? num_rows : -1, -1, -1, nullptr, stream);
}
} // namespace orc
} // namespace detail
} // namespace io
} // namespace cudf
|
ecb6dbe7de18ab5ce352877a0cd21dc16a0923a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "BinTransformPdf.hh"
EXEC_TARGET fptype device_BinTransform (fptype* evt, fptype* p, unsigned int* indices) {
// Index structure: nP lim1 bin1 lim2 bin2 ... nO o1 o2
int numObservables = indices[1 + indices[0]];
int ret = 0;
int previousSize = 1;
//printf("[%i, %i] Bin Transform: %i %i %f %f\n", THREADIDX, BLOCKIDX, numObservables, previousSize, evt[0], evt[1]);
for (int i = 0; i < numObservables; ++i) {
fptype obsValue = evt[indices[2 + indices[0] + i]];
fptype lowerLimit = functorConstants[indices[i*3+1]];
fptype binSize = functorConstants[indices[i*3+2]];
int numBins = indices[i*3+3];
int localBin = (int) FLOOR((obsValue - lowerLimit) / binSize);
ret += localBin * previousSize;
previousSize *= numBins;
}
return fptype(ret);
}
MEM_DEVICE device_function_ptr ptr_to_BinTransform = device_BinTransform;
// Notice that, for this purpose, the bin sizes and limits may differ from those implied by the Variable members.
__host__ BinTransformPdf::BinTransformPdf (std::string n, vector<Variable*> obses, vector<fptype> limits, vector<fptype> binSizes, vector<int> numBins)
: GooPdf(0, n)
{
cIndex = registerConstants(2*obses.size());
fptype* host_constants = new fptype[2*obses.size()];
std::vector<unsigned int> pindices;
for (unsigned int i = 0; i < obses.size(); ++i) {
registerObservable(obses[i]);
pindices.push_back(cIndex + 2*i);
pindices.push_back(cIndex + 2*i + 1);
pindices.push_back(numBins[i]);
host_constants[2*i] = limits[i]; // cIndex will be accounted for by offset in memcpy
host_constants[2*i+1] = binSizes[i];
}
MEMCPY_TO_SYMBOL(functorConstants, host_constants, 2*obses.size()*sizeof(fptype), cIndex*sizeof(fptype), hipMemcpyHostToDevice);
delete[] host_constants;
GET_FUNCTION_ADDR(ptr_to_BinTransform);
initialise(pindices);
}
| ecb6dbe7de18ab5ce352877a0cd21dc16a0923a4.cu | #include "BinTransformPdf.hh"
EXEC_TARGET fptype device_BinTransform (fptype* evt, fptype* p, unsigned int* indices) {
// Index structure: nP lim1 bin1 lim2 bin2 ... nO o1 o2
int numObservables = indices[1 + indices[0]];
int ret = 0;
int previousSize = 1;
//printf("[%i, %i] Bin Transform: %i %i %f %f\n", THREADIDX, BLOCKIDX, numObservables, previousSize, evt[0], evt[1]);
for (int i = 0; i < numObservables; ++i) {
fptype obsValue = evt[indices[2 + indices[0] + i]];
fptype lowerLimit = functorConstants[indices[i*3+1]];
fptype binSize = functorConstants[indices[i*3+2]];
int numBins = indices[i*3+3];
int localBin = (int) FLOOR((obsValue - lowerLimit) / binSize);
ret += localBin * previousSize;
previousSize *= numBins;
}
return fptype(ret);
}
MEM_DEVICE device_function_ptr ptr_to_BinTransform = device_BinTransform;
// Notice that, for this purpose, the bin sizes and limits may differ from those implied by the Variable members.
__host__ BinTransformPdf::BinTransformPdf (std::string n, vector<Variable*> obses, vector<fptype> limits, vector<fptype> binSizes, vector<int> numBins)
: GooPdf(0, n)
{
cIndex = registerConstants(2*obses.size());
fptype* host_constants = new fptype[2*obses.size()];
std::vector<unsigned int> pindices;
for (unsigned int i = 0; i < obses.size(); ++i) {
registerObservable(obses[i]);
pindices.push_back(cIndex + 2*i);
pindices.push_back(cIndex + 2*i + 1);
pindices.push_back(numBins[i]);
host_constants[2*i] = limits[i]; // cIndex will be accounted for by offset in memcpy
host_constants[2*i+1] = binSizes[i];
}
MEMCPY_TO_SYMBOL(functorConstants, host_constants, 2*obses.size()*sizeof(fptype), cIndex*sizeof(fptype), cudaMemcpyHostToDevice);
delete[] host_constants;
GET_FUNCTION_ADDR(ptr_to_BinTransform);
initialise(pindices);
}
|
f7e371026f3055a911b0a7aa702e25178008425e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/pair.h>
#include <thrust/transform.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#define N 10
struct make_pair_functor
{
template<typename T>
__host__ __device__
thrust::pair<T, T> operator() (const T &x, const T &y)
{
return thrust::make_pair(x, y);
}
};
struct pair_to_vector_first
{
template<typename T>
__host__ __device__
int operator() (const T &x)
{
return x.first;
}
};
struct pair_to_vector_second
{
template<typename T>
__host__ __device__
int operator() (const T &x)
{
return x.second;
}
};
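// accumulate_diff: given key/value arrays sorted by (key, value), each thread i
// adds 1 to counts[keys[i]] whenever its key or value differs from element i-1,
// so counts[k] ends up holding the number of distinct values seen for key k.
// Note the non-atomic write at i == 0 can race with atomicAdds on the same key.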
struct accumulate_diff
{
int *keys;
int *values;
int *counts;
accumulate_diff(int *k, int *v, int *c) : keys(k), values(v), counts(c) {}
template<typename T>
__device__
void operator() (const T &i)
{
__shared__ volatile int _sd[N];
_sd[i] = counts[i];
//__threadfence_system();
if (i == 0) counts[keys[i]] = 1; //__threadfence_system();}
if (i > 0)
{
//while (keys[i] != keys[i-1] && _sd[i] == 0);
if (keys[i] != keys[i-1])
{
//_sd[i] = 1;
atomicAdd(&counts[keys[i]], 1);
//__threadfence_system();
//_sd[i] = 0;
}
else
{
if (values[i] != values[i-1])
{
//printf("777777777777777 %d\n", keys[i]);
//_sd[i] = 1;
atomicAdd(&counts[keys[i]], 1);
//__threadfence_system();
//_sd[i] = 0;
}
}
}
//__threadfence_system();
//counts[keys[i]] = _sd[keys[i]];
}
};
int main()
{
int A[N] = {1, 3, 3, 3, 3, 2, 1, 2, 2, 1};
int B[N] = {9, 8, 7, 5, 6, 7, 8, 7, 6, 9};
int C[N];
int D[N];
thrust::device_vector<int> counts(N, 0);
typedef thrust::pair<int, int> P;
thrust::host_vector<P> h_pairs(N);
thrust::transform(A, A+N, B, h_pairs.begin(), make_pair_functor());
thrust::sort(h_pairs.begin(), h_pairs.end());
thrust::transform(h_pairs.begin(), h_pairs.end(), C, pair_to_vector_first());
thrust::transform(h_pairs.begin(), h_pairs.end(), D, pair_to_vector_second());
thrust::device_vector<int> c_vec(C, C+N);
thrust::device_vector<int> d_vec(D, D+N);
//thrust::reduce_by_key(C, C+7, thrust::constant_iterator<int>(1), C, )
accumulate_diff acc(thrust::raw_pointer_cast(c_vec.data()), thrust::raw_pointer_cast(d_vec.data()), thrust::raw_pointer_cast(counts.data()));
thrust::for_each(thrust::counting_iterator<unsigned int>(0), thrust::counting_iterator<unsigned int>(N), acc);
for (int i = 0; i < N; i++)
{
std::cout << h_pairs[i].first << ": " << h_pairs[i].second << "\n";
std::cout << "=========" << C[i] << "\n";
std::cout << "++++++++++++++" << counts[i] << "\n";
}
}
| f7e371026f3055a911b0a7aa702e25178008425e.cu | #include <thrust/pair.h>
#include <thrust/transform.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#define N 10
struct make_pair_functor
{
template<typename T>
__host__ __device__
thrust::pair<T, T> operator() (const T &x, const T &y)
{
return thrust::make_pair(x, y);
}
};
struct pair_to_vector_first
{
template<typename T>
__host__ __device__
int operator() (const T &x)
{
return x.first;
}
};
struct pair_to_vector_second
{
template<typename T>
__host__ __device__
int operator() (const T &x)
{
return x.second;
}
};
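// accumulate_diff: given key/value arrays sorted by (key, value), each thread i
// adds 1 to counts[keys[i]] whenever its key or value differs from element i-1,
// so counts[k] ends up holding the number of distinct values seen for key k.
// Note the non-atomic write at i == 0 can race with atomicAdds on the same key.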
struct accumulate_diff
{
int *keys;
int *values;
int *counts;
accumulate_diff(int *k, int *v, int *c) : keys(k), values(v), counts(c) {}
template<typename T>
__device__
void operator() (const T &i)
{
__shared__ volatile int _sd[N];
_sd[i] = counts[i];
//__threadfence_system();
if (i == 0) counts[keys[i]] = 1; //__threadfence_system();}
if (i > 0)
{
//while (keys[i] != keys[i-1] && _sd[i] == 0);
if (keys[i] != keys[i-1])
{
//_sd[i] = 1;
atomicAdd(&counts[keys[i]], 1);
//__threadfence_system();
//_sd[i] = 0;
}
else
{
if (values[i] != values[i-1])
{
//printf("777777777777777 %d\n", keys[i]);
//_sd[i] = 1;
atomicAdd(&counts[keys[i]], 1);
//__threadfence_system();
//_sd[i] = 0;
}
}
}
//__threadfence_system();
//counts[keys[i]] = _sd[keys[i]];
}
};
int main()
{
int A[N] = {1, 3, 3, 3, 3, 2, 1, 2, 2, 1};
int B[N] = {9, 8, 7, 5, 6, 7, 8, 7, 6, 9};
int C[N];
int D[N];
thrust::device_vector<int> counts(N, 0);
typedef thrust::pair<int, int> P;
thrust::host_vector<P> h_pairs(N);
thrust::transform(A, A+N, B, h_pairs.begin(), make_pair_functor());
thrust::sort(h_pairs.begin(), h_pairs.end());
thrust::transform(h_pairs.begin(), h_pairs.end(), C, pair_to_vector_first());
thrust::transform(h_pairs.begin(), h_pairs.end(), D, pair_to_vector_second());
thrust::device_vector<int> c_vec(C, C+N);
thrust::device_vector<int> d_vec(D, D+N);
//thrust::reduce_by_key(C, C+7, thrust::constant_iterator<int>(1), C, )
accumulate_diff acc(thrust::raw_pointer_cast(c_vec.data()), thrust::raw_pointer_cast(d_vec.data()), thrust::raw_pointer_cast(counts.data()));
thrust::for_each(thrust::counting_iterator<unsigned int>(0), thrust::counting_iterator<unsigned int>(N), acc);
for (int i = 0; i < N; i++)
{
std::cout << h_pairs[i].first << ": " << h_pairs[i].second << "\n";
std::cout << "=========" << C[i] << "\n";
std::cout << "++++++++++++++" << counts[i] << "\n";
}
}
|
b286a84537259c7af3142edb025f522951a4e461.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 128*128
__global__ void kernelMontecarlo(float *x, float *y,int *contador) {
//int i = threadIdx.x + blockIdx.x*blockDim.x;
//int j = threadIdx.y + blockIdx.y*blockDim.y;
int indice = threadIdx.x + blockIdx.x*blockDim.x;
//int indice=i;
//printf("Indice: %f\n",(x[indice]*x[indice] + y[indice]*y[indice]));
if((x[indice]*x[indice] + y[indice]*y[indice]) <=1.0) {
atomicAdd(contador,1);//contador++;
//printf("Contador: %d\n",*contador);
}
}
float calcularPI();
int main(){
//initialize the random seed
float resultado=calcularPI();
printf("\nPi: %f\n",resultado);
return 0;
}
float calcularPI(){
float x[N],y[N];
int contador;
float *xD,*yD;
int *contadorD=0;
int size = sizeof(float)*N;
//create timing events
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
/*dim3 nb(B,B);
dim3 nh(N/T,N/T);
*/
srand(time(NULL));
//initialize host memory
memset(x, 0, N*sizeof(float));
memset(y, 0, N*sizeof(float));
//generate random numbers
for(int i=0;i<N;i++){
x[i]=rand()/(RAND_MAX + 1.0f);
y[i]=rand()/(RAND_MAX + 1.0f);
//x[i]=(float)(rand()%100) / 99;
//y[i]=(float)(rand()%100) / 99;
}
hipMalloc(&xD, size);
hipMalloc(&yD, size);
hipMalloc(&contadorD, sizeof(int));
contador=0;
hipMemcpy(xD, x, size, hipMemcpyHostToDevice);
hipMemcpy(yD, y, size, hipMemcpyHostToDevice);
hipMemcpy(contadorD, &contador, sizeof(int), hipMemcpyHostToDevice);
//start timing
hipEventRecord(start);
hipLaunchKernelGGL(( kernelMontecarlo), dim3(N/32), dim3(32), 0, 0, xD, yD,contadorD);
hipMemcpy(&contador, contadorD, sizeof(int), hipMemcpyDeviceToHost);
//for(int i=0; i<N;i++)
// if((x[i]*x[i] + y[i]*y[i]) <=1.0) contador++;
//float resultado= 4*contador/N;
//stop timing
hipEventRecord(stop);
//synchronize
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("Tiempo: %f milisegundos\n",milliseconds);
float resultado=4*contador/float(N);
return resultado;
} | b286a84537259c7af3142edb025f522951a4e461.cu | #include <stdio.h>
#define N 128*128
__global__ void kernelMontecarlo(float *x, float *y,int *contador) {
//int i = threadIdx.x + blockIdx.x*blockDim.x;
//int j = threadIdx.y + blockIdx.y*blockDim.y;
int indice = threadIdx.x + blockIdx.x*blockDim.x;
//int indice=i;
//printf("Indice: %f\n",(x[indice]*x[indice] + y[indice]*y[indice]));
if((x[indice]*x[indice] + y[indice]*y[indice]) <=1.0) {
atomicAdd(contador,1);//contador++;
//printf("Contador: %d\n",*contador);
}
}
float calcularPI();
int main(){
//initialize the random seed
float resultado=calcularPI();
printf("\nPi: %f\n",resultado);
return 0;
}
float calcularPI(){
float x[N],y[N];
int contador;
float *xD,*yD;
int *contadorD=0;
int size = sizeof(float)*N;
//create timing events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
/*dim3 nb(B,B);
dim3 nh(N/T,N/T);
*/
srand(time(NULL));
//initialize host memory
memset(x, 0, N*sizeof(float));
memset(y, 0, N*sizeof(float));
//generate random numbers
for(int i=0;i<N;i++){
x[i]=rand()/(RAND_MAX + 1.0f);
y[i]=rand()/(RAND_MAX + 1.0f);
//x[i]=(float)(rand()%100) / 99;
//y[i]=(float)(rand()%100) / 99;
}
cudaMalloc(&xD, size);
cudaMalloc(&yD, size);
cudaMalloc(&contadorD, sizeof(int));
contador=0;
cudaMemcpy(xD, x, size, cudaMemcpyHostToDevice);
cudaMemcpy(yD, y, size, cudaMemcpyHostToDevice);
cudaMemcpy(contadorD, &contador, sizeof(int), cudaMemcpyHostToDevice);
//start timing
cudaEventRecord(start);
kernelMontecarlo<<< N/32, 32>>>(xD, yD,contadorD);
cudaMemcpy(&contador, contadorD, sizeof(int), cudaMemcpyDeviceToHost);
//for(int i=0; i<N;i++)
// if((x[i]*x[i] + y[i]*y[i]) <=1.0) contador++;
//float resultado= 4*contador/N;
//stop timing
cudaEventRecord(stop);
//synchronize
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Tiempo: %f milisegundos\n",milliseconds);
float resultado=4*contador/float(N);
return resultado;
} |
dee032dc9acdc355f5e3f82c7d28360b2529b9b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// shared memory d_in d_out version
#include "kernel.h"
#define TX 32
#define TY 32
#define RAD 1
int divUp(int a, int b) { return (a + b - 1) / b; }
__device__ unsigned char clip(int n) {
return n > 255 ? 255 : (n < 0 ? 0 : n);
}
__device__ int idxClip(int idx, int idxMax) {
return idx > (idxMax - 1) ? (idxMax - 1) : (idx < 0 ? 0 : idx);
}
__device__ int flatten(int col, int row, int width, int height) {
return idxClip(col, width) + idxClip(row, height)*width;
}
__global__ void sharpenKernel(uchar4 *d_out, const uchar4 *d_in, const float *d_filter, int w, int h) {
const int c = threadIdx.x + blockDim.x*blockIdx.x;
const int r = threadIdx.y + blockDim.y*blockIdx.y;
if ((c >= w) || (r >= h)) return;
const int i = flatten(c, r, w, h);
const int s_c = threadIdx.x + RAD;
const int s_r = threadIdx.y + RAD;
const int s_w = blockDim.x + 2 * RAD;
const int s_h = blockDim.y + 2 * RAD;
const int s_i = flatten(s_c, s_r, s_w, s_h);
const int fltSz = 2 * RAD + 1;
extern __shared__ uchar4 s_block[];
uchar4 *s_in = s_block;
uchar4 *s_out = &s_block[s_w*s_h];
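// Dynamic shared memory layout: an input tile of (blockDim.x + 2*RAD) x (blockDim.y + 2*RAD)
// elements (block plus halo) followed by a blockDim.x x blockDim.y output tile;
// the launch parameter smSz must cover both regions.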
//Regular cells
s_in[s_i] = d_in[i];
//Halo cells
if (threadIdx.x < RAD && threadIdx.y < RAD) {
s_in[flatten(s_c - RAD, s_r - RAD, s_w, s_h)] = d_in[flatten(c - RAD, r - RAD, w, h)];
s_in[flatten(s_c + blockDim.x, s_r - RAD, s_w, s_h)] = d_in[flatten(c + blockDim.x, r - RAD, w, h)];
s_in[flatten(s_c - RAD, s_r + blockDim.y, s_w, s_h)] = d_in[flatten(c - RAD, r + blockDim.y, w, h)];
s_in[flatten(s_c + blockDim.x, s_r + blockDim.y, s_w, s_h)] = d_in[flatten(c + blockDim.x, r + blockDim.y, w, h)];
}
if (threadIdx.x < RAD) {
s_in[flatten(s_c - RAD, s_r, s_w, s_h)] =
d_in[flatten(c - RAD, r, w, h)];
s_in[flatten(s_c + blockDim.x, s_r, s_w, s_h)] =
d_in[flatten(c + blockDim.x, r, w, h)];
}
if (threadIdx.y < RAD) {
s_in[flatten(s_c, s_r - RAD, s_w, s_h)] =
d_in[flatten(c, r - RAD, w, h)];
s_in[flatten(s_c, s_r + blockDim.y, s_w, s_h)]
= d_in[flatten(c, r + blockDim.y, w, h)];
}
__syncthreads();
float rgb[3] = { 0.f,0.f,0.f };
for (int rd = -RAD; rd <= RAD; rd++) {
for (int cd = -RAD; cd <= RAD; cd++) {
const int s_imgIdx = flatten(s_c + cd, s_r + rd, s_w, s_h);
const int fltIdx = flatten(RAD + cd, RAD + rd, fltSz, fltSz);
const uchar4 color = s_in[s_imgIdx];
const float weight = d_filter[fltIdx];
rgb[0] += weight * color.x;
rgb[1] += weight * color.y;
rgb[2] += weight * color.z;
}
}
const int s_outIdx = threadIdx.y * blockDim.x + threadIdx.x;
s_out[s_outIdx].x = clip(rgb[0]);
s_out[s_outIdx].y = clip(rgb[1]);
s_out[s_outIdx].z = clip(rgb[2]);
__syncthreads();
d_out[i] = s_out[s_outIdx];
}
void sharpenParallel(uchar4 *arr, int w, int h) {
const int fltSz = 2 * RAD + 1;
const float filter[9] = { 0.005, 0.025, 0.005,
0.025, -0.1, 0.025,
0.005, 0.025, 0.005 };
uchar4 *d_in = 0, *d_out = 0;
float *d_filter = 0;
hipMalloc(&d_in, w*h * sizeof(uchar4));
hipMemcpy(d_in, arr, w*h * sizeof(uchar4), hipMemcpyHostToDevice);
hipMalloc(&d_out, w*h * sizeof(uchar4));
hipMalloc(&d_filter, fltSz*fltSz * sizeof(float));
hipMemcpy(d_filter, filter, fltSz*fltSz * sizeof(float), hipMemcpyHostToDevice);
const dim3 blockSize(TX, TY);
const dim3 gridSize(divUp(w, TX), divUp(h, TY));
const size_t smSz = (TX + 2 * RAD)*(TY + 2 * RAD) * sizeof(uchar4) +(TX * TY)*sizeof(uchar4);
sharpenKernel << <gridSize, blockSize, smSz >> > (d_out, d_in, d_filter, w, h);
hipMemcpy(arr, d_out, w*h * sizeof(uchar4), hipMemcpyDeviceToHost);
hipFree(d_in);
hipFree(d_out);
hipFree(d_filter);
}
| dee032dc9acdc355f5e3f82c7d28360b2529b9b0.cu | // shared memory d_in d_out version
#include "kernel.h"
#define TX 32
#define TY 32
#define RAD 1
int divUp(int a, int b) { return (a + b - 1) / b; }
__device__ unsigned char clip(int n) {
return n > 255 ? 255 : (n < 0 ? 0 : n);
}
__device__ int idxClip(int idx, int idxMax) {
return idx > (idxMax - 1) ? (idxMax - 1) : (idx < 0 ? 0 : idx);
}
__device__ int flatten(int col, int row, int width, int height) {
return idxClip(col, width) + idxClip(row, height)*width;
}
__global__ void sharpenKernel(uchar4 *d_out, const uchar4 *d_in, const float *d_filter, int w, int h) {
const int c = threadIdx.x + blockDim.x*blockIdx.x;
const int r = threadIdx.y + blockDim.y*blockIdx.y;
if ((c >= w) || (r >= h)) return;
const int i = flatten(c, r, w, h);
const int s_c = threadIdx.x + RAD;
const int s_r = threadIdx.y + RAD;
const int s_w = blockDim.x + 2 * RAD;
const int s_h = blockDim.y + 2 * RAD;
const int s_i = flatten(s_c, s_r, s_w, s_h);
const int fltSz = 2 * RAD + 1;
extern __shared__ uchar4 s_block[];
uchar4 *s_in = s_block;
uchar4 *s_out = &s_block[s_w*s_h];
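// Dynamic shared memory layout: an input tile of (blockDim.x + 2*RAD) x (blockDim.y + 2*RAD)
// elements (block plus halo) followed by a blockDim.x x blockDim.y output tile;
// the launch parameter smSz must cover both regions.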
//Regular cells
s_in[s_i] = d_in[i];
//Halo cells
if (threadIdx.x < RAD && threadIdx.y < RAD) {
s_in[flatten(s_c - RAD, s_r - RAD, s_w, s_h)] = d_in[flatten(c - RAD, r - RAD, w, h)];
s_in[flatten(s_c + blockDim.x, s_r - RAD, s_w, s_h)] = d_in[flatten(c + blockDim.x, r - RAD, w, h)];
s_in[flatten(s_c - RAD, s_r + blockDim.y, s_w, s_h)] = d_in[flatten(c - RAD, r + blockDim.y, w, h)];
s_in[flatten(s_c + blockDim.x, s_r + blockDim.y, s_w, s_h)] = d_in[flatten(c + blockDim.x, r + blockDim.y, w, h)];
}
if (threadIdx.x < RAD) {
s_in[flatten(s_c - RAD, s_r, s_w, s_h)] =
d_in[flatten(c - RAD, r, w, h)];
s_in[flatten(s_c + blockDim.x, s_r, s_w, s_h)] =
d_in[flatten(c + blockDim.x, r, w, h)];
}
if (threadIdx.y < RAD) {
s_in[flatten(s_c, s_r - RAD, s_w, s_h)] =
d_in[flatten(c, r - RAD, w, h)];
s_in[flatten(s_c, s_r + blockDim.y, s_w, s_h)]
= d_in[flatten(c, r + blockDim.y, w, h)];
}
__syncthreads();
float rgb[3] = { 0.f,0.f,0.f };
for (int rd = -RAD; rd <= RAD; rd++) {
for (int cd = -RAD; cd <= RAD; cd++) {
const int s_imgIdx = flatten(s_c + cd, s_r + rd, s_w, s_h);
const int fltIdx = flatten(RAD + cd, RAD + rd, fltSz, fltSz);
const uchar4 color = s_in[s_imgIdx];
const float weight = d_filter[fltIdx];
rgb[0] += weight * color.x;
rgb[1] += weight * color.y;
rgb[2] += weight * color.z;
}
}
const int s_outIdx = threadIdx.y * blockDim.x + threadIdx.x;
s_out[s_outIdx].x = clip(rgb[0]);
s_out[s_outIdx].y = clip(rgb[1]);
s_out[s_outIdx].z = clip(rgb[2]);
__syncthreads();
d_out[i] = s_out[s_outIdx];
}
void sharpenParallel(uchar4 *arr, int w, int h) {
const int fltSz = 2 * RAD + 1;
const float filter[9] = { 0.005, 0.025, 0.005,
0.025, -0.1, 0.025,
0.005, 0.025, 0.005 };
uchar4 *d_in = 0, *d_out = 0;
float *d_filter = 0;
cudaMalloc(&d_in, w*h * sizeof(uchar4));
cudaMemcpy(d_in, arr, w*h * sizeof(uchar4), cudaMemcpyHostToDevice);
cudaMalloc(&d_out, w*h * sizeof(uchar4));
cudaMalloc(&d_filter, fltSz*fltSz * sizeof(float));
cudaMemcpy(d_filter, filter, fltSz*fltSz * sizeof(float), cudaMemcpyHostToDevice);
const dim3 blockSize(TX, TY);
const dim3 gridSize(divUp(w, TX), divUp(h, TY));
const size_t smSz = (TX + 2 * RAD)*(TY + 2 * RAD) * sizeof(uchar4) +(TX * TY)*sizeof(uchar4);
sharpenKernel << <gridSize, blockSize, smSz >> > (d_out, d_in, d_filter, w, h);
cudaMemcpy(arr, d_out, w*h * sizeof(uchar4), cudaMemcpyDeviceToHost);
cudaFree(d_in);
cudaFree(d_out);
cudaFree(d_filter);
}
|
e1c43008968b1a975a3fdfc1b51b072359bb7e2b.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
#include "typedefs.h"
#include "device_launch_parameters.h"
#include "util.hcu"
#include "cutil_math.h"
#include "tables.h"
using namespace std;
extern "C" {
__constant__ uint d_edgeTable[EDGE_SIZE];
__constant__ uint d_triTable[TRI_ROWS][TRI_COLS];
const uint MAX_TRIANGLES = 15;
void allocateTables() {
hipMemcpyToSymbol(d_edgeTable, edgeTable, sizeof(edgeTable));
hipMemcpyToSymbol(d_triTable, triTable, sizeof(triTable));
}
__device__
float3 cornerValue(const uint3 co, const float3 minX, const float3 dx) {
return make_float3(minX.x + co.x*dx.x, minX.y + co.y*dx.y, minX.z + co.z*dx.z);
}
__device__
float func(float3 co) {
return co.x*co.x + co.y*co.y + co.z*co.z - 1;
}
__device__
void interpValues(float isoValue, const float v0, const float v1, float3 p0, float3 p1, float3& out) {
float mu = (isoValue - v0) / (v1 - v0);
out = lerp(p0, p1, mu);
}
__global__
void simpleKernel(float isoValue, dim3 dims, float3 minX, float3 dx, float3* out, uint* count) {
uint idx = blockIdx.x*blockDim.x + threadIdx.x;
uint3 co = idx_to_co(idx, dims);
float3 corners[8];
corners[0] = cornerValue(co, minX, dx);
corners[1] = corners[0] + make_float3(dx.x, 0, 0);
corners[2] = corners[0] + make_float3(dx.x, dx.y, 0);
corners[3] = corners[0] + make_float3(0, dx.y, 0);
corners[4] = corners[0] + make_float3(0, 0, dx.z);
corners[5] = corners[0] + make_float3(dx.x, 0, dx.z);
corners[6] = corners[0] + make_float3(dx.x, dx.y, dx.z);
corners[7] = corners[0] + make_float3(0, dx.y, dx.z);
float value[8];
for (int i = 0; i < 8; i++) {
value[i] = func(corners[i]);
}
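// Build the 8-bit marching-cubes case index: bit i is set when corner i lies
// inside the isosurface (value[i] < isoValue); edgeTable/triTable are indexed by it.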
uint cubeindex;
cubeindex = uint(value[0] < isoValue);
cubeindex += uint(value[1] < isoValue)*2;
cubeindex += uint(value[2] < isoValue)*4;
cubeindex += uint(value[3] < isoValue)*8;
cubeindex += uint(value[4] < isoValue)*16;
cubeindex += uint(value[5] < isoValue)*32;
cubeindex += uint(value[6] < isoValue)*64;
cubeindex += uint(value[7] < isoValue)*128;
float3 vertList[12];
if (d_edgeTable[cubeindex] & 1)
interpValues(isoValue,value[0],value[1],corners[0],corners[1], vertList[0]);
if (d_edgeTable[cubeindex] & 2)
interpValues(isoValue,value[1],value[2],corners[1],corners[2], vertList[1]);
if (d_edgeTable[cubeindex] & 4)
interpValues(isoValue,value[2],value[3],corners[2],corners[3], vertList[2]);
if (d_edgeTable[cubeindex] & 8)
interpValues(isoValue,value[3],value[0],corners[3],corners[0], vertList[3]);
if (d_edgeTable[cubeindex] & 16)
interpValues(isoValue,value[4],value[5],corners[4],corners[5], vertList[4]);
if (d_edgeTable[cubeindex] & 32)
interpValues(isoValue,value[5],value[6],corners[5],corners[6], vertList[5]);
if (d_edgeTable[cubeindex] & 64)
interpValues(isoValue,value[6],value[7],corners[6],corners[7], vertList[6]);
if (d_edgeTable[cubeindex] & 128)
interpValues(isoValue,value[7],value[4],corners[7],corners[4], vertList[7]);
if (d_edgeTable[cubeindex] & 256)
interpValues(isoValue,value[0],value[4],corners[0],corners[4], vertList[8]);
if (d_edgeTable[cubeindex] & 512)
interpValues(isoValue,value[1],value[5],corners[1],corners[5], vertList[9]);
if (d_edgeTable[cubeindex] & 1024)
interpValues(isoValue,value[2],value[6],corners[2],corners[6], vertList[10]);
if (d_edgeTable[cubeindex] & 2048)
interpValues(isoValue,value[3],value[7],corners[3],corners[7], vertList[11]);
uint i = 0;
uint offset = idx*MAX_TRIANGLES;
count[idx] = 25;
for (; i < MAX_TRIANGLES; i++) {
uint edge = d_triTable[cubeindex][i];
count[idx] = i;
if (edge == 255) break;
out[offset + i] = vertList[edge];
}
count[idx] = i;
}
int main() {
allocateTables();
uint3 dims = make_uint3(100, 200, 200);
float3 min = make_float3(1, 1, 1)*-3;
float3 dx = make_float3(0.2f, 0.2f, 0.2f);
const uint N = prod(dims);
uint* d_count;
float3* d_pos;
hipMalloc((void **) &d_count, N*sizeof(uint));
hipMalloc((void **) &d_pos, N*MAX_TRIANGLES*sizeof(float3));
hipLaunchKernelGGL(( simpleKernel) , dim3(N/200), dim3(200) , 0, 0, 0, dims, min, dx, d_pos, d_count);
uint* h_count = new uint[N];
float3* h_pos = new float3[N*MAX_TRIANGLES];
hipMemcpy(h_count, d_count, N * sizeof(uint), hipMemcpyDeviceToHost);
hipMemcpy(h_pos, d_pos, N * MAX_TRIANGLES * sizeof(float3), hipMemcpyDeviceToHost);
for (uint i = 0; i < N; i++) {
uint h = h_count[i];
if (!h) continue;
cout << h << endl;
for (uint j = 0; j < h; j++) {
float3 f = h_pos[i*MAX_TRIANGLES + j];
cout << f.x << " " << f.y << " " << f.z << " " << f.x*f.x + f.y*f.y + f.z*f.z << endl;
}
cout << endl;
}
return 1;
}
} | e1c43008968b1a975a3fdfc1b51b072359bb7e2b.cu | #include <iostream>
#include <cuda.h>
#include "cuda_runtime.h"
#include "typedefs.h"
#include "device_launch_parameters.h"
#include "util.hcu"
#include "cutil_math.h"
#include "tables.h"
using namespace std;
extern "C" {
__constant__ uint d_edgeTable[EDGE_SIZE];
__constant__ uint d_triTable[TRI_ROWS][TRI_COLS];
const uint MAX_TRIANGLES = 15;
void allocateTables() {
cudaMemcpyToSymbol(d_edgeTable, edgeTable, sizeof(edgeTable));
cudaMemcpyToSymbol(d_triTable, triTable, sizeof(triTable));
}
__device__
float3 cornerValue(const uint3 co, const float3 minX, const float3 dx) {
return make_float3(minX.x + co.x*dx.x, minX.y + co.y*dx.y, minX.z + co.z*dx.z);
}
__device__
float func(float3 co) {
return co.x*co.x + co.y*co.y + co.z*co.z - 1;
}
__device__
void interpValues(float isoValue, const float v0, const float v1, float3 p0, float3 p1, float3& out) {
float mu = (isoValue - v0) / (v1 - v0);
out = lerp(p0, p1, mu);
}
__global__
void simpleKernel(float isoValue, dim3 dims, float3 minX, float3 dx, float3* out, uint* count) {
uint idx = blockIdx.x*blockDim.x + threadIdx.x;
uint3 co = idx_to_co(idx, dims);
float3 corners[8];
corners[0] = cornerValue(co, minX, dx);
corners[1] = corners[0] + make_float3(dx.x, 0, 0);
corners[2] = corners[0] + make_float3(dx.x, dx.y, 0);
corners[3] = corners[0] + make_float3(0, dx.y, 0);
corners[4] = corners[0] + make_float3(0, 0, dx.z);
corners[5] = corners[0] + make_float3(dx.x, 0, dx.z);
corners[6] = corners[0] + make_float3(dx.x, dx.y, dx.z);
corners[7] = corners[0] + make_float3(0, dx.y, dx.z);
float value[8];
for (int i = 0; i < 8; i++) {
value[i] = func(corners[i]);
}
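// Build the 8-bit marching-cubes case index: bit i is set when corner i lies
// inside the isosurface (value[i] < isoValue); edgeTable/triTable are indexed by it.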
uint cubeindex;
cubeindex = uint(value[0] < isoValue);
cubeindex += uint(value[1] < isoValue)*2;
cubeindex += uint(value[2] < isoValue)*4;
cubeindex += uint(value[3] < isoValue)*8;
cubeindex += uint(value[4] < isoValue)*16;
cubeindex += uint(value[5] < isoValue)*32;
cubeindex += uint(value[6] < isoValue)*64;
cubeindex += uint(value[7] < isoValue)*128;
float3 vertList[12];
if (d_edgeTable[cubeindex] & 1)
interpValues(isoValue,value[0],value[1],corners[0],corners[1], vertList[0]);
if (d_edgeTable[cubeindex] & 2)
interpValues(isoValue,value[1],value[2],corners[1],corners[2], vertList[1]);
if (d_edgeTable[cubeindex] & 4)
interpValues(isoValue,value[2],value[3],corners[2],corners[3], vertList[2]);
if (d_edgeTable[cubeindex] & 8)
interpValues(isoValue,value[3],value[0],corners[3],corners[0], vertList[3]);
if (d_edgeTable[cubeindex] & 16)
interpValues(isoValue,value[4],value[5],corners[4],corners[5], vertList[4]);
if (d_edgeTable[cubeindex] & 32)
interpValues(isoValue,value[5],value[6],corners[5],corners[6], vertList[5]);
if (d_edgeTable[cubeindex] & 64)
interpValues(isoValue,value[6],value[7],corners[6],corners[7], vertList[6]);
if (d_edgeTable[cubeindex] & 128)
interpValues(isoValue,value[7],value[4],corners[7],corners[4], vertList[7]);
if (d_edgeTable[cubeindex] & 256)
interpValues(isoValue,value[0],value[4],corners[0],corners[4], vertList[8]);
if (d_edgeTable[cubeindex] & 512)
interpValues(isoValue,value[1],value[5],corners[1],corners[5], vertList[9]);
if (d_edgeTable[cubeindex] & 1024)
interpValues(isoValue,value[2],value[6],corners[2],corners[6], vertList[10]);
if (d_edgeTable[cubeindex] & 2048)
interpValues(isoValue,value[3],value[7],corners[3],corners[7], vertList[11]);
uint i = 0;
uint offset = idx*MAX_TRIANGLES;
count[idx] = 25;
for (; i < MAX_TRIANGLES; i++) {
uint edge = d_triTable[cubeindex][i];
count[idx] = i;
if (edge == 255) break;
out[offset + i] = vertList[edge];
}
count[idx] = i;
}
int main() {
allocateTables();
uint3 dims = make_uint3(100, 200, 200);
float3 min = make_float3(1, 1, 1)*-3;
float3 dx = make_float3(0.2f, 0.2f, 0.2f);
const uint N = prod(dims);
uint* d_count;
float3* d_pos;
cudaMalloc((void **) &d_count, N*sizeof(uint));
cudaMalloc((void **) &d_pos, N*MAX_TRIANGLES*sizeof(float3));
simpleKernel <<< N/200, 200 >>> (0, dims, min, dx, d_pos, d_count);
uint* h_count = new uint[N];
float3* h_pos = new float3[N*MAX_TRIANGLES];
cudaMemcpy(h_count, d_count, N * sizeof(uint), cudaMemcpyDeviceToHost);
cudaMemcpy(h_pos, d_pos, N * MAX_TRIANGLES * sizeof(float3), cudaMemcpyDeviceToHost);
for (uint i = 0; i < N; i++) {
uint h = h_count[i];
if (!h) continue;
cout << h << endl;
for (uint j = 0; j < h; j++) {
float3 f = h_pos[i*MAX_TRIANGLES + j];
cout << f.x << " " << f.y << " " << f.z << " " << f.x*f.x + f.y*f.y + f.z*f.z << endl;
}
cout << endl;
}
return 1;
}
} |
0be9c43c7b7474d8e138487aab8318bb9293caa3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* main.cu
*
* Created on: Nov 14, 2019
* Author: cuda-s01
*/
#include <stdio.h>
__global__ void matrixMultiplicationKernel(float* M, float* N, float* P, int Width) {
// Calculate the row index of the P element and M
int Row = blockIdx.y*blockDim.y+threadIdx.y;
// Calculate the column index of P and N
int Col = blockIdx.x*blockDim.x+threadIdx.x;
//debug line:
//printf("Row:%d, Col:%d. BlockIdx(%d,%d), blockDim(%d,%d) threadIdx(%d,%d)\n\n",Row,Col,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y,threadIdx.x,threadIdx.y);
if ((Row < Width) && (Col < Width)) {
float Pvalue = 0;
// each thread computes one element of the block sub-matrix
for (int k = 0; k < Width; ++k) {
Pvalue += M[Row*Width+k]*N[k*Width+Col];
}
P[Row*Width+Col] = Pvalue;
}
// threads outside the matrix bounds do nothing (writing P here would be out of bounds)
}
void matrixMultiplication(float *M, float *N, float *P, int Width){
// declare the number of blocks per grid and the number of threads per block
int th = Width;
int bl = 1;
dim3 threadsPerBlock(th,th);
dim3 blocksPerGrid(bl,bl);
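// Note: a single block of Width x Width threads is launched, so this wrapper
// only handles Width <= 32 (the 1024-threads-per-block limit); larger matrices
// would need a grid of multiple blocks.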
printf("Kernel started: %d blocks, %d threads.\n", bl, th);
hipLaunchKernelGGL(( matrixMultiplicationKernel), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, M, N, P, Width);
}
void PrintMatrix(float* M, int Width)
{
for(int i = 0; i < Width; i++)
{
for(int j = 0; j < Width; j++)
printf("%f ",M[i*Width+j]);
printf("\n");
}
printf("\n");
}
int main(void)
{
printf("Starting the program:\n");
hipError_t err = hipSuccess;
int matrix_size = 10;
int num_of_elements = matrix_size * matrix_size;
size_t size = num_of_elements * sizeof(float);
printf("matrix [%d x %d] multiplication.\n", matrix_size, matrix_size);
//==========================HOST===============================================
//allocate matrices (two input ones, one output one):
//a matrix can be represented as a flat vector in memory - that is how it is stored on the GPU,
//so for simpler indexing the same representation is used on the host as well
printf("Started variables allocation for the host.\n");
float *M_h = (float *)malloc(size);
float *N_h = (float *)malloc(size);
float *P_h = (float *)malloc(size);
if(M_h == NULL || N_h == NULL || P_h == NULL)
{
fprintf(stderr, "Failed to allocate host matrix!\n");
exit(EXIT_FAILURE);
}else printf("Allocation on host successful.\n");
//initialize matrices:
printf("Started initialization.\n");
for(int i = 0; i < num_of_elements; i++)
{
M_h[i] = rand()/(float)RAND_MAX;
N_h[i] = rand()/(float)RAND_MAX;
}
printf("Initialization fnished.\n");
//==========================DEVICE==============================================
	//allocate matrices on the device:
printf("Started variables allocation for the device.\n");
printf("First matrix.\n");
float *M_d;
err = hipMalloc((void**)&M_d, size);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to allocate host matrix!\n");
exit(EXIT_FAILURE);
} else printf("Allocation successful.\n");
printf("Second matrix.\n");
float *N_d;
err = hipMalloc((void**)&N_d, size);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to allocate host matrix!\n");
exit(EXIT_FAILURE);
} else printf("Allocation successful.\n");
printf("Third matrix.\n");
float *P_d;
err = hipMalloc((void**)&P_d, size);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to allocate host matrix!\n");
exit(EXIT_FAILURE);
} else printf("Allocation successful.\n");
	//copy matrices to the device:
printf("Started variables copying into the device.\n");
printf("First matrix.\n");
err = hipMemcpy(M_d, M_h, size, hipMemcpyHostToDevice);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to copy first matrix!\n");
exit(EXIT_FAILURE);
} else printf("Copying successful.\n");
printf("Second matrix.\n");
err = hipMemcpy(N_d, N_h, size, hipMemcpyHostToDevice);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to copy second matrix!\n");
exit(EXIT_FAILURE);
} else printf("Copying successful.\n");
//calculations:
matrixMultiplication(M_d, N_d, P_d, matrix_size);
err = hipGetLastError();
if(err != hipSuccess)
{
fprintf(stderr, "Failed to launch kernel. Error: %s.\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
} else printf("Kerel operations successful.\n");
printf("Started variables copying from the device.\n");
err = hipMemcpy(P_h, P_d, size, hipMemcpyDeviceToHost);
if(err != hipSuccess)
{
fprintf(stderr, "Failed to copy result matrix. Error: %s.\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
} else printf("Copying successful.\n");
//==========================TEST===============================================
//PrintMatrix(M_h, matrix_size);
//PrintMatrix(N_h, matrix_size);
//PrintMatrix(P_h, matrix_size);
for(int i = 0; i < matrix_size; i++)
{
for(int j = 0; j < matrix_size; j++)
{
float tmp = 0;
for(int k = 0; k < matrix_size; k++)
tmp += M_h[i*matrix_size + k] * N_h[k*matrix_size + j];
//debug line:
//printf("%f ",tmp);
if(fabs(tmp - P_h[i*matrix_size + j]) > 1e-3)
{
fprintf(stderr, "Verification test failed.!\nElement at index (%d, %d) should be %f, but is %f. \n",
i,j,tmp,P_h[i*matrix_size + j]);
exit(EXIT_FAILURE);
}
}
}
printf("Test PASSED\n");
// Free device global memory
err = hipFree(M_d);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device matrix M (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(N_d);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device matrix N (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(P_d);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device matrix P (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(M_h);
free(N_h);
free(P_h);
printf("Done\n");
return 0;
}
| 0be9c43c7b7474d8e138487aab8318bb9293caa3.cu | /*
* main.cu
*
* Created on: Nov 14, 2019
* Author: cuda-s01
*/
#include <stdio.h>
__global__ void matrixMultiplicationKernel(float* M, float* N, float* P, int Width) {
// Calculate the row index of the P element and M
int Row = blockIdx.y*blockDim.y+threadIdx.y;
// Calculate the column index of P and N
int Col = blockIdx.x*blockDim.x+threadIdx.x;
//debug line:
//printf("Row:%d, Col:%d. BlockIdx(%d,%d), blockDim(%d,%d) threadIdx(%d,%d)\n\n",Row,Col,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y,threadIdx.x,threadIdx.y);
if ((Row < Width) && (Col < Width)) {
float Pvalue = 0;
// each thread computes one element of the block sub-matrix
for (int k = 0; k < Width; ++k) {
Pvalue += M[Row*Width+k]*N[k*Width+Col];
}
P[Row*Width+Col] = Pvalue;
}
else P[Row*Width+Col] = 99.9;
}
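// Note: the else branch above writes P[Row*Width+Col] even when Row or Col lies
// outside the matrix, which is an out-of-bounds store whenever the launch grid is
// larger than Width x Width threads. It is only safe here because the host launches
// exactly Width x Width threads (see matrixMultiplication below).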
void matrixMultiplication(float *M, float *N, float *P, int Width){
// declare the number of blocks per grid and the number of threads per block
int th = Width;
int bl = 1;
dim3 threadsPerBlock(th,th);
dim3 blocksPerGrid(bl,bl);
printf("Kernel started: %d blocks, %d threads.\n", bl, th);
matrixMultiplicationKernel<<<blocksPerGrid,threadsPerBlock>>>(M, N, P, Width);
}
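// The launch above uses a single block of Width x Width threads, so Width is
// effectively capped at 32 (1024 threads per block on most devices). A sketch of a
// tiled multi-block configuration for larger matrices - an assumed 16x16 tile size,
// not part of the original program - could look like:
//
// void matrixMultiplicationLarge(float *M, float *N, float *P, int Width){
//     dim3 threadsPerBlock(16, 16);
//     dim3 blocksPerGrid((Width + 15) / 16, (Width + 15) / 16);
//     matrixMultiplicationKernel<<<blocksPerGrid, threadsPerBlock>>>(M, N, P, Width);
// }
//
// This would also require removing or guarding the kernel's out-of-range else branch
// noted above.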
void PrintMatrix(float* M, int Width)
{
for(int i = 0; i < Width; i++)
{
for(int j = 0; j < Width; j++)
printf("%f ",M[i*Width+j]);
printf("\n");
}
printf("\n");
}
int main(void)
{
printf("Starting the program:\n");
cudaError_t err = cudaSuccess;
int matrix_size = 10;
int num_of_elements = matrix_size * matrix_size;
size_t size = num_of_elements * sizeof(float);
printf("matrix [%d x %d] multiplication.\n", matrix_size, matrix_size);
//==========================HOST===============================================
	//allocate matrices (two input ones, one output one):
	//a matrix is stored as a flat vector in memory - that is how the GPU stores it,
	//so for simpler indexing the same representation is used on the host
printf("Started variables allocation for the host.\n");
float *M_h = (float *)malloc(size);
float *N_h = (float *)malloc(size);
float *P_h = (float *)malloc(size);
if(M_h == NULL || N_h == NULL || P_h == NULL)
{
fprintf(stderr, "Failed to allocate host matrix!\n");
exit(EXIT_FAILURE);
}else printf("Allocation on host successful.\n");
//initialize matrices:
printf("Started initialization.\n");
for(int i = 0; i < num_of_elements; i++)
{
M_h[i] = rand()/(float)RAND_MAX;
N_h[i] = rand()/(float)RAND_MAX;
}
printf("Initialization fnished.\n");
//==========================DEVICE==============================================
	//allocate matrices on the device:
printf("Started variables allocation for the device.\n");
printf("First matrix.\n");
float *M_d;
err = cudaMalloc((void**)&M_d, size);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate host matrix!\n");
exit(EXIT_FAILURE);
} else printf("Allocation successful.\n");
printf("Second matrix.\n");
float *N_d;
err = cudaMalloc((void**)&N_d, size);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate host matrix!\n");
exit(EXIT_FAILURE);
} else printf("Allocation successful.\n");
printf("Third matrix.\n");
float *P_d;
err = cudaMalloc((void**)&P_d, size);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate host matrix!\n");
exit(EXIT_FAILURE);
} else printf("Allocation successful.\n");
	//copy matrices to the device:
printf("Started variables copying into the device.\n");
printf("First matrix.\n");
err = cudaMemcpy(M_d, M_h, size, cudaMemcpyHostToDevice);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to copy first matrix!\n");
exit(EXIT_FAILURE);
} else printf("Copying successful.\n");
printf("Second matrix.\n");
err = cudaMemcpy(N_d, N_h, size, cudaMemcpyHostToDevice);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to copy second matrix!\n");
exit(EXIT_FAILURE);
} else printf("Copying successful.\n");
//calculations:
matrixMultiplication(M_d, N_d, P_d, matrix_size);
err = cudaGetLastError();
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to launch kernel. Error: %s.\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
} else printf("Kerel operations successful.\n");
printf("Started variables copying from the device.\n");
err = cudaMemcpy(P_h, P_d, size, cudaMemcpyDeviceToHost);
if(err != cudaSuccess)
{
fprintf(stderr, "Failed to copy result matrix. Error: %s.\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
} else printf("Copying successful.\n");
//==========================TEST===============================================
//PrintMatrix(M_h, matrix_size);
//PrintMatrix(N_h, matrix_size);
//PrintMatrix(P_h, matrix_size);
for(int i = 0; i < matrix_size; i++)
{
for(int j = 0; j < matrix_size; j++)
{
float tmp = 0;
for(int k = 0; k < matrix_size; k++)
tmp += M_h[i*matrix_size + k] * N_h[k*matrix_size + j];
//debug line:
//printf("%f ",tmp);
if(fabs(tmp - P_h[i*matrix_size + j]) > 1e-3)
{
fprintf(stderr, "Verification test failed.!\nElement at index (%d, %d) should be %f, but is %f. \n",
i,j,tmp,P_h[i*matrix_size + j]);
exit(EXIT_FAILURE);
}
}
}
printf("Test PASSED\n");
// Free device global memory
err = cudaFree(M_d);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device matrix M (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(N_d);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device matrix N (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(P_d);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device matrix P (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(M_h);
free(N_h);
free(P_h);
printf("Done\n");
return 0;
}
|
256a471fee15a995e720d1016a371752f6ce2c52.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
// declare texture reference for 1D float texture
texture<float4, 1, hipReadModeElementType> tex;
texture<float, 1, hipReadModeElementType> txt;
__device__ float4 sortElem(float4 r) {
float4 nr;
nr.x = (r.x > r.y) ? r.y : r.x;
nr.y = (r.y > r.x) ? r.y : r.x;
nr.z = (r.z > r.w) ? r.w : r.z;
nr.w = (r.w > r.z) ? r.w : r.z;
r.x = (nr.x > nr.z) ? nr.z : nr.x;
r.y = (nr.y > nr.w) ? nr.w : nr.y;
r.z = (nr.z > nr.x) ? nr.z : nr.x;
r.w = (nr.w > nr.y) ? nr.w : nr.y;
nr.x = r.x;
nr.y = (r.y > r.z) ? r.z : r.y;
nr.z = (r.z > r.y) ? r.z : r.y;
nr.w = r.w;
return nr;
}
__device__ float4 getLowest(float4 a, float4 b)
{
//float4 na;
a.x = (a.x < b.w) ? a.x : b.w;
a.y = (a.y < b.z) ? a.y : b.z;
a.z = (a.z < b.y) ? a.z : b.y;
a.w = (a.w < b.x) ? a.w : b.x;
return a;
}
__device__ float4 getHighest(float4 a, float4 b)
{
b.x = (a.w >= b.x) ? a.w : b.x;
b.y = (a.z >= b.y) ? a.z : b.y;
b.z = (a.y >= b.z) ? a.y : b.z;
b.w = (a.x >= b.w) ? a.x : b.w;
return b;
}
__constant__ int constStartAddr[DIVISIONS + 1];
__constant__ int finalStartAddr[DIVISIONS + 1];
__constant__ int nullElems[DIVISIONS];
__global__ void
mergeSortFirst(float4 *result, int listsize)
{
// Block index
int bx = blockIdx.x;
// Thread index
//int tx = threadIdx.x;
if(bx*blockDim.x + threadIdx.x < listsize/4){
float4 r = tex1Dfetch(tex, (int)(bx*blockDim.x + threadIdx.x));
result[bx * blockDim.x + threadIdx.x] = sortElem(r);
}
}
__global__ void
mergeSortPass(float4 *result, int nrElems, int threadsPerDiv)
{
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
// The division to work on
int division = tid / threadsPerDiv;
if(division >= DIVISIONS) return;
// The block within the division
int int_tid = tid - division * threadsPerDiv;
int Astart = constStartAddr[division] + int_tid * nrElems;
int Bstart = Astart + nrElems/2;
float4 *resStart = &(result[Astart]);
if(Astart >= constStartAddr[division + 1])
return;
if(Bstart >= constStartAddr[division + 1]){
for(int i=0; i<(constStartAddr[division + 1] - Astart); i++)
{
resStart[i] = tex1Dfetch(tex, Astart + i);
}
return;
}
int aidx = 0;
int bidx = 0;
int outidx = 0;
float4 a, b;
a = tex1Dfetch(tex, Astart + aidx);
b = tex1Dfetch(tex, Bstart + bidx);
while(true)//aidx < nrElems/2)// || (bidx < nrElems/2 && (Bstart + bidx < constEndAddr[division])))
{
/**
* For some reason, it's faster to do the texture fetches here than
* after the merge
*/
float4 nextA = tex1Dfetch(tex, Astart + aidx + 1);
float4 nextB = tex1Dfetch(tex, Bstart + bidx + 1);
float4 na = getLowest(a,b);
float4 nb = getHighest(a,b);
a = sortElem(na);
b = sortElem(nb);
// Now, a contains the lowest four elements, sorted
resStart[outidx++] = a;
bool elemsLeftInA;
bool elemsLeftInB;
		elemsLeftInA = (aidx + 1 < nrElems/2); // Astart + aidx + 1 is always less than the division border
elemsLeftInB = (bidx + 1 < nrElems/2) && (Bstart + bidx + 1 < constStartAddr[division + 1]);
if(elemsLeftInA){
if(elemsLeftInB){
if(nextA.x < nextB.x) { aidx += 1; a = nextA; }
else { bidx += 1; a = nextB; }
}
else {
aidx += 1; a = nextA;
}
}
else {
if(elemsLeftInB){
bidx += 1; a = nextB;
}
else {
break;
}
}
}
resStart[outidx++] = b;
}
__global__ void
mergepack(float *orig, float *result)
{
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int division = blockIdx.y;
if((finalStartAddr[division] + idx) >= finalStartAddr[division + 1]) return;
result[finalStartAddr[division] + idx] = orig[constStartAddr[division]*4 + nullElems[division] + idx];
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
| 256a471fee15a995e720d1016a371752f6ce2c52.cu | #ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
// declare texture reference for 1D float texture
texture<float4, 1, cudaReadModeElementType> tex;
texture<float, 1, cudaReadModeElementType> txt;
__device__ float4 sortElem(float4 r) {
float4 nr;
nr.x = (r.x > r.y) ? r.y : r.x;
nr.y = (r.y > r.x) ? r.y : r.x;
nr.z = (r.z > r.w) ? r.w : r.z;
nr.w = (r.w > r.z) ? r.w : r.z;
r.x = (nr.x > nr.z) ? nr.z : nr.x;
r.y = (nr.y > nr.w) ? nr.w : nr.y;
r.z = (nr.z > nr.x) ? nr.z : nr.x;
r.w = (nr.w > nr.y) ? nr.w : nr.y;
nr.x = r.x;
nr.y = (r.y > r.z) ? r.z : r.y;
nr.z = (r.z > r.y) ? r.z : r.y;
nr.w = r.w;
return nr;
}
__device__ float4 getLowest(float4 a, float4 b)
{
//float4 na;
a.x = (a.x < b.w) ? a.x : b.w;
a.y = (a.y < b.z) ? a.y : b.z;
a.z = (a.z < b.y) ? a.z : b.y;
a.w = (a.w < b.x) ? a.w : b.x;
return a;
}
__device__ float4 getHighest(float4 a, float4 b)
{
b.x = (a.w >= b.x) ? a.w : b.x;
b.y = (a.z >= b.y) ? a.z : b.y;
b.z = (a.y >= b.z) ? a.y : b.z;
b.w = (a.x >= b.w) ? a.x : b.w;
return b;
}
__constant__ int constStartAddr[DIVISIONS + 1];
__constant__ int finalStartAddr[DIVISIONS + 1];
__constant__ int nullElems[DIVISIONS];
__global__ void
mergeSortFirst(float4 *result, int listsize)
{
// Block index
int bx = blockIdx.x;
// Thread index
//int tx = threadIdx.x;
if(bx*blockDim.x + threadIdx.x < listsize/4){
float4 r = tex1Dfetch(tex, (int)(bx*blockDim.x + threadIdx.x));
result[bx * blockDim.x + threadIdx.x] = sortElem(r);
}
}
__global__ void
mergeSortPass(float4 *result, int nrElems, int threadsPerDiv)
{
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
// The division to work on
int division = tid / threadsPerDiv;
if(division >= DIVISIONS) return;
// The block within the division
int int_tid = tid - division * threadsPerDiv;
int Astart = constStartAddr[division] + int_tid * nrElems;
int Bstart = Astart + nrElems/2;
float4 *resStart = &(result[Astart]);
if(Astart >= constStartAddr[division + 1])
return;
if(Bstart >= constStartAddr[division + 1]){
for(int i=0; i<(constStartAddr[division + 1] - Astart); i++)
{
resStart[i] = tex1Dfetch(tex, Astart + i);
}
return;
}
int aidx = 0;
int bidx = 0;
int outidx = 0;
float4 a, b;
a = tex1Dfetch(tex, Astart + aidx);
b = tex1Dfetch(tex, Bstart + bidx);
while(true)//aidx < nrElems/2)// || (bidx < nrElems/2 && (Bstart + bidx < constEndAddr[division])))
{
/**
* For some reason, it's faster to do the texture fetches here than
* after the merge
*/
float4 nextA = tex1Dfetch(tex, Astart + aidx + 1);
float4 nextB = tex1Dfetch(tex, Bstart + bidx + 1);
float4 na = getLowest(a,b);
float4 nb = getHighest(a,b);
a = sortElem(na);
b = sortElem(nb);
// Now, a contains the lowest four elements, sorted
resStart[outidx++] = a;
bool elemsLeftInA;
bool elemsLeftInB;
		elemsLeftInA = (aidx + 1 < nrElems/2); // Astart + aidx + 1 is always less than the division border
elemsLeftInB = (bidx + 1 < nrElems/2) && (Bstart + bidx + 1 < constStartAddr[division + 1]);
if(elemsLeftInA){
if(elemsLeftInB){
if(nextA.x < nextB.x) { aidx += 1; a = nextA; }
else { bidx += 1; a = nextB; }
}
else {
aidx += 1; a = nextA;
}
}
else {
if(elemsLeftInB){
bidx += 1; a = nextB;
}
else {
break;
}
}
}
resStart[outidx++] = b;
}
__global__ void
mergepack(float *orig, float *result)
{
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int division = blockIdx.y;
if((finalStartAddr[division] + idx) >= finalStartAddr[division + 1]) return;
result[finalStartAddr[division] + idx] = orig[constStartAddr[division]*4 + nullElems[division] + idx];
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
|
0a606b0aef5477d597bc5518cf7efa3731bb4a04.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
* Full license terms provided in LICENSE.md file.
*/
#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <NvInfer.h>
#include <opencv2/opencv.hpp>
#include "classify_image/utils.h"
#include <unistd.h>
using namespace std;
using namespace nvinfer1;
class Logger : public ILogger
{
void log(Severity severity, const char * msg) override
{
if (severity != Severity::kINFO)
cout << msg << endl;
}
} gLogger;
/**
* image_file: path to image
* plan_file: path of the serialized engine file
* label_file: file with <class_name> per line
* input_name: name of the input tensor
* output_name: name of the output tensor
* preprocessing_fn: 'vgg' or 'inception'
*/
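// Example invocation (illustrative only - the file names and tensor names below are
// placeholders, not files shipped with this program):
//   ./classify_image data/dog.jpg data/resnet_v1_50.plan data/imagenet_labels.txt input output vgg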
int main(int argc, char *argv[])
{
if (argc != 7)
{
cout << "Usage: classify_image <image_file> <plan_file> <label_file> <input_name> <output_name> <preprocessing_fn>\n";
return 0;
}
string imageFilename = argv[1];
string planFilename = argv[2];
string labelFilename = argv[3];
string inputName = argv[4];
string outputName = argv[5];
string preprocessingFn = argv[6];
/* load the engine */
cout << "Loading TensorRT engine from plan file..." << endl;
ifstream planFile(planFilename);
if (!planFile.is_open())
{
cout << "Could not open plan file." << endl;
return 1;
}
stringstream planBuffer;
planBuffer << planFile.rdbuf();
string plan = planBuffer.str();
IRuntime *runtime = createInferRuntime(gLogger);
ICudaEngine *engine = runtime->deserializeCudaEngine((void*)plan.data(), plan.size(), nullptr);
IExecutionContext *context = engine->createExecutionContext();
/* get the input / output dimensions */
int inputBindingIndex, outputBindingIndex;
inputBindingIndex = engine->getBindingIndex(inputName.c_str());
outputBindingIndex = engine->getBindingIndex(outputName.c_str());
if (inputBindingIndex < 0)
{
cout << "Invalid input name." << endl;
return 1;
}
if (outputBindingIndex < 0)
{
cout << "Invalid output name." << endl;
return 1;
}
Dims inputDims, outputDims;
inputDims = engine->getBindingDimensions(inputBindingIndex);
outputDims = engine->getBindingDimensions(outputBindingIndex);
int inputWidth, inputHeight;
inputHeight = inputDims.d[1];
inputWidth = inputDims.d[2];
/* read image, convert color, and resize */
cout << "Preprocessing input..." << endl;
cv::Mat image = cv::imread(imageFilename, CV_LOAD_IMAGE_COLOR);
if (image.data == NULL)
{
cout << "Could not read image from file." << endl;
return 1;
}
cv::cvtColor(image, image, cv::COLOR_BGR2RGB, 3);
cv::resize(image, image, cv::Size(inputWidth, inputHeight));
/* convert from uint8+NHWC to float+NCHW */
float *inputDataHost, *outputDataHost;
size_t numInput, numOutput;
numInput = numTensorElements(inputDims);
numOutput = numTensorElements(outputDims);
inputDataHost = (float*) malloc(numInput * sizeof(float));
outputDataHost = (float*) malloc(numOutput * sizeof(float));
cvImageToTensor(image, inputDataHost, inputDims);
if (preprocessingFn == "vgg")
preprocessVgg(inputDataHost, inputDims);
else if (preprocessingFn == "inception")
preprocessInception(inputDataHost, inputDims);
else
{
cout << "Invalid preprocessing function argument, must be vgg or inception. \n" << endl;
return 1;
}
/* transfer to device */
float *inputDataDevice, *outputDataDevice;
hipMalloc(&inputDataDevice, numInput * sizeof(float));
hipMalloc(&outputDataDevice, numOutput * sizeof(float));
hipMemcpy(inputDataDevice, inputDataHost, numInput * sizeof(float), hipMemcpyHostToDevice);
void *bindings[2];
bindings[inputBindingIndex] = (void*) inputDataDevice;
bindings[outputBindingIndex] = (void*) outputDataDevice;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
/* execute engine */
  sleep(1); // makes it easy to tell when the inference starts when profiling
cout << "Executing inference engine..." << endl;
const int kBatchSize = 1;
hipEventRecord(start);
context->execute(kBatchSize, bindings);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
cout << "Execution time: " << milliseconds << " ms" << endl;
/* transfer output back to host */
hipMemcpy(outputDataHost, outputDataDevice, numOutput * sizeof(float), hipMemcpyDeviceToHost);
  sleep(1); // makes it easy to tell when the inference ends when profiling
/* parse output */
vector<size_t> sortedIndices = argsort(outputDataHost, outputDims);
ifstream labelsFile(labelFilename);
if (!labelsFile.is_open())
{
cout << "\nCould not open label file." << endl;
return 1;
}
vector<string> labelMap;
string label;
while(getline(labelsFile, label))
{
labelMap.push_back(label);
}
cout << "\nThe top-5 indices are: ";
for (int i = 0; i < 5; i++)
cout << endl << i << ". " << labelMap[sortedIndices[i]] << " (" << outputDataHost[sortedIndices[i]] * 100 << "%)";
cout << endl;
/* clean up */
runtime->destroy();
engine->destroy();
context->destroy();
free(inputDataHost);
free(outputDataHost);
hipFree(inputDataDevice);
hipFree(outputDataDevice);
return 0;
}
| 0a606b0aef5477d597bc5518cf7efa3731bb4a04.cu | /**
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
* Full license terms provided in LICENSE.md file.
*/
#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <NvInfer.h>
#include <opencv2/opencv.hpp>
#include "classify_image/utils.h"
#include <unistd.h>
using namespace std;
using namespace nvinfer1;
class Logger : public ILogger
{
void log(Severity severity, const char * msg) override
{
if (severity != Severity::kINFO)
cout << msg << endl;
}
} gLogger;
/**
* image_file: path to image
* plan_file: path of the serialized engine file
* label_file: file with <class_name> per line
* input_name: name of the input tensor
* output_name: name of the output tensor
* preprocessing_fn: 'vgg' or 'inception'
*/
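// Example invocation (illustrative only - the file names and tensor names below are
// placeholders, not files shipped with this program):
//   ./classify_image data/dog.jpg data/resnet_v1_50.plan data/imagenet_labels.txt input output vgg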
int main(int argc, char *argv[])
{
if (argc != 7)
{
cout << "Usage: classify_image <image_file> <plan_file> <label_file> <input_name> <output_name> <preprocessing_fn>\n";
return 0;
}
string imageFilename = argv[1];
string planFilename = argv[2];
string labelFilename = argv[3];
string inputName = argv[4];
string outputName = argv[5];
string preprocessingFn = argv[6];
/* load the engine */
cout << "Loading TensorRT engine from plan file..." << endl;
ifstream planFile(planFilename);
if (!planFile.is_open())
{
cout << "Could not open plan file." << endl;
return 1;
}
stringstream planBuffer;
planBuffer << planFile.rdbuf();
string plan = planBuffer.str();
IRuntime *runtime = createInferRuntime(gLogger);
ICudaEngine *engine = runtime->deserializeCudaEngine((void*)plan.data(), plan.size(), nullptr);
IExecutionContext *context = engine->createExecutionContext();
/* get the input / output dimensions */
int inputBindingIndex, outputBindingIndex;
inputBindingIndex = engine->getBindingIndex(inputName.c_str());
outputBindingIndex = engine->getBindingIndex(outputName.c_str());
if (inputBindingIndex < 0)
{
cout << "Invalid input name." << endl;
return 1;
}
if (outputBindingIndex < 0)
{
cout << "Invalid output name." << endl;
return 1;
}
Dims inputDims, outputDims;
inputDims = engine->getBindingDimensions(inputBindingIndex);
outputDims = engine->getBindingDimensions(outputBindingIndex);
int inputWidth, inputHeight;
inputHeight = inputDims.d[1];
inputWidth = inputDims.d[2];
/* read image, convert color, and resize */
cout << "Preprocessing input..." << endl;
cv::Mat image = cv::imread(imageFilename, CV_LOAD_IMAGE_COLOR);
if (image.data == NULL)
{
cout << "Could not read image from file." << endl;
return 1;
}
cv::cvtColor(image, image, cv::COLOR_BGR2RGB, 3);
cv::resize(image, image, cv::Size(inputWidth, inputHeight));
/* convert from uint8+NHWC to float+NCHW */
float *inputDataHost, *outputDataHost;
size_t numInput, numOutput;
numInput = numTensorElements(inputDims);
numOutput = numTensorElements(outputDims);
inputDataHost = (float*) malloc(numInput * sizeof(float));
outputDataHost = (float*) malloc(numOutput * sizeof(float));
cvImageToTensor(image, inputDataHost, inputDims);
if (preprocessingFn == "vgg")
preprocessVgg(inputDataHost, inputDims);
else if (preprocessingFn == "inception")
preprocessInception(inputDataHost, inputDims);
else
{
cout << "Invalid preprocessing function argument, must be vgg or inception. \n" << endl;
return 1;
}
/* transfer to device */
float *inputDataDevice, *outputDataDevice;
cudaMalloc(&inputDataDevice, numInput * sizeof(float));
cudaMalloc(&outputDataDevice, numOutput * sizeof(float));
cudaMemcpy(inputDataDevice, inputDataHost, numInput * sizeof(float), cudaMemcpyHostToDevice);
void *bindings[2];
bindings[inputBindingIndex] = (void*) inputDataDevice;
bindings[outputBindingIndex] = (void*) outputDataDevice;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
/* execute engine */
  sleep(1); // makes it easy to tell when the inference starts when profiling
cout << "Executing inference engine..." << endl;
const int kBatchSize = 1;
cudaEventRecord(start);
context->execute(kBatchSize, bindings);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cout << "Execution time: " << milliseconds << " ms" << endl;
/* transfer output back to host */
cudaMemcpy(outputDataHost, outputDataDevice, numOutput * sizeof(float), cudaMemcpyDeviceToHost);
  sleep(1); // makes it easy to tell when the inference ends when profiling
/* parse output */
vector<size_t> sortedIndices = argsort(outputDataHost, outputDims);
ifstream labelsFile(labelFilename);
if (!labelsFile.is_open())
{
cout << "\nCould not open label file." << endl;
return 1;
}
vector<string> labelMap;
string label;
while(getline(labelsFile, label))
{
labelMap.push_back(label);
}
cout << "\nThe top-5 indices are: ";
for (int i = 0; i < 5; i++)
cout << endl << i << ". " << labelMap[sortedIndices[i]] << " (" << outputDataHost[sortedIndices[i]] * 100 << "%)";
cout << endl;
/* clean up */
runtime->destroy();
engine->destroy();
context->destroy();
free(inputDataHost);
free(outputDataHost);
cudaFree(inputDataDevice);
cudaFree(outputDataDevice);
return 0;
}
|
fc356891e1a49242ef6504fffc14c122129db4ee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MyInc.h"
// Question 1 using global memory
// Mergesmall_k
__global__ void MergePathGPU_1024(TYPE *A, TYPE *B, TYPE *M, int sizeA, int sizeB)
{
int i = threadIdx.x ;
Point K, P, Q;
int offset ;
	if (i >= (sizeA + sizeB)) { return ; } // handle threads past the end of the output
if (i > sizeA)
{
K.x = i - sizeA ; K.y = sizeA ;
P.x = sizeA ; P.y = i - sizeA ;
}
else // x ~ horizontal
{
K.x = 0 ; K.y = i ;
P.x = i ; P.y = 0 ;
}
while (1)
{
offset = abs(K.y - P.y) / 2 ;
Q.x = K.x + offset ; Q.y = K.y - offset ;
if ( (Q.y >= 0) && (Q.x <= sizeB) &&
( (Q.y == sizeA) || (Q.x == 0) || (A[Q.y] > B[Q.x -1])) )
{
if ((Q.x == sizeB) || (Q.y == 0) || (A[Q.y-1] <= B[Q.x]))
{
if ((Q.y < sizeA) && ((Q.x == sizeB) || (A[Q.y] <= B[Q.x])))
{ M[i] = A[Q.y] ; }
else
{ M[i] = B[Q.x] ; }
break ;
}
else
{ K.x = Q.x + 1 ; K.y = Q.y - 1 ; }
}
else
{ P.x = Q.x -1 ; P.y = Q.y + 1 ; }
}
} // End of MergePathGPU_1024
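// Summary of the loop above: thread i produces output element i by binary-searching
// along the i-th anti-diagonal of the A/B merge grid. K and P bound the remaining
// search interval, Q is the midpoint probe; the comparisons against A[Q.y-1], A[Q.y],
// B[Q.x-1] and B[Q.x] detect the unique point where the merge path crosses the
// diagonal, and the smaller of A[Q.y] and B[Q.x] at that point is written to M[i].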
//Question 1 using shared memory
// Mergesmall_k_shared
__global__ void MergePathGPU_1024_shared(TYPE *GlobalCudaA, TYPE *GlobalCudaB, TYPE *M, int sizeA, int sizeB)
{
extern __shared__ TYPE dataAB[] ;
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
	if (tid >= (sizeA + sizeB)) { return ; } // handle threads past the end of the output
	// Each thread loads its element into shared memory
	dataAB[tid] = (i < sizeA)?GlobalCudaA[i]:GlobalCudaB[i-sizeA] ;
	// Wait until every thread has finished loading
	__syncthreads();
	// Rebase the pointers so that A and B point into the shared-memory buffer
TYPE * A = dataAB ;
TYPE * B = dataAB + sizeA ;
Point K, P, Q;
int offset ;
if (i > sizeA)
{
K.x = i - sizeA ; K.y = sizeA ;
P.x = sizeA ; P.y = i - sizeA ;
}
else // x ~ horizontal
{
K.x = 0 ; K.y = i ;
P.x = i ; P.y = 0 ;
}
while (1)
{
offset = abs(K.y - P.y) / 2 ;
Q.x = K.x + offset ; Q.y = K.y - offset ;
if ( (Q.y >= 0) && (Q.x <= sizeB) &&
( (Q.y == sizeA) || (Q.x == 0) || (A[Q.y] > B[Q.x -1])) )
{
if ((Q.x == sizeB) || (Q.y == 0) || (A[Q.y-1] <= B[Q.x]))
{
if ((Q.y < sizeA) && ((Q.x == sizeB) || (A[Q.y] <= B[Q.x])))
{ M[i] = A[Q.y] ; }
else
{ M[i] = B[Q.x] ; }
break ;
}
else
{ K.x = Q.x + 1 ; K.y = Q.y - 1 ; }
}
else
{ P.x = Q.x -1 ; P.y = Q.y + 1 ; }
}
} // End of MergePathGPU_1024
//Question 2
// Merge within the windows obtained from the diagonal search
// mergeBig k
__global__ void mergeGPU(TYPE * CudaVecteurA, TYPE * CudaVecteurB, TYPE * CudaVecteurC, int * CudaDiagAy, int * CudaDiagBx , int nbthread)
{
	// int i = threadIdx.x ; // thread i writes the i-th element
	int i = blockIdx.x * blockDim.x + threadIdx.x; // thread i writes the i-th output element
	int diag = (i / nbthread) ; // which window does it fall into?
int indC = nbthread * diag ;
TYPE *A = CudaVecteurA+CudaDiagAy[diag] ;
TYPE *B = CudaVecteurB+CudaDiagBx[diag] ;
TYPE *M = CudaVecteurC + indC ;
int sizeA = CudaDiagAy[diag+1]-CudaDiagAy[diag] ;
int sizeB = CudaDiagBx[diag+1]-CudaDiagBx[diag] ;
Point K, P, Q;
int offset ;
	i = i % nbthread ; // rebase i into the window-local index space
	if (i >= (sizeA + sizeB)) { return ; } // handle threads past the end of the window
if (i > sizeA)
{
K.x = i - sizeA ; K.y = sizeA ;
P.x = sizeA ; P.y = i - sizeA ;
}
else // x ~ horizontal
{
K.x = 0 ; K.y = i ;
P.x = i ; P.y = 0 ;
}
while (1)
{
offset = abs(K.y - P.y) / 2 ;
Q.x = K.x + offset ; Q.y = K.y - offset ;
if ( (Q.y >= 0) && (Q.x <= sizeB) &&
( (Q.y == sizeA) || (Q.x == 0) || (A[Q.y] > B[Q.x -1])) )
{
if ((Q.x == sizeB) || (Q.y == 0) || (A[Q.y-1] <= B[Q.x]))
{
if ((Q.y < sizeA) && ((Q.x == sizeB) || (A[Q.y] <= B[Q.x])))
{ M[i] = A[Q.y] ; }
else
{ M[i] = B[Q.x] ; }
break ;
}
else
{ K.x = Q.x + 1 ; K.y = Q.y - 1 ; }
}
else
{ P.x = Q.x -1 ; P.y = Q.y + 1 ; }
}
} // End of MergeGPU
// Initialize the diagonal endpoints
__global__ void initDiagGPU(int SizeA , int SizeB, int * CudaDiagBx, int * CudaDiagAy, int NbWindows)
{
CudaDiagBx[0] = CudaDiagAy[0] = 0 ;
CudaDiagBx[NbWindows] = SizeB ;
CudaDiagAy[NbWindows] = SizeA ;
}
// Question 2
// Compute the diagonal split points
// PathBig k
__global__ void AnalyseDiagonalesGPU(TYPE * CudaVecteurA, TYPE * CudaVecteurB, int sizeA , int sizeB, int * CudaDiagBx, int * CudaDiagAy, int nbthread)
{
	// int i = blockIdx.x * blockDim.x + threadIdx.x; // thread i writes the i-th element
	int nth = threadIdx.x; // this thread explores the nth diagonal
Point K, P, Q ;
int px , py ;
TYPE * A = CudaVecteurA ;
TYPE * B = CudaVecteurB ;
int offset ;
	int numDiag = (nth+1) * nbthread -1 ; // arrays are indexed from 0 to N-1
if (numDiag > sizeA)
{
K.x = numDiag - sizeA ; K.y = sizeA ;
P.x = sizeA ; P.y = numDiag - sizeA ;
}
else // x ~ horizontal
{
K.x = 0 ; K.y = numDiag ;
P.x = numDiag ; P.y = 0 ;
}
while (1)
{
offset = abs(K.y - P.y) / 2 ;
Q.x = K.x + offset ; Q.y = K.y - offset ;
if ( (Q.y >= 0) && (Q.x <= sizeB) &&
( (Q.y == sizeA) || (Q.x == 0) || (A[Q.y] > B[Q.x -1])) )
{
if ((Q.x == sizeB) || (Q.y == 0) || (A[Q.y-1] <= B[Q.x]))
{
px = Q.x ; py = Q.y ;
if ((Q.y < sizeA) && ((Q.x == sizeB) || (A[Q.y] <= B[Q.x])))
{ // v = A[Q.y] ;
py ++ ;
}
else
{ // v = B[Q.x] ;
px ++ ;
}
// printf("Analyse Diagonale Point de Sortie ref %d - M %" FMT " Q (A Q.y %d) (B Q.x %d) rv.x %d rv.y %d\n",i,v,Q.y,Q.x,rv->x,rv->y) ;
CudaDiagBx[nth+1] = px ; CudaDiagAy[nth+1] = py ;
				break ; // simulates handing over to the next thread
}
else
{ K.x = Q.x + 1 ; K.y = Q.y - 1 ; }
}
else
{ P.x = Q.x -1 ; P.y = Q.y + 1 ; }
}
} // End of AnalyseDiagonales
| fc356891e1a49242ef6504fffc14c122129db4ee.cu | #include "MyInc.h"
// Question 1 using global memory
// Mergesmall_k
__global__ void MergePathGPU_1024(TYPE *A, TYPE *B, TYPE *M, int sizeA, int sizeB)
{
int i = threadIdx.x ;
Point K, P, Q;
int offset ;
	if (i >= (sizeA + sizeB)) { return ; } // handle threads past the end of the output
if (i > sizeA)
{
K.x = i - sizeA ; K.y = sizeA ;
P.x = sizeA ; P.y = i - sizeA ;
}
else // x ~ horizontal
{
K.x = 0 ; K.y = i ;
P.x = i ; P.y = 0 ;
}
while (1)
{
offset = abs(K.y - P.y) / 2 ;
Q.x = K.x + offset ; Q.y = K.y - offset ;
if ( (Q.y >= 0) && (Q.x <= sizeB) &&
( (Q.y == sizeA) || (Q.x == 0) || (A[Q.y] > B[Q.x -1])) )
{
if ((Q.x == sizeB) || (Q.y == 0) || (A[Q.y-1] <= B[Q.x]))
{
if ((Q.y < sizeA) && ((Q.x == sizeB) || (A[Q.y] <= B[Q.x])))
{ M[i] = A[Q.y] ; }
else
{ M[i] = B[Q.x] ; }
break ;
}
else
{ K.x = Q.x + 1 ; K.y = Q.y - 1 ; }
}
else
{ P.x = Q.x -1 ; P.y = Q.y + 1 ; }
}
} // End of MergePathGPU_1024
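// Summary of the loop above: thread i produces output element i by binary-searching
// along the i-th anti-diagonal of the A/B merge grid. K and P bound the remaining
// search interval, Q is the midpoint probe; the comparisons against A[Q.y-1], A[Q.y],
// B[Q.x-1] and B[Q.x] detect the unique point where the merge path crosses the
// diagonal, and the smaller of A[Q.y] and B[Q.x] at that point is written to M[i].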
//Question 1 using shared memory
// Mergesmall_k_shared
__global__ void MergePathGPU_1024_shared(TYPE *GlobalCudaA, TYPE *GlobalCudaB, TYPE *M, int sizeA, int sizeB)
{
extern __shared__ TYPE dataAB[] ;
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
	if (tid >= (sizeA + sizeB)) { return ; } // handle threads past the end of the output
	// Each thread loads its element into shared memory
	dataAB[tid] = (i < sizeA)?GlobalCudaA[i]:GlobalCudaB[i-sizeA] ;
	// Wait until every thread has finished loading
	__syncthreads();
	// Rebase the pointers so that A and B point into the shared-memory buffer
TYPE * A = dataAB ;
TYPE * B = dataAB + sizeA ;
Point K, P, Q;
int offset ;
if (i > sizeA)
{
K.x = i - sizeA ; K.y = sizeA ;
P.x = sizeA ; P.y = i - sizeA ;
}
else // x ~ horizontal
{
K.x = 0 ; K.y = i ;
P.x = i ; P.y = 0 ;
}
while (1)
{
offset = abs(K.y - P.y) / 2 ;
Q.x = K.x + offset ; Q.y = K.y - offset ;
if ( (Q.y >= 0) && (Q.x <= sizeB) &&
( (Q.y == sizeA) || (Q.x == 0) || (A[Q.y] > B[Q.x -1])) )
{
if ((Q.x == sizeB) || (Q.y == 0) || (A[Q.y-1] <= B[Q.x]))
{
if ((Q.y < sizeA) && ((Q.x == sizeB) || (A[Q.y] <= B[Q.x])))
{ M[i] = A[Q.y] ; }
else
{ M[i] = B[Q.x] ; }
break ;
}
else
{ K.x = Q.x + 1 ; K.y = Q.y - 1 ; }
}
else
{ P.x = Q.x -1 ; P.y = Q.y + 1 ; }
}
} // End of MergePathGPU_1024
//Question 2
// Merge within the windows obtained from the diagonal search
// mergeBig k
__global__ void mergeGPU(TYPE * CudaVecteurA, TYPE * CudaVecteurB, TYPE * CudaVecteurC, int * CudaDiagAy, int * CudaDiagBx , int nbthread)
{
	// int i = threadIdx.x ; // thread i writes the i-th element
	int i = blockIdx.x * blockDim.x + threadIdx.x; // thread i writes the i-th output element
	int diag = (i / nbthread) ; // which window does it fall into?
int indC = nbthread * diag ;
TYPE *A = CudaVecteurA+CudaDiagAy[diag] ;
TYPE *B = CudaVecteurB+CudaDiagBx[diag] ;
TYPE *M = CudaVecteurC + indC ;
int sizeA = CudaDiagAy[diag+1]-CudaDiagAy[diag] ;
int sizeB = CudaDiagBx[diag+1]-CudaDiagBx[diag] ;
Point K, P, Q;
int offset ;
	i = i % nbthread ; // rebase i into the window-local index space
	if (i >= (sizeA + sizeB)) { return ; } // handle threads past the end of the window
if (i > sizeA)
{
K.x = i - sizeA ; K.y = sizeA ;
P.x = sizeA ; P.y = i - sizeA ;
}
else // x ~ horizontal
{
K.x = 0 ; K.y = i ;
P.x = i ; P.y = 0 ;
}
while (1)
{
offset = abs(K.y - P.y) / 2 ;
Q.x = K.x + offset ; Q.y = K.y - offset ;
if ( (Q.y >= 0) && (Q.x <= sizeB) &&
( (Q.y == sizeA) || (Q.x == 0) || (A[Q.y] > B[Q.x -1])) )
{
if ((Q.x == sizeB) || (Q.y == 0) || (A[Q.y-1] <= B[Q.x]))
{
if ((Q.y < sizeA) && ((Q.x == sizeB) || (A[Q.y] <= B[Q.x])))
{ M[i] = A[Q.y] ; }
else
{ M[i] = B[Q.x] ; }
break ;
}
else
{ K.x = Q.x + 1 ; K.y = Q.y - 1 ; }
}
else
{ P.x = Q.x -1 ; P.y = Q.y + 1 ; }
}
} // End of MergeGPU
// Initialize the diagonal endpoints
__global__ void initDiagGPU(int SizeA , int SizeB, int * CudaDiagBx, int * CudaDiagAy, int NbWindows)
{
CudaDiagBx[0] = CudaDiagAy[0] = 0 ;
CudaDiagBx[NbWindows] = SizeB ;
CudaDiagAy[NbWindows] = SizeA ;
}
// Question 2
// Compute the diagonal split points
// PathBig k
__global__ void AnalyseDiagonalesGPU(TYPE * CudaVecteurA, TYPE * CudaVecteurB, int sizeA , int sizeB, int * CudaDiagBx, int * CudaDiagAy, int nbthread)
{
	// int i = blockIdx.x * blockDim.x + threadIdx.x; // thread i writes the i-th element
	int nth = threadIdx.x; // this thread explores the nth diagonal
Point K, P, Q ;
int px , py ;
TYPE * A = CudaVecteurA ;
TYPE * B = CudaVecteurB ;
int offset ;
	int numDiag = (nth+1) * nbthread -1 ; // arrays are indexed from 0 to N-1
if (numDiag > sizeA)
{
K.x = numDiag - sizeA ; K.y = sizeA ;
P.x = sizeA ; P.y = numDiag - sizeA ;
}
else // x ~ horizontal
{
K.x = 0 ; K.y = numDiag ;
P.x = numDiag ; P.y = 0 ;
}
while (1)
{
offset = abs(K.y - P.y) / 2 ;
Q.x = K.x + offset ; Q.y = K.y - offset ;
if ( (Q.y >= 0) && (Q.x <= sizeB) &&
( (Q.y == sizeA) || (Q.x == 0) || (A[Q.y] > B[Q.x -1])) )
{
if ((Q.x == sizeB) || (Q.y == 0) || (A[Q.y-1] <= B[Q.x]))
{
px = Q.x ; py = Q.y ;
if ((Q.y < sizeA) && ((Q.x == sizeB) || (A[Q.y] <= B[Q.x])))
{ // v = A[Q.y] ;
py ++ ;
}
else
{ // v = B[Q.x] ;
px ++ ;
}
// printf("Analyse Diagonale Point de Sortie ref %d - M %" FMT " Q (A Q.y %d) (B Q.x %d) rv.x %d rv.y %d\n",i,v,Q.y,Q.x,rv->x,rv->y) ;
CudaDiagBx[nth+1] = px ; CudaDiagAy[nth+1] = py ;
				break ; // simulates handing over to the next thread
}
else
{ K.x = Q.x + 1 ; K.y = Q.y - 1 ; }
}
else
{ P.x = Q.x -1 ; P.y = Q.y + 1 ; }
}
} // End of AnalyseDiagonales
|
3aa7cf62329bc25703e234605f17682efd9f87e6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/device_functions.h>
int cpu_reduce(int *data, unsigned int n) {
int res = 0;
for (int i = 0; i < n; ++i)
res += data[i];
return res;
}
__global__ void reduce(int *data, int *result) {
extern __shared__ int sdata[];
unsigned int tid = threadIdx.x;
sdata[tid] = data[blockIdx.x * blockDim.x + threadIdx.x];
__syncthreads();
for (unsigned int s = blockDim.x / 2; s>0; s >>= 1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
if (tid == 0) result[blockIdx.x] = sdata[0];
}
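// Note: the kernel above loads data[blockIdx.x*blockDim.x + threadIdx.x] without a
// bounds check, so it assumes the input length is a multiple of blockDim.x; with the
// size used in main() the last block reads past the end of the array. A guarded
// variant (a sketch, not part of the original code) would also take the element
// count n and load
//     sdata[tid] = (i < n) ? data[i] : 0;  // with i = blockIdx.x*blockDim.x + threadIdx.x
// before the reduction loop.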
void print_device_info() {
int dev = 0;
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, dev);
printf("DEVICE: %s\r\n", prop.name);
hipSetDevice(dev);
}
void cpu_expirement(int* data, int size) {
clock_t startc, end;
double cpu_time_used;
startc = clock();
int cpu_sum = cpu_reduce(data, size);
end = clock();
cpu_time_used = ((double)(end - startc)) / CLOCKS_PER_SEC;
cpu_time_used *= 1000;
printf("CPU: TIME=%fms; RESULT=%d\n", cpu_time_used, cpu_sum);
}
void gpu_experiment(int* data, int size) {
float timerValueGPU;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
size_t bytes = size * sizeof(int);
int blockSize = 512;
const dim3 block(blockSize, 1);
const dim3 grid((size + block.x - 1) / block.x, 1);
int *result = (int *)malloc(grid.x * sizeof(int));
int *device_input;
int *device_output;
hipMalloc((void **)&device_input, bytes);
hipMalloc((void **)&device_output, bytes);
hipMemcpy(device_input, data, bytes, hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipEventRecord(start, 0);
hipLaunchKernelGGL(( reduce) , dim3(grid), dim3(block), blockSize * sizeof(int), 0, device_input, device_output);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&timerValueGPU, start, stop);
hipMemcpy(result, device_output, grid.x * sizeof(int), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
int gpu_sum = 0;
for (int i = 0; i < grid.x; ++i) gpu_sum += result[i];
printf("GPU: TIME=%fms; RESULT=%d", timerValueGPU, gpu_sum);
hipDeviceReset();
hipFree(device_output);
hipFree(device_input);
free(result);
}
int main(int argc, char **argv) {
print_device_info();
int size = 6660000;
printf("SIZE:%d\n", size);
size_t bytes = size * sizeof(int);
int *data = (int *)malloc(bytes);
for (int i = 0; i < size; ++i)
data[i] = (int)(rand() & 0xFF);
cpu_expirement(data,size);
gpu_experiment(data, size);
free(data);
return 0;
}
| 3aa7cf62329bc25703e234605f17682efd9f87e6.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
int cpu_reduce(int *data, unsigned int n) {
int res = 0;
for (int i = 0; i < n; ++i)
res += data[i];
return res;
}
__global__ void reduce(int *data, int *result) {
extern __shared__ int sdata[];
unsigned int tid = threadIdx.x;
sdata[tid] = data[blockIdx.x * blockDim.x + threadIdx.x];
__syncthreads();
for (unsigned int s = blockDim.x / 2; s>0; s >>= 1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
if (tid == 0) result[blockIdx.x] = sdata[0];
}
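// Note: the kernel above loads data[blockIdx.x*blockDim.x + threadIdx.x] without a
// bounds check, so it assumes the input length is a multiple of blockDim.x; with the
// size used in main() the last block reads past the end of the array. A guarded
// variant (a sketch, not part of the original code) would also take the element
// count n and load
//     sdata[tid] = (i < n) ? data[i] : 0;  // with i = blockIdx.x*blockDim.x + threadIdx.x
// before the reduction loop.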
void print_device_info() {
int dev = 0;
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, dev);
printf("DEVICE: %s\r\n", prop.name);
cudaSetDevice(dev);
}
void cpu_expirement(int* data, int size) {
clock_t startc, end;
double cpu_time_used;
startc = clock();
int cpu_sum = cpu_reduce(data, size);
end = clock();
cpu_time_used = ((double)(end - startc)) / CLOCKS_PER_SEC;
cpu_time_used *= 1000;
printf("CPU: TIME=%fms; RESULT=%d\n", cpu_time_used, cpu_sum);
}
void gpu_experiment(int* data, int size) {
float timerValueGPU;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
size_t bytes = size * sizeof(int);
int blockSize = 512;
const dim3 block(blockSize, 1);
const dim3 grid((size + block.x - 1) / block.x, 1);
int *result = (int *)malloc(grid.x * sizeof(int));
int *device_input;
int *device_output;
cudaMalloc((void **)&device_input, bytes);
cudaMalloc((void **)&device_output, bytes);
cudaMemcpy(device_input, data, bytes, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
cudaEventRecord(start, 0);
reduce <<<grid, block, blockSize * sizeof(int)>>>(device_input, device_output);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timerValueGPU, start, stop);
cudaMemcpy(result, device_output, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
int gpu_sum = 0;
for (int i = 0; i < grid.x; ++i) gpu_sum += result[i];
printf("GPU: TIME=%fms; RESULT=%d", timerValueGPU, gpu_sum);
cudaDeviceReset();
cudaFree(device_output);
cudaFree(device_input);
free(result);
}
int main(int argc, char **argv) {
print_device_info();
int size = 6660000;
printf("SIZE:%d\n", size);
size_t bytes = size * sizeof(int);
int *data = (int *)malloc(bytes);
for (int i = 0; i < size; ++i)
data[i] = (int)(rand() & 0xFF);
cpu_expirement(data,size);
gpu_experiment(data, size);
free(data);
return 0;
}
|
42123181ac712bf2290bfb4805d41d9e3713f5ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cstdio>
#include<iostream>
#include<cassert>
#include<helper_cuda.h>
#include<chrono>
#include"simpleMultiGPU.h"
using std::cout;
using std::endl;
using namespace std::chrono;
constexpr int MAX_GPU_COUNT = 32;
constexpr int DATA_N = 1048576*32;
__global__ static void
reduceKernel(float *d_result, float *d_input, int N){
    // Think of the input as a matrix whose first row has one entry per thread;
    // each thread then accumulates the data in its own column.
    const int tid = blockIdx.x*blockDim.x+threadIdx.x; // position in the first row
    const int threadN = gridDim.x*blockDim.x;// number of threads in that row
    float sum = 0;
    for(int i = tid; i<N; i+= threadN)// each thread walks down its column
sum += d_input[i];
d_result[tid] = sum;
}
int main(int argc, char*argv[]){
    TGPUplan plan[MAX_GPU_COUNT]; // supports at most 32 GPUs
    float h_SumGPU[MAX_GPU_COUNT]; // supports at most 32 GPUs
// float sumGPU;
// double sumCPU, diff;
constexpr int block = 32;
constexpr int thread = 256;
constexpr int ACCUM_N = block*thread;
int gpu_num;
checkCudaErrors(hipGetDeviceCount(&gpu_num));
if(gpu_num > MAX_GPU_COUNT)
gpu_num = MAX_GPU_COUNT;
cout<<":"<<gpu_num<<" "<<endl;
cout<<""<<endl;
    // split the data evenly across the GPUs
for(int i=0; i<gpu_num; i++)
plan[i].dataN = DATA_N/gpu_num;
    // hand out the remainder
for(int i=0; i<DATA_N%gpu_num; i++)
plan[i].dataN++;
int gpuBase = 0;
for(int i=0; i<gpu_num; i++){
        plan[i].h_Sum = h_SumGPU + i; // this GPU's slot in the h_SumGPU array
gpuBase += plan[i].dataN;
}
for(int i=0; i<gpu_num; i++){
checkCudaErrors(hipSetDevice(i));
checkCudaErrors(hipStreamCreate(&plan[i].stream));
        // allocate memory
checkCudaErrors(hipMalloc((void**)&plan[i].d_Data,
plan[i].dataN*sizeof(float)));
checkCudaErrors(hipMalloc((void**)&plan[i].d_Sum,
ACCUM_N*sizeof(float)));
checkCudaErrors(hipHostMalloc((void**)&plan[i].h_Sum_from_device,
ACCUM_N*sizeof(float)));
checkCudaErrors(hipHostMalloc((void**)&plan[i].h_Data,
plan[i].dataN*sizeof(float)));
        // random initialization
for(int j=0; j<plan[i].dataN; j++)
plan[i].h_Data[j] = (float)rand()/(float)RAND_MAX;
}
auto st = system_clock::now();
    // copy the data from the CPU to each GPU
for(int i=0; i<gpu_num; i++){
checkCudaErrors(hipSetDevice(i));
checkCudaErrors(hipMemcpyAsync(plan[i].d_Data,
plan[i].h_Data,
plan[i].dataN*sizeof(float),
hipMemcpyHostToDevice,
plan[i].stream));
hipLaunchKernelGGL(( reduceKernel), dim3(block), dim3(thread),0,plan[i].stream, plan[i].d_Sum,
plan[i].d_Data,
plan[i].dataN);
getLastCudaError("reduceKernel() executaion failed\n");
checkCudaErrors(hipMemcpyAsync(plan[i].h_Sum_from_device,
plan[i].d_Sum,
ACCUM_N*sizeof(float),
hipMemcpyDeviceToHost,
plan[i].stream));
}
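    // Each device has its own stream, so the host-to-device copy, the kernel launch and
    // the device-to-host copy queued above are asynchronous and can overlap across GPUs;
    // the hipStreamSynchronize() calls in the loop below are what finally wait for the
    // per-device results.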
for(int i=0; i<gpu_num; i++){
float sum=0;
checkCudaErrors(hipSetDevice(i));
hipStreamSynchronize(plan[i].stream);
for(int j=0; j<ACCUM_N; j++)
sum+= plan[i].h_Sum_from_device[j];
        *(plan[i].h_Sum) = (float)sum; // write the result into its slot in h_SumGPU
checkCudaErrors(hipHostFree(plan[i].h_Sum_from_device));
checkCudaErrors(hipFree(plan[i].d_Sum));
checkCudaErrors(hipFree(plan[i].d_Data));
checkCudaErrors(hipStreamDestroy(plan[i].stream));
}
float sumGPU = 0;
for(int i=0; i<gpu_num; i++)
sumGPU += h_SumGPU[i];
auto ed = system_clock::now();
//cout<<" GPU :"<<difftime(ed,st)<<" s"<<endl;
cout<<" GPU :"<<duration_cast<milliseconds>(ed-st).count()<<" ms"<<endl;
for(int i=0;i<gpu_num;i++){
checkCudaErrors(hipSetDevice(i));
checkCudaErrors(hipHostFree(plan[i].h_Data));
}
}
| 42123181ac712bf2290bfb4805d41d9e3713f5ce.cu | #include<cstdio>
#include<iostream>
#include<cassert>
#include<helper_cuda.h>
#include<chrono>
#include"simpleMultiGPU.h"
using std::cout;
using std::endl;
using namespace std::chrono;
constexpr int MAX_GPU_COUNT = 32;
constexpr int DATA_N = 1048576*32;
__global__ static void
reduceKernel(float *d_result, float *d_input, int N){
    // Think of the input as a matrix whose first row has one entry per thread;
    // each thread then accumulates the data in its own column.
    const int tid = blockIdx.x*blockDim.x+threadIdx.x; // position in the first row
    const int threadN = gridDim.x*blockDim.x;// number of threads in that row
    float sum = 0;
    for(int i = tid; i<N; i+= threadN)// each thread walks down its column
sum += d_input[i];
d_result[tid] = sum;
}
int main(int argc, char*argv[]){
    TGPUplan plan[MAX_GPU_COUNT]; // supports at most 32 GPUs
    float h_SumGPU[MAX_GPU_COUNT]; // supports at most 32 GPUs
// float sumGPU;
// double sumCPU, diff;
constexpr int block = 32;
constexpr int thread = 256;
constexpr int ACCUM_N = block*thread;
int gpu_num;
checkCudaErrors(cudaGetDeviceCount(&gpu_num));
if(gpu_num > MAX_GPU_COUNT)
gpu_num = MAX_GPU_COUNT;
cout<<"可用的设备有:"<<gpu_num<<" 个"<<endl;
cout<<"生成数据"<<endl;
    // split the data evenly across the GPUs
for(int i=0; i<gpu_num; i++)
plan[i].dataN = DATA_N/gpu_num;
    // hand out the remainder
for(int i=0; i<DATA_N%gpu_num; i++)
plan[i].dataN++;
int gpuBase = 0;
for(int i=0; i<gpu_num; i++){
        plan[i].h_Sum = h_SumGPU + i; // this GPU's slot in the h_SumGPU array
gpuBase += plan[i].dataN;
}
for(int i=0; i<gpu_num; i++){
checkCudaErrors(cudaSetDevice(i));
checkCudaErrors(cudaStreamCreate(&plan[i].stream));
        // allocate memory
checkCudaErrors(cudaMalloc((void**)&plan[i].d_Data,
plan[i].dataN*sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&plan[i].d_Sum,
ACCUM_N*sizeof(float)));
checkCudaErrors(cudaMallocHost((void**)&plan[i].h_Sum_from_device,
ACCUM_N*sizeof(float)));
checkCudaErrors(cudaMallocHost((void**)&plan[i].h_Data,
plan[i].dataN*sizeof(float)));
        // random initialization
for(int j=0; j<plan[i].dataN; j++)
plan[i].h_Data[j] = (float)rand()/(float)RAND_MAX;
}
auto st = system_clock::now();
    // copy the data from the CPU to each GPU
for(int i=0; i<gpu_num; i++){
checkCudaErrors(cudaSetDevice(i));
checkCudaErrors(cudaMemcpyAsync(plan[i].d_Data,
plan[i].h_Data,
plan[i].dataN*sizeof(float),
cudaMemcpyHostToDevice,
plan[i].stream));
reduceKernel<<<block, thread,0,plan[i].stream>>>(plan[i].d_Sum,
plan[i].d_Data,
plan[i].dataN);
getLastCudaError("reduceKernel() executaion failed\n");
checkCudaErrors(cudaMemcpyAsync(plan[i].h_Sum_from_device,
plan[i].d_Sum,
ACCUM_N*sizeof(float),
cudaMemcpyDeviceToHost,
plan[i].stream));
}
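    // Each device has its own stream, so the host-to-device copy, the kernel launch and
    // the device-to-host copy queued above are asynchronous and can overlap across GPUs;
    // the cudaStreamSynchronize() calls in the loop below are what finally wait for the
    // per-device results.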
for(int i=0; i<gpu_num; i++){
float sum=0;
checkCudaErrors(cudaSetDevice(i));
cudaStreamSynchronize(plan[i].stream);
for(int j=0; j<ACCUM_N; j++)
sum+= plan[i].h_Sum_from_device[j];
        *(plan[i].h_Sum) = (float)sum; // write the result into its slot in h_SumGPU
checkCudaErrors(cudaFreeHost(plan[i].h_Sum_from_device));
checkCudaErrors(cudaFree(plan[i].d_Sum));
checkCudaErrors(cudaFree(plan[i].d_Data));
checkCudaErrors(cudaStreamDestroy(plan[i].stream));
}
float sumGPU = 0;
for(int i=0; i<gpu_num; i++)
sumGPU += h_SumGPU[i];
auto ed = system_clock::now();
//cout<<" GPU 处理时间:"<<difftime(ed,st)<<" s"<<endl;
cout<<" GPU 处理时间:"<<duration_cast<milliseconds>(ed-st).count()<<" ms"<<endl;
for(int i=0;i<gpu_num;i++){
checkCudaErrors(cudaSetDevice(i));
checkCudaErrors(cudaFreeHost(plan[i].h_Data));
}
}
|
3ea57ab7af37ed18522fd8235ad8e14d5e5bea04.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "defs.h"
#include "cuda_defs.h"
#ifdef VISCOSITY
extern __host__ __device__ real kinematic_viscosity(real x1, real x2, real x3);
__global__ void compute_divergence(real *cons, real *vel,
real *dx1, real *dx2, real *dx3, real *x1, real *x2, real *x3,
int nx1, int nx2, int nx3, int size_x1, int size_x12, int ntot, int offset, int nf) {
int i,j,k,indx;
real vm,vc,vp;
for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot; indx+=blockDim.x*gridDim.x) {
unpack_indices(indx,&i,&j,&k,size_x1,size_x12);
if ((i>=-NGHX1+1)&&(i<nx1+2)&&(j>=-NGHX2+1)&&(j<nx2+2)&&(k>=-NGHX3+1)&&(k<nx3+2)) {
/* X1 direction */
vm = cons[indx - 1 + 1*ntot]/cons[indx - 1];
vc = cons[indx + 1*ntot]/cons[indx];
vp = cons[indx + 1 + 1*ntot]/cons[indx + 1];
vel[indx + 1*ntot] = vc;
vel[indx] = (vp-vm)/(2*dx1[i]);
/* X2 direction */
#ifdef DIMS2
vm = cons[indx - size_x1 + 2*ntot]/cons[indx - size_x1];
vc = cons[indx + 2*ntot]/cons[indx];
vp = cons[indx + size_x1 + 2*ntot]/cons[indx + size_x1];
vel[indx + 2*ntot] = vc;
vel[indx] += (vp-vm)/(2*dx2[j]);
#endif
/* X3 direction */
#ifdef DIMS3
vm = cons[indx - size_x12 + 3*ntot]/cons[indx - size_x12];
vc = cons[indx + 3*ntot]/cons[indx];
vp = cons[indx + size_x12 + 3*ntot]/cons[indx + size_x12];
vel[indx + 3*ntot] = vc;
vel[indx] += (vp-vm)/(2*dx3[k]);
#endif
}
}
return;
}
__global__ void viscous_flux(real *vel, real *rho, real *F_1, real *F_2, real *F_3,
real *dx1, real *dx2, real *dx3, real *x1, real *x2, real *x3,
int nx1, int nx2, int nx3, int size_x1, int size_x12, int ntot, int offset, int nf) {
/*
* * * * * * * * * * * * *
* *
* * *
* (i,j+1) (i+1,j+1) *
* * *
* *
* * F2[i,j] * * * * * * * *
* *
* * *
F1[i-1,j] (i,j) F1[i,j] (i+1,j) *
* * *
* *
* * F2[i,j-1]* * * * * * * *
* *
* * *
* (i,j-1) (i+1,j-1) *
* * *
* *
* * * * * * * * * * * * *
*/
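    // Reading the stencil above: F1[i,j] is the flux through the face between cells
    // (i,j) and (i+1,j), and F2[i,j] the flux through the face between (i,j) and
    // (i,j+1). The stress terms s1..s3 computed below are evaluated at those faces:
    // the derivative normal to a face is a plain difference across it, while the
    // tangential derivatives average the two neighbouring centered differences.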
int i,j,k;
int indx;
real s1,s2,s3,visc;
real *divv = &vel[0];
real *vx1 = &vel[ntot];
real *vx2 = &vel[2*ntot];
real *vx3 = &vel[3*ntot];
real *F11 = &F_1[1*ntot];
real *F12 = &F_1[2*ntot];
real *F13 = &F_1[3*ntot];
real *F1e = &F_1[4*ntot];
#ifdef DIMS2
real *F21 = &F_2[1*ntot];
real *F22 = &F_2[2*ntot];
real *F23 = &F_2[3*ntot];
real *F2e = &F_2[4*ntot];
#endif
#ifdef DIMS3
real *F31 = &F_3[1*ntot];
real *F32 = &F_3[2*ntot];
real *F33 = &F_3[3*ntot];
real *F3e = &F_3[4*ntot];
#endif
s1=0; s2=0; s3=0;
for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot; indx+=blockDim.x*gridDim.x) {
unpack_indices(indx,&i,&j,&k,size_x1,size_x12);
/* X1 direction */
if ((i>=-NGHX1)&&(i<nx1+2)&&(j>=-NGHX2+1)&&(j<nx2+2)&&(k>=-NGHX3+1)&&(k<nx3+2)) {
visc= .5*(rho[indx] + rho[indx+1]) * kinematic_viscosity(x1[i] +.5*dx1[i], x2[j], x3[k]);
s1 = 2*(vx1[indx+1] - vx1[indx])/dx1[i] + (divv[indx] + divv[indx+1])/3.;
s2 = (vx2[indx+1]-vx2[indx])/dx1[i];
#ifdef DIMS2
s2 += .25*( (vx1[indx+1 + size_x1] - vx1[indx+1 - size_x1])/dx2[j+1] + (vx1[indx+size_x1]-vx1[indx-size_x1])/dx2[j]);
#endif
s3 = (vx3[indx+1]-vx3[indx])/dx1[i];
#ifdef DIMS3
s3 += .25*( (vx1[indx+1 + size_x12] - vx1[indx+1 - size_x12])/dx3[k+1] + (vx1[indx+size_x12]-vx1[indx-size_x12])/dx3[k]);
#endif
F11[indx] -= visc*s1;
F12[indx] -= visc*s2;
F13[indx] -= visc*s3;
F1e[indx] -= .5*visc*( (vx1[indx + 1] + vx1[indx])*s1
+(vx2[indx + 1] + vx2[indx])*s2
+(vx3[indx + 1] + vx3[indx])*s3);
}
/* X2 direction */
#ifdef DIMS2
if ((i>=-NGHX1+1)&&(i<nx1+2)&&(j>=-NGHX2)&&(j<nx2+2)&&(k>=-NGHX3+1)&&(k<nx3+2)) {
visc= .5*(rho[indx] + rho[indx+size_x1])* kinematic_viscosity(x1[i], x2[j] + .5*dx2[j], x3[k]);
s1 = .25*( (vx2[indx+size_x1 + 1] - vx2[indx+size_x1 - 1])/dx1[i+1] + (vx2[indx+1]-vx2[indx-1])/dx1[i])
+ (vx1[indx+size_x1]-vx1[indx])/dx2[j];
s2 = 2*(vx2[indx+size_x1] - vx2[indx])/dx2[j] + (divv[indx] +divv[indx+size_x1])/3;
s3 = (vx3[indx+size_x1]-vx3[indx])/dx2[j];
#ifdef DIMS3
s3 += .25*( (vx2[indx+size_x1 + size_x12] - vx1[indx+size_x1 - size_x12])/dx3[k+1]
+ (vx2[indx+size_x12]-vx1[indx-size_x12])/dx3[k]);
#endif
F21[indx] -= visc*s1;
F22[indx] -= visc*s2;
F23[indx] -= visc*s3;
F2e[indx] -= .5*visc*( (vx1[indx + size_x1] + vx1[indx])*s1
+(vx2[indx + size_x1] + vx2[indx])*s2
+(vx3[indx + size_x1] + vx3[indx])*s3);
}
#endif
/* X3 direction */
#ifdef DIMS3
if ((i>=-NGHX1+1)&&(i<nx1+2)&&(j>=-NGHX2+1)&&(j<nx2+2)&&(k>=-NGHX3)&&(k<nx3+2)) {
visc= .5*(rho[indx] + rho[indx+size_x12]) * kinematic_viscosity(x1[i], x2[j], x3[k] + .5*dx3[k]);
s1 = .25*( (vx3[indx+size_x12 + 1] - vx3[indx+size_x12 - 1])/dx1[i+1] + (vx3[indx+1]-vx3[indx-1])/dx1[i])
+ (vx1[indx+size_x12]-vx1[indx])/dx3[k];
s2 = .25*( (vx3[indx+size_x12 + size_x1] - vx3[indx+size_x12 - size_x1])/dx2[j+1]
+ (vx3[indx+size_x1]-vx3[indx-size_x1])/dx2[j])
+ (vx2[indx+size_x12]-vx2[indx])/dx3[k];
s3 = 2*(vx3[indx+size_x12] - vx3[indx])/dx3[k] + (divv[indx] +divv[indx+size_x12])/3;
F31[indx] -= visc*s1;
F32[indx] -= visc*s2;
F33[indx] -= visc*s3;
F3e[indx] -= .5*visc*( (vx1[indx + size_x12] + vx1[indx])*s1
+(vx2[indx + size_x12] + vx2[indx])*s2
+(vx3[indx + size_x12] + vx3[indx])*s3);
}
#endif
}
return;
}
#endif
| 3ea57ab7af37ed18522fd8235ad8e14d5e5bea04.cu | #include "defs.h"
#include "cuda_defs.h"
#ifdef VISCOSITY
extern __host__ __device__ real kinematic_viscosity(real x1, real x2, real x3);
__global__ void compute_divergence(real *cons, real *vel,
real *dx1, real *dx2, real *dx3, real *x1, real *x2, real *x3,
int nx1, int nx2, int nx3, int size_x1, int size_x12, int ntot, int offset, int nf) {
int i,j,k,indx;
real vm,vc,vp;
for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot; indx+=blockDim.x*gridDim.x) {
unpack_indices(indx,&i,&j,&k,size_x1,size_x12);
if ((i>=-NGHX1+1)&&(i<nx1+2)&&(j>=-NGHX2+1)&&(j<nx2+2)&&(k>=-NGHX3+1)&&(k<nx3+2)) {
/* X1 direction */
vm = cons[indx - 1 + 1*ntot]/cons[indx - 1];
vc = cons[indx + 1*ntot]/cons[indx];
vp = cons[indx + 1 + 1*ntot]/cons[indx + 1];
vel[indx + 1*ntot] = vc;
vel[indx] = (vp-vm)/(2*dx1[i]);
/* X2 direction */
#ifdef DIMS2
vm = cons[indx - size_x1 + 2*ntot]/cons[indx - size_x1];
vc = cons[indx + 2*ntot]/cons[indx];
vp = cons[indx + size_x1 + 2*ntot]/cons[indx + size_x1];
vel[indx + 2*ntot] = vc;
vel[indx] += (vp-vm)/(2*dx2[j]);
#endif
/* X3 direction */
#ifdef DIMS3
vm = cons[indx - size_x12 + 3*ntot]/cons[indx - size_x12];
vc = cons[indx + 3*ntot]/cons[indx];
vp = cons[indx + size_x12 + 3*ntot]/cons[indx + size_x12];
vel[indx + 3*ntot] = vc;
vel[indx] += (vp-vm)/(2*dx3[k]);
#endif
}
}
return;
}
__global__ void viscous_flux(real *vel, real *rho, real *F_1, real *F_2, real *F_3,
real *dx1, real *dx2, real *dx3, real *x1, real *x2, real *x3,
int nx1, int nx2, int nx3, int size_x1, int size_x12, int ntot, int offset, int nf) {
/*
* * * * * * * * * * * * *
* *
* * *
* (i,j+1) (i+1,j+1) *
* * *
* *
* * F2[i,j] * * * * * * * *
* *
* * *
F1[i-1,j] (i,j) F1[i,j] (i+1,j) *
* * *
* *
* * F2[i,j-1]* * * * * * * *
* *
* * *
* (i,j-1) (i+1,j-1) *
* * *
* *
* * * * * * * * * * * * *
*/
int i,j,k;
int indx;
real s1,s2,s3,visc;
real *divv = &vel[0];
real *vx1 = &vel[ntot];
real *vx2 = &vel[2*ntot];
real *vx3 = &vel[3*ntot];
real *F11 = &F_1[1*ntot];
real *F12 = &F_1[2*ntot];
real *F13 = &F_1[3*ntot];
real *F1e = &F_1[4*ntot];
#ifdef DIMS2
real *F21 = &F_2[1*ntot];
real *F22 = &F_2[2*ntot];
real *F23 = &F_2[3*ntot];
real *F2e = &F_2[4*ntot];
#endif
#ifdef DIMS3
real *F31 = &F_3[1*ntot];
real *F32 = &F_3[2*ntot];
real *F33 = &F_3[3*ntot];
real *F3e = &F_3[4*ntot];
#endif
s1=0; s2=0; s3=0;
for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot; indx+=blockDim.x*gridDim.x) {
unpack_indices(indx,&i,&j,&k,size_x1,size_x12);
/* X1 direction */
if ((i>=-NGHX1)&&(i<nx1+2)&&(j>=-NGHX2+1)&&(j<nx2+2)&&(k>=-NGHX3+1)&&(k<nx3+2)) {
visc= .5*(rho[indx] + rho[indx+1]) * kinematic_viscosity(x1[i] +.5*dx1[i], x2[j], x3[k]);
s1 = 2*(vx1[indx+1] - vx1[indx])/dx1[i] + (divv[indx] + divv[indx+1])/3.;
s2 = (vx2[indx+1]-vx2[indx])/dx1[i];
#ifdef DIMS2
s2 += .25*( (vx1[indx+1 + size_x1] - vx1[indx+1 - size_x1])/dx2[j+1] + (vx1[indx+size_x1]-vx1[indx-size_x1])/dx2[j]);
#endif
s3 = (vx3[indx+1]-vx3[indx])/dx1[i];
#ifdef DIMS3
s3 += .25*( (vx1[indx+1 + size_x12] - vx1[indx+1 - size_x12])/dx3[k+1] + (vx1[indx+size_x12]-vx1[indx-size_x12])/dx3[k]);
#endif
F11[indx] -= visc*s1;
F12[indx] -= visc*s2;
F13[indx] -= visc*s3;
F1e[indx] -= .5*visc*( (vx1[indx + 1] + vx1[indx])*s1
+(vx2[indx + 1] + vx2[indx])*s2
+(vx3[indx + 1] + vx3[indx])*s3);
}
/* X2 direction */
#ifdef DIMS2
if ((i>=-NGHX1+1)&&(i<nx1+2)&&(j>=-NGHX2)&&(j<nx2+2)&&(k>=-NGHX3+1)&&(k<nx3+2)) {
visc= .5*(rho[indx] + rho[indx+size_x1])* kinematic_viscosity(x1[i], x2[j] + .5*dx2[j], x3[k]);
s1 = .25*( (vx2[indx+size_x1 + 1] - vx2[indx+size_x1 - 1])/dx1[i+1] + (vx2[indx+1]-vx2[indx-1])/dx1[i])
+ (vx1[indx+size_x1]-vx1[indx])/dx2[j];
s2 = 2*(vx2[indx+size_x1] - vx2[indx])/dx2[j] + (divv[indx] +divv[indx+size_x1])/3;
s3 = (vx3[indx+size_x1]-vx3[indx])/dx2[j];
#ifdef DIMS3
s3 += .25*( (vx2[indx+size_x1 + size_x12] - vx1[indx+size_x1 - size_x12])/dx3[k+1]
+ (vx2[indx+size_x12]-vx1[indx-size_x12])/dx3[k]);
#endif
F21[indx] -= visc*s1;
F22[indx] -= visc*s2;
F23[indx] -= visc*s3;
F2e[indx] -= .5*visc*( (vx1[indx + size_x1] + vx1[indx])*s1
+(vx2[indx + size_x1] + vx2[indx])*s2
+(vx3[indx + size_x1] + vx3[indx])*s3);
}
#endif
/* X3 direction */
#ifdef DIMS3
if ((i>=-NGHX1+1)&&(i<nx1+2)&&(j>=-NGHX2+1)&&(j<nx2+2)&&(k>=-NGHX3)&&(k<nx3+2)) {
visc= .5*(rho[indx] + rho[indx+size_x12]) * kinematic_viscosity(x1[i], x2[j], x3[k] + .5*dx3[k]);
s1 = .25*( (vx3[indx+size_x12 + 1] - vx3[indx+size_x12 - 1])/dx1[i+1] + (vx3[indx+1]-vx3[indx-1])/dx1[i])
+ (vx1[indx+size_x12]-vx1[indx])/dx3[k];
s2 = .25*( (vx3[indx+size_x12 + size_x1] - vx3[indx+size_x12 - size_x1])/dx2[j+1]
+ (vx3[indx+size_x1]-vx3[indx-size_x1])/dx2[j])
+ (vx2[indx+size_x12]-vx2[indx])/dx3[k];
s3 = 2*(vx3[indx+size_x12] - vx3[indx])/dx3[k] + (divv[indx] +divv[indx+size_x12])/3;
F31[indx] -= visc*s1;
F32[indx] -= visc*s2;
F33[indx] -= visc*s3;
F3e[indx] -= .5*visc*( (vx1[indx + size_x12] + vx1[indx])*s1
+(vx2[indx + size_x12] + vx2[indx])*s2
+(vx3[indx + size_x12] + vx3[indx])*s3);
}
#endif
}
return;
}
#endif
|
22118478bdfd4c0e5e3432a2fd8c638228947529.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* @author Yilu Guo
*
*
* Deep Regression Forests is open source code; you may redistribute it and/or modify it under the
* terms of the 3-clause BSD license. You should have received a copy of the
* 3-clause BSD license along with Deep Regression Forests. If not, see
 * http://www.opensource.org/licenses/BSD-3-Clause for more information.
*/
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/neural_decision_reg_forest_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/sampling.hpp"
#include "caffe/util/neural_decision_util_functions.hpp"
#include "caffe/util/benchmark.hpp"
#ifndef PI
#define PI 3.1415926
#endif
#ifndef GPU_DEBUG
#define GPU_DEBUG 0
#endif
namespace caffe
{
/*
template <typename Dtype>
inline Dtype gaussian_1d(Dtype x, Dtype mu, Dtype sigma_square)
{
sigma_square = ::max(sigma_square, (Dtype) FLT_MIN);
return (Dtype)1.0 / sqrt(2 * PI * sigma_square) * exp(-(x - mu) * (x - mu) / (2 * sigma_square));
}
template <typename Dtype>
Dtype difference(const int n, Dtype* a, Dtype *b)
{
Dtype* c = new Dtype [n];
caffe_sub(n, a, b, c);
Dtype d = caffe_cpu_asum(n, c) / Dtype(n);
delete []c;
return d;
}
*/
template <typename Dtype>
bool isdiff(Dtype x, Dtype y) {
Dtype THRES = 0.000002;
return std::abs(x - y) >= THRES;
}
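/* Row-major index helpers: sub2ind_reg flattens (n, c, h, w) coordinates of an
 * N x C x H x W array into a linear offset, and ind2sub_reg inverts it. */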
__device__ int sub2ind_reg(int n, int c, int h, int w, int N, int C, int H, int W) {
return ((n * C + c) * H + h) * W + w;
}
__device__ int ind2sub_reg(int index, int C, int H, int W, int* n, int* c, int* h, int* w) {
*w = index % W;
*h = (index / W) % H;
*c = (index / (W*H)) % C;
*n = index / (C*W*H);
return 0;
}
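/* Despite the name, this evaluates a one-dimensional Gaussian density
 * N(y; mu, sigma_square); the FLT_MIN terms guard against zero variance and
 * vanishing densities, and num_classes is not used in the computation. */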
template <typename Dtype>
__device__ Dtype multivariate_gaussian_gpu(Dtype y, Dtype mu, Dtype sigma_square, int num_classes){
return (float)1.0 / sqrt(2 * PI * (sigma_square + Dtype(FLT_MIN))) * (exp(-(y - mu) * (y - mu) / (2 * (sigma_square + Dtype(FLT_MIN))))+ Dtype(FLT_MIN));
}
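/* For every (sample, spatial location, tree, stored mini-batch) tuple this
 * accumulates the tree's predictive density of the ground-truth target:
 * p(y | x, tree) = sum_j routing_j(x) * N(y; mu_j, sigma_j^2) over its leaves. */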
template <typename Dtype>
__global__ void kernel_updata_all_reg(int num_outer, int num_inner,
int num_trees, int num_leaf_nodes_per_tree, int num_classes, int iter_times, const Dtype* mu_all, const Dtype* sigma_all,
Dtype const ** routing_vec, Dtype const ** label_vec, Dtype ** tree_prediction_vec) {
int count = num_outer * num_inner * num_trees * iter_times;
CUDA_KERNEL_LOOP(index, count) {
int t, k, i, iter;
int idx = index;
ind2sub_reg(idx, num_outer, num_inner, num_trees, &iter, &i, &k, &t);
const Dtype* label_data = label_vec[iter];
Dtype* tree_prediction_prob_data = tree_prediction_vec[iter];
const Dtype* routing_leaf_prob_data = routing_vec[iter];
const Dtype y = label_data[sub2ind_reg(i, k, 0, 0, num_outer, num_inner, num_classes, 1)];
for(int j = 0; j < num_leaf_nodes_per_tree; j++) {
const Dtype mu = mu_all[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, 1)];
const Dtype sigma_square = sigma_all[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, 1)];
tree_prediction_prob_data[sub2ind_reg(i, k, t, 0, num_outer, num_inner, num_trees, num_classes)] +=
routing_leaf_prob_data[sub2ind_reg(i, k, t, j, num_outer, num_inner, num_trees, num_leaf_nodes_per_tree)] *
max(multivariate_gaussian_gpu(y, mu, sigma_square, num_classes),Dtype(FLT_MIN));
}
}
}
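/* One thread per (tree, leaf): recomputes the leaf's mean and variance as
 * zeta-weighted statistics of the targets, where zeta is the responsibility
 * N(y; mu, sigma^2) * routing / p(y | tree) under the current estimates. */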
template <typename Dtype>
__global__ void kernel_mean_sig_reg(int num_trees, int num_leaf_nodes_per_tree, int num_outer, int num_inner, int iter_times,
const Dtype* mu_all, const Dtype* sigma_all, Dtype const ** label_vec, Dtype const ** routing_vec, Dtype const ** tree_prediction_vec,
Dtype* mean_temp, Dtype* sigma_temp) {
CUDA_KERNEL_LOOP(index, num_trees * num_leaf_nodes_per_tree) {
int t, j;
int idx = index;
int num_classes = 1;
j = idx % num_leaf_nodes_per_tree;
t = idx / num_leaf_nodes_per_tree;
Dtype zeta_sum = (Dtype) 0.0;
Dtype mu_new;
Dtype zeta;
for (int iter = 0; iter < iter_times; iter++){
const Dtype* label_data = label_vec[iter];
const Dtype* tree_prediction_prob_data = tree_prediction_vec[iter];
const Dtype* routing_leaf_prob_data = routing_vec[iter];
for (int i = 0; i < num_outer; i++){
for (int k = 0; k < num_inner; k++){
const Dtype y = label_data[sub2ind_reg(i, k, 0, 0, num_outer, num_inner, num_classes, 1)];
const Dtype mu = mu_all[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, 1)];
const Dtype sigma_square = sigma_all[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, 1)];
zeta = max(multivariate_gaussian_gpu(y, mu, sigma_square, num_classes), Dtype(FLT_MIN))
* routing_leaf_prob_data[sub2ind_reg(i, k, t, j, num_outer, num_inner, num_trees, num_leaf_nodes_per_tree)]
/ max(tree_prediction_prob_data[sub2ind_reg(i, k, t, 0, num_outer, num_inner, num_trees, num_classes)], Dtype(FLT_MIN));
mean_temp[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, 1)] += zeta*y;
zeta_sum += zeta;
}
}
}
mean_temp[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, 1)] *= (Dtype)1.0 / max(zeta_sum, Dtype(FLT_MIN));
for (int iter = 0; iter < iter_times; iter++){
const Dtype* label_data = label_vec[iter];
const Dtype* tree_prediction_prob_data = tree_prediction_vec[iter];
const Dtype* routing_leaf_prob_data = routing_vec[iter];
for (int i = 0; i < num_outer; i++){
for (int k = 0; k < num_inner; k++){
const Dtype y = label_data[sub2ind_reg(i, k, 0, 0, num_outer, num_inner, num_classes, 1)];
const Dtype mu = mu_all[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, 1)];
const Dtype sigma_square = sigma_all[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, 1)];
Dtype zeta= max(multivariate_gaussian_gpu(y, mu, sigma_square, num_classes), Dtype(FLT_MIN))
* routing_leaf_prob_data[sub2ind_reg(i, k, t, j, num_outer, num_inner, num_trees, num_leaf_nodes_per_tree)]
/ max(tree_prediction_prob_data[sub2ind_reg(i, k, t, 0, num_outer, num_inner, num_trees, num_classes)], Dtype(FLT_MIN));
mu_new = mean_temp[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, 1)];
mu_new = y - mu_new;
sigma_temp[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, num_classes)] += zeta*mu_new*mu_new;
}
}
}
sigma_temp[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, num_classes)] *= (Dtype)1.0 / max(zeta_sum, Dtype(FLT_MIN));
sigma_temp[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, num_classes)] += (Dtype) FLT_EPSILON;
}
}
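/* Backward pass, one thread per sample: leaf terms (label weight * residual *
 * leaf mean, scaled by the leaf routing probability) are summed bottom-up
 * through each tree so that bottom_diff receives the gradient with respect to
 * the split-node activations of the selected input dimensions. */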
template <typename Dtype>
__global__ void kernel_backward_all_reg(Dtype* bottom_diff, Dtype* inter_data, const Dtype* tree_pred, const Dtype* mean_data,const Dtype* label_data,
const Dtype* labelweight, const Dtype* routing_lf, const Dtype* dn_data, const Dtype* dim_offset,
int num_outer, int num_inner, int num_trees, int num_leaf, int num_split,
int h, int w, int num_classes, int num_dims_, const Dtype scale_) {
int num_nodes = num_split + num_leaf;
CUDA_KERNEL_LOOP(index, num_outer) {
for (int i=0; i<num_inner; ++i) {
for(int t= 0; t < num_trees; ++t) {
for (int l=0; l<num_leaf; ++l) {
int inter_idx = sub2ind_reg(index,i,t,num_split+l, num_outer, num_inner, num_trees,num_nodes);
int routing_lf_idx = sub2ind_reg(index, i, t, l, num_outer, num_inner, num_trees, num_leaf);
for (int c=0; c<num_classes; ++c) {
int lb_idx = sub2ind_reg(index,c,i/w,i%w, num_outer,num_classes,h,w);
const Dtype label_value=label_data[lb_idx]/scale_;
const Dtype label_weight=0.0001 * (labelweight[lb_idx]);
int tree_pred_idx = sub2ind_reg(index, i, t, c, num_outer, num_inner, num_trees, num_classes);
int mean_idx = sub2ind_reg(t, l, c, 0, num_trees, num_leaf, num_classes, 1);
inter_data[inter_idx] += label_weight * (label_value - tree_pred[tree_pred_idx]) * mean_data[mean_idx];
}
inter_data[inter_idx] *= routing_lf[routing_lf_idx];
}
}
for (int n=num_split-1; n>=0; --n) {
for(int t = 0; t < num_trees; t++) {
int dim_offset_idx = sub2ind_reg(t,n,0,0, num_trees,num_split,1,1);
int diff_idx = sub2ind_reg(index,dim_offset[dim_offset_idx],i/w,i%w, num_outer,num_dims_,h,w);
int inter_left_idx = sub2ind_reg(index,i,t,2*n+1,num_outer,num_inner,num_trees,num_nodes);
int inter_right_idx = inter_left_idx + 1;
bottom_diff[diff_idx] = (
dn_data[diff_idx] * inter_data[inter_right_idx] -
(Dtype(1.0) - dn_data[diff_idx]) * inter_data[inter_left_idx]);
int inter_parent_idx = sub2ind_reg(index,i,t,n,num_outer,num_inner,num_trees,num_nodes);
inter_data[inter_parent_idx] = inter_data[inter_left_idx] + inter_data[inter_right_idx];
}
}
}
}
}
template <typename Dtype>
void NeuralDecisionRegForestWithLossLayer<Dtype>::UpdateTreePredictionAllDataGPU()
{
if (num_classes_==1){
int num_outer_iter = tree_prediction_all_data_prob_density_vec_[0].get()->shape(0);
int num_inner_iter = tree_prediction_all_data_prob_density_vec_[0].get()->shape(1);
Dtype ** tree_prediction_vec = new Dtype * [iter_times_in_epoch_];
Dtype const ** routing_vec = new Dtype const * [iter_times_in_epoch_];
Dtype const ** all_label_vec = new Dtype const * [iter_times_in_epoch_];
for (int iter = 0; iter < iter_times_in_epoch_; iter++) {
tree_prediction_vec[iter] = tree_prediction_all_data_prob_density_vec_[iter].get()->mutable_gpu_data();
hipMemset(tree_prediction_vec[iter], 0, sizeof(Dtype)* tree_prediction_all_data_prob_density_vec_[iter].get()->count());
routing_vec[iter] = routing_leaf_all_data_prob_vec_[iter].get()->gpu_data();
all_label_vec[iter] = all_data_label_vec_[iter].get()->gpu_data();
}
Dtype ** gpu_tree_prediction_vec;
Dtype const ** gpu_routing_vec;
Dtype const ** gpu_all_label_vec;
hipMalloc((void**)&gpu_tree_prediction_vec, sizeof(Dtype *)*iter_times_in_epoch_);
hipMemcpy(gpu_tree_prediction_vec, tree_prediction_vec, sizeof(Dtype *)*iter_times_in_epoch_, hipMemcpyHostToDevice);
hipMalloc((void**)&gpu_routing_vec, sizeof(Dtype const *)*iter_times_in_epoch_);
hipMemcpy(gpu_routing_vec, routing_vec, sizeof(Dtype const*)*iter_times_in_epoch_, hipMemcpyHostToDevice);
hipMalloc((void**)&gpu_all_label_vec, sizeof(Dtype const *)*iter_times_in_epoch_);
hipMemcpy(gpu_all_label_vec, all_label_vec, sizeof(Dtype const *)*iter_times_in_epoch_, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_updata_all_reg<Dtype>), dim3(CAFFE_GET_BLOCKS(num_outer_iter*num_inner_iter*iter_times_in_epoch_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_outer_iter, num_inner_iter, num_trees_, num_leaf_nodes_per_tree_, num_classes_,iter_times_in_epoch_,
mean_->gpu_data(), sigma_square_->gpu_data(), gpu_routing_vec, gpu_all_label_vec, gpu_tree_prediction_vec);
} else {
for (int iter = 0; iter < iter_times_in_epoch_; iter++){
Dtype* tree_prediction_all_data_prob_density_data = tree_prediction_all_data_prob_density_vec_[iter].get()->mutable_cpu_data();
memset(tree_prediction_all_data_prob_density_data, 0, sizeof(Dtype)* tree_prediction_all_data_prob_density_vec_[iter].get()->count());
const Dtype* routing_leaf_all_data_prob_data = routing_leaf_all_data_prob_vec_[iter].get()->cpu_data();
int num_outer_iter = tree_prediction_all_data_prob_density_vec_[iter].get()->shape(0);
int num_inner_iter = tree_prediction_all_data_prob_density_vec_[iter].get()->shape(1);
for (int i = 0; i < num_outer_iter; i++){
for (int k = 0; k < num_inner_iter; k++){
const Dtype* y = all_data_label_vec_[iter].get()->cpu_data() + all_data_label_vec_[iter].get()->offset(i, k, 0, 0);
for (int t = 0; t < num_trees_; t++){
for (int j = 0; j < num_leaf_nodes_per_tree_; j++){
tree_prediction_all_data_prob_density_data[tree_prediction_all_data_prob_density_vec_[iter].get()->offset(i, k, t, 0)] +=
routing_leaf_all_data_prob_data[routing_leaf_all_data_prob_vec_[iter].get()->offset(i, k, t, j)] *
max(multivariate_gaussian(y, mean_->cpu_data() + mean_->offset(t, j, 0, 0), sigma_square_->cpu_data() + sigma_square_->offset(t, j, 0, 0), num_classes_),Dtype(FLT_MIN));
}
}
}
}
}
}
}
template <typename Dtype>
void NeuralDecisionRegForestWithLossLayer<Dtype>::UpdateClassLabelDistrGPU() {
num_epoch_++;
LOG(INFO) << "Epoch " << num_epoch_ <<": Start updating class label distribution";
int iter_times = 0;
Dtype* mu_new = new Dtype [num_classes_];
if (num_classes_==1){
Blob<Dtype> mean_temp(mean_->shape());
Dtype* mean_temp_data = mean_temp.mutable_gpu_data();
Blob<Dtype> sigma_square_temp(sigma_square_->shape());
Dtype* sigma_square_temp_data = sigma_square_temp.mutable_gpu_data();
while (iter_times < iter_times_class_label_distr_) {
LOG(INFO) << "Label distribution update iteration " << iter_times;
UpdateTreePredictionAllDataGPU();
hipMemset(mean_temp.mutable_gpu_data(), 0, sizeof(Dtype)* mean_temp.count());
hipMemset(sigma_square_temp.mutable_gpu_data(), 0, sizeof(Dtype)* sigma_square_temp.count());
Dtype const ** tree_prediction_vec = new Dtype const* [iter_times_in_epoch_];
Dtype const ** routing_vec = new Dtype const * [iter_times_in_epoch_];
Dtype const ** all_label_vec = new Dtype const * [iter_times_in_epoch_];
int num_outer_iter = tree_prediction_all_data_prob_density_vec_[0].get()->shape(0);
int num_inner_iter = tree_prediction_all_data_prob_density_vec_[0].get()->shape(1);
for (int iter = 0; iter < iter_times_in_epoch_; iter++) {
tree_prediction_vec[iter] = tree_prediction_all_data_prob_density_vec_[iter].get()->gpu_data();
routing_vec[iter] = routing_leaf_all_data_prob_vec_[iter].get()->gpu_data();
all_label_vec[iter] = all_data_label_vec_[iter].get()->gpu_data();
}
Dtype const ** gpu_tree_prediction_vec;
Dtype const ** gpu_routing_vec;
Dtype const ** gpu_all_label_vec;
hipMalloc((void**)&gpu_tree_prediction_vec, sizeof(Dtype const *)*iter_times_in_epoch_);
hipMemcpy(gpu_tree_prediction_vec, tree_prediction_vec, sizeof(Dtype const *)*iter_times_in_epoch_, hipMemcpyHostToDevice);
hipMalloc((void**)&gpu_routing_vec, sizeof(Dtype const *)*iter_times_in_epoch_);
hipMemcpy(gpu_routing_vec, routing_vec, sizeof(Dtype const*)*iter_times_in_epoch_, hipMemcpyHostToDevice);
hipMalloc((void**)&gpu_all_label_vec, sizeof(Dtype const *)*iter_times_in_epoch_);
hipMemcpy(gpu_all_label_vec, all_label_vec, sizeof(Dtype const *)*iter_times_in_epoch_, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_mean_sig_reg<Dtype>), dim3(CAFFE_GET_BLOCKS(num_trees_ * num_leaf_nodes_per_tree_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_trees_, num_leaf_nodes_per_tree_, num_outer_iter, num_inner_iter, iter_times_in_epoch_,
mean_->gpu_data(), sigma_square_->gpu_data(), gpu_all_label_vec,
gpu_routing_vec, gpu_tree_prediction_vec, mean_temp_data, sigma_square_temp_data);
delete [] tree_prediction_vec; tree_prediction_vec=NULL;
delete [] routing_vec; routing_vec=NULL;
delete [] all_label_vec; all_label_vec=NULL;
memcpy(mean_->mutable_cpu_data(), mean_temp.cpu_data(), sizeof(Dtype) * mean_->count());
memcpy(sigma_square_->mutable_cpu_data(), sigma_square_temp.cpu_data(), sizeof(Dtype) * sigma_square_->count());
iter_times++;
}
} else {
Blob<Dtype> mean_temp(mean_->shape());
Dtype* mean_temp_data = mean_temp.mutable_cpu_data();
Blob<Dtype> sigma_square_temp(sigma_square_->shape());
Dtype* sigma_square_temp_data = sigma_square_temp.mutable_cpu_data();
while (iter_times < iter_times_class_label_distr_){
LOG(INFO) << "Label distribution update iteration " << iter_times;
UpdateTreePredictionAllData();
memset(mean_temp_data, 0, sizeof(Dtype)* mean_temp.count());
memset(sigma_square_temp_data, 0, sizeof(Dtype) * sigma_square_temp.count());
of_ << "Iter " << iter_times <<":" << "\n";
for (int t = 0; t < num_trees_; t++){
for (int j = 0; j < num_leaf_nodes_per_tree_; j++){
Dtype zeta_sum = (Dtype) 0.0;
const Dtype* mu = mean_->cpu_data() + mean_->offset(t, j, 0, 0);
const Dtype* sigma_square = sigma_square_->cpu_data() + sigma_square_->offset(t, j, 0, 0);
for (int iter = 0; iter < iter_times_in_epoch_; iter++){
int num_outer_iter = tree_prediction_all_data_prob_density_vec_[iter].get()->shape(0);
int num_inner_iter = tree_prediction_all_data_prob_density_vec_[iter].get()->shape(1);
for (int i = 0; i < num_outer_iter; i++){
for (int k = 0; k < num_inner_iter; k++){
const Dtype* y = all_data_label_vec_[iter].get()->cpu_data() + all_data_label_vec_[iter].get()->offset(i, k, 0, 0);
Dtype zeta = max(multivariate_gaussian(y, mu, sigma_square, num_classes_), Dtype(FLT_MIN)) * routing_leaf_all_data_prob_vec_[iter].get()->data_at(i, k, t, j)
/ max(tree_prediction_all_data_prob_density_vec_[iter].get()->data_at(i, k, t, 0), Dtype(FLT_MIN));
caffe_axpy(num_classes_, zeta, y, mean_temp_data + mean_temp.offset(t, j, 0, 0));
zeta_sum += zeta;
}
}
}
caffe_scal(num_classes_, (Dtype)1.0 / max(zeta_sum, Dtype(FLT_MIN)), mean_temp_data + mean_temp.offset(t, j, 0, 0));
for (int iter = 0; iter < iter_times_in_epoch_; iter++){
int num_outer_iter = tree_prediction_all_data_prob_density_vec_[iter].get()->shape(0);
int num_inner_iter = tree_prediction_all_data_prob_density_vec_[iter].get()->shape(1);
for (int i = 0; i < num_outer_iter; i++){
for (int k = 0; k < num_inner_iter; k++){
const Dtype* y = all_data_label_vec_[iter].get()->cpu_data() + all_data_label_vec_[iter].get()->offset(i, k, 0, 0);
Dtype zeta = max(multivariate_gaussian(y, mu, sigma_square, num_classes_), Dtype(FLT_MIN)) * routing_leaf_all_data_prob_vec_[iter].get()->data_at(i, k, t, j)
/ max(tree_prediction_all_data_prob_density_vec_[iter].get()->data_at(i, k, t, 0), Dtype(FLT_MIN));
memcpy(mu_new, mean_temp_data + mean_temp.offset(t, j, 0, 0), sizeof(Dtype) * num_classes_);
caffe_sub(num_classes_, y, mu_new, mu_new);
caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num_classes_, num_classes_, 1, zeta, mu_new, mu_new, (Dtype) 1.0, sigma_square_temp_data + sigma_square_temp.offset(t, j, 0, 0));
}
}
}
caffe_scal(num_classes_ * num_classes_, (Dtype)1.0 / max(zeta_sum, Dtype(FLT_MIN)), sigma_square_temp_data + sigma_square_temp.offset(t, j, 0, 0));
caffe_add_scalar(num_classes_, (Dtype) FLT_EPSILON, sigma_square_temp_data + sigma_square_temp.offset(t, j, 0, 0));
}
}
memcpy(mean_->mutable_cpu_data(), mean_temp_data, sizeof(Dtype) * mean_->count());
memcpy(sigma_square_->mutable_cpu_data(), sigma_square_temp_data, sizeof(Dtype) * sigma_square_->count());
iter_times++;
}
}
LOG(INFO) << "Epoch" << num_epoch_ << ": End updating class label distribution";
delete [] mu_new; mu_new = NULL;
RecordClassLabelDistr();
}
template <typename Dtype>
void NeuralDecisionRegForestWithLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
tree_for_training_ = caffe_rng_rand() % num_trees_;
sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
Dtype* routing_split_prob_data = routing_split_prob_.mutable_gpu_data();
Dtype* routing_leaf_prob_data = routing_leaf_prob_.mutable_gpu_data();
const Dtype* dn_data = dn_->gpu_data();
const Dtype* sub_dimensions_data = sub_dimensions_->gpu_data();
hipLaunchKernelGGL(( kernel_routing<Dtype>) , dim3(CAFFE_GET_BLOCKS(num_outer_ * num_inner_ * num_trees_)),dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0,
num_outer_, num_trees_, num_dims_, bottom[0]->height(), bottom[0]->width(), num_leaf_nodes_per_tree_, num_split_nodes_per_tree_, dn_data,
sub_dimensions_data, routing_split_prob_data, routing_leaf_prob_data);
const Dtype* mean_data = mean_->cpu_data();
Dtype* routing_leaf_all_data_prob_data = routing_leaf_all_data_prob_vec_[iter_times_ % all_data_vec_length_].get()->mutable_cpu_data();
Dtype* all_data_label_data = all_data_label_vec_[iter_times_ % all_data_vec_length_].get()->mutable_cpu_data();
Dtype* tree_prediction_data = tree_prediction_.mutable_cpu_data();
caffe_set(tree_prediction_.count(), (Dtype) 0.0, tree_prediction_data);
Dtype loss = (Dtype) 0.0;
int count = 0;
for (int i = 0; i < num_outer_; i++){
for (int k = 0; k < num_inner_; k++){
memcpy(routing_leaf_all_data_prob_data + routing_leaf_all_data_prob_vec_[iter_times_ % all_data_vec_length_].get()->offset(i, k, 0, 0),
routing_leaf_prob_.cpu_data() + routing_leaf_prob_.offset(i, k, 0, 0), sizeof(Dtype)* num_leaf_nodes_per_tree_ * num_trees_);
if(drop_out_)
{
caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, 1, num_classes_, num_leaf_nodes_per_tree_,
(Dtype)1.0, routing_leaf_prob_.cpu_data() + routing_leaf_prob_.offset(i, k, tree_for_training_, 0),
mean_data + mean_->offset(tree_for_training_, 0, 0, 0),
(Dtype)0.0, tree_prediction_data + tree_prediction_.offset(i, k, tree_for_training_, 0));
}
else
{
for(int t = 0; t < num_trees_; t++)
{
caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, 1, num_classes_, num_leaf_nodes_per_tree_,
(Dtype)1.0, routing_leaf_prob_.cpu_data() + routing_leaf_prob_.offset(i, k, t, 0),
mean_data + mean_->offset(t, 0, 0, 0),
(Dtype)0.0, tree_prediction_data + tree_prediction_.offset(i, k, t, 0));
}
}
for(int j = 0; j < num_classes_; ++j)
{
const Dtype label_value = bottom[1]->data_at(i, j, k / dn_->width(), k % dn_->width())/scale_;
const Dtype label_weight = 0.0001 * (bottom[2]->data_at(i, j, k / dn_->width(), k % dn_->width()));
all_data_label_data[all_data_label_vec_[iter_times_ % all_data_vec_length_].get()->offset(i, k, j, 0)]
= label_value;
if (drop_out_)
{
loss += 0.5 * label_weight * (label_value - tree_prediction_.data_at(i, k, tree_for_training_, j)) * (label_value - tree_prediction_.data_at(i, k, tree_for_training_, j));
}
else
{
for(int t = 0; t < num_trees_; t++)
{
loss += 0.5 * label_weight * (label_value - tree_prediction_.data_at(i, k, t, j)) * (label_value - tree_prediction_.data_at(i, k, t, j));
}
}
}
count++;
}
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, count);
}
template <typename Dtype>
void NeuralDecisionRegForestWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
if (propagate_down[1])
{
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0])
{
int count=0;
if (drop_out_) {
LOG(FATAL)<<"not implement";
caffe_set(mean_->count(), static_cast<Dtype>(0), mean_->mutable_cpu_diff());
caffe_set(sigma_square_->count(), static_cast<Dtype>(0), sigma_square_->mutable_cpu_diff());
caffe_set(sub_dimensions_->count(), static_cast<Dtype>(0), sub_dimensions_->mutable_cpu_diff());
Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
caffe_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff);
Dtype* inter_var_data = inter_var_.mutable_cpu_data();
memset(inter_var_data, (Dtype) 0.0, sizeof(Dtype) * inter_var_.count());
const Dtype* dn_data = dn_->cpu_data();
for (int i = 0; i < num_outer_; i++){
for (int k = 0; k < num_inner_; k++){
int t = tree_for_training_;{
for (int l = 0; l < num_leaf_nodes_per_tree_; l++){
for (int j = 0; j < num_classes_; j++){
const Dtype label_value = bottom[1]->data_at(i, j, k / dn_->width(), k % dn_->width())/scale_;
const Dtype label_weight = 0.0001 * (bottom[2]->data_at(i, j, k / dn_->width(), k % dn_->width()));
inter_var_data[inter_var_.offset(i, k, t, num_split_nodes_per_tree_ + l)] += label_weight * (label_value - tree_prediction_.data_at(i, k, t, j)) * mean_->data_at(t, l, j, 0);
}
inter_var_data[inter_var_.offset(i, k, t, num_split_nodes_per_tree_ + l)] *= routing_leaf_prob_.data_at(i, k, t, l);
}
for (int n = num_split_nodes_per_tree_ - 1; n >= 0; n--){
int dim_offset = (int)sub_dimensions_->data_at(t, n, 0, 0);
bottom_diff[bottom[0]->offset(i, dim_offset, k / bottom[0]->width(), k % bottom[0]->width())] =
dn_data[bottom[0]->offset(i, dim_offset, k / bottom[0]->width(), k % bottom[0]->width())] * inter_var_.data_at(i, k, t, 2 * n + 2)
- ((Dtype)1.0 - dn_data[bottom[0]->offset(i, dim_offset, k / bottom[0]->width(), k % bottom[0]->width())]) * inter_var_.data_at(i, k, t, 2 * n + 1);
inter_var_data[inter_var_.offset(i, k, t, n)] = inter_var_.data_at(i, k, t, 2 * n + 2) + inter_var_.data_at(i, k, t, 2 * n + 1);
}
count++;
}
}
}
} else {
hipMemset(mean_->mutable_gpu_diff(), 0, sizeof(Dtype)*mean_->count());
hipMemset(sigma_square_->mutable_gpu_diff(), 0, sizeof(Dtype)*sigma_square_->count());
hipMemset(sub_dimensions_->mutable_gpu_diff(), 0, sizeof(Dtype)*sub_dimensions_->count());
hipMemset(bottom[0]->mutable_gpu_diff(), 0, sizeof(Dtype)*bottom[0]->count());
hipMemset(inter_var_.mutable_gpu_data(), 0, sizeof(Dtype)*inter_var_.count());
CHECK_EQ(dn_->width(), bottom[1]->width());
CHECK_EQ(dn_->height(), bottom[1]->height());
CHECK_EQ(dn_->width(), bottom[2]->width());
CHECK_EQ(dn_->height(), bottom[2]->height());
hipLaunchKernelGGL(( kernel_backward_all_reg<Dtype>), dim3(CAFFE_GET_BLOCKS(num_outer_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom[0]->mutable_gpu_diff(), inter_var_.mutable_gpu_data(),tree_prediction_.gpu_data(),mean_->gpu_data(),
bottom[1]->gpu_data(), bottom[2]->gpu_data(), routing_leaf_prob_.gpu_data(), dn_->gpu_data(), sub_dimensions_->gpu_data(),
num_outer_, num_inner_,num_trees_, num_leaf_nodes_per_tree_, num_split_nodes_per_tree_, dn_->height(),
dn_->width(), num_classes_, num_dims_, scale_);
count = num_outer_*num_inner_*num_trees_;
}
// Scale down gradient
const Dtype loss_weight = top[0]->cpu_diff()[0];
caffe_scal(bottom[0]->count(), loss_weight / get_normalizer(normalization_, count), bottom[0]->mutable_cpu_diff());
}
if (iter_times_ && (iter_times_ + 1) % iter_times_in_epoch_ == 0)
UpdateClassLabelDistrGPU();
iter_times_++;
}
INSTANTIATE_LAYER_GPU_FUNCS(NeuralDecisionRegForestWithLossLayer);
}
| 22118478bdfd4c0e5e3432a2fd8c638228947529.cu | /*
* @author Yilu Guo
*
*
* Deep Regression Forests is open source code; you may redistribute it and/or modify it under the
* terms of the 3-clause BSD license. You should have received a copy of the
* 3-clause BSD license along with Deep Regression Forests. If not, see
 * http://www.opensource.org/licenses/BSD-3-Clause for more information.
*/
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/neural_decision_reg_forest_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/sampling.hpp"
#include "caffe/util/neural_decision_util_functions.hpp"
#include "caffe/util/benchmark.hpp"
#ifndef PI
#define PI 3.1415926
#endif
#ifndef GPU_DEBUG
#define GPU_DEBUG 0
#endif
namespace caffe
{
/*
template <typename Dtype>
inline Dtype gaussian_1d(Dtype x, Dtype mu, Dtype sigma_square)
{
sigma_square = std::max(sigma_square, (Dtype) FLT_MIN);
return (Dtype)1.0 / sqrt(2 * PI * sigma_square) * exp(-(x - mu) * (x - mu) / (2 * sigma_square));
}
template <typename Dtype>
Dtype difference(const int n, Dtype* a, Dtype *b)
{
Dtype* c = new Dtype [n];
caffe_sub(n, a, b, c);
Dtype d = caffe_cpu_asum(n, c) / Dtype(n);
delete []c;
return d;
}
*/
template <typename Dtype>
bool isdiff(Dtype x, Dtype y) {
Dtype THRES = 0.000002;
return std::abs(x - y) >= THRES;
}
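/* Row-major index helpers: sub2ind_reg flattens (n, c, h, w) coordinates of an
 * N x C x H x W array into a linear offset, and ind2sub_reg inverts it. */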
__device__ int sub2ind_reg(int n, int c, int h, int w, int N, int C, int H, int W) {
return ((n * C + c) * H + h) * W + w;
}
__device__ int ind2sub_reg(int index, int C, int H, int W, int* n, int* c, int* h, int* w) {
*w = index % W;
*h = (index / W) % H;
*c = (index / (W*H)) % C;
*n = index / (C*W*H);
return 0;
}
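/* Despite the name, this evaluates a one-dimensional Gaussian density
 * N(y; mu, sigma_square); the FLT_MIN terms guard against zero variance and
 * vanishing densities, and num_classes is not used in the computation. */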
template <typename Dtype>
__device__ Dtype multivariate_gaussian_gpu(Dtype y, Dtype mu, Dtype sigma_square, int num_classes){
return (float)1.0 / sqrt(2 * PI * (sigma_square + Dtype(FLT_MIN))) * (exp(-(y - mu) * (y - mu) / (2 * (sigma_square + Dtype(FLT_MIN))))+ Dtype(FLT_MIN));
}
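/* For every (sample, spatial location, tree, stored mini-batch) tuple this
 * accumulates the tree's predictive density of the ground-truth target:
 * p(y | x, tree) = sum_j routing_j(x) * N(y; mu_j, sigma_j^2) over its leaves. */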
template <typename Dtype>
__global__ void kernel_updata_all_reg(int num_outer, int num_inner,
int num_trees, int num_leaf_nodes_per_tree, int num_classes, int iter_times, const Dtype* mu_all, const Dtype* sigma_all,
Dtype const ** routing_vec, Dtype const ** label_vec, Dtype ** tree_prediction_vec) {
int count = num_outer * num_inner * num_trees * iter_times;
CUDA_KERNEL_LOOP(index, count) {
int t, k, i, iter;
int idx = index;
ind2sub_reg(idx, num_outer, num_inner, num_trees, &iter, &i, &k, &t);
const Dtype* label_data = label_vec[iter];
Dtype* tree_prediction_prob_data = tree_prediction_vec[iter];
const Dtype* routing_leaf_prob_data = routing_vec[iter];
const Dtype y = label_data[sub2ind_reg(i, k, 0, 0, num_outer, num_inner, num_classes, 1)];
for(int j = 0; j < num_leaf_nodes_per_tree; j++) {
const Dtype mu = mu_all[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, 1)];
const Dtype sigma_square = sigma_all[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, 1)];
tree_prediction_prob_data[sub2ind_reg(i, k, t, 0, num_outer, num_inner, num_trees, num_classes)] +=
routing_leaf_prob_data[sub2ind_reg(i, k, t, j, num_outer, num_inner, num_trees, num_leaf_nodes_per_tree)] *
max(multivariate_gaussian_gpu(y, mu, sigma_square, num_classes),Dtype(FLT_MIN));
}
}
}
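/* One thread per (tree, leaf): recomputes the leaf's mean and variance as
 * zeta-weighted statistics of the targets, where zeta is the responsibility
 * N(y; mu, sigma^2) * routing / p(y | tree) under the current estimates. */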
template <typename Dtype>
__global__ void kernel_mean_sig_reg(int num_trees, int num_leaf_nodes_per_tree, int num_outer, int num_inner, int iter_times,
const Dtype* mu_all, const Dtype* sigma_all, Dtype const ** label_vec, Dtype const ** routing_vec, Dtype const ** tree_prediction_vec,
Dtype* mean_temp, Dtype* sigma_temp) {
CUDA_KERNEL_LOOP(index, num_trees * num_leaf_nodes_per_tree) {
int t, j;
int idx = index;
int num_classes = 1;
j = idx % num_leaf_nodes_per_tree;
t = idx / num_leaf_nodes_per_tree;
Dtype zeta_sum = (Dtype) 0.0;
Dtype mu_new;
Dtype zeta;
for (int iter = 0; iter < iter_times; iter++){
const Dtype* label_data = label_vec[iter];
const Dtype* tree_prediction_prob_data = tree_prediction_vec[iter];
const Dtype* routing_leaf_prob_data = routing_vec[iter];
for (int i = 0; i < num_outer; i++){
for (int k = 0; k < num_inner; k++){
const Dtype y = label_data[sub2ind_reg(i, k, 0, 0, num_outer, num_inner, num_classes, 1)];
const Dtype mu = mu_all[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, 1)];
const Dtype sigma_square = sigma_all[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, 1)];
zeta = max(multivariate_gaussian_gpu(y, mu, sigma_square, num_classes), Dtype(FLT_MIN))
* routing_leaf_prob_data[sub2ind_reg(i, k, t, j, num_outer, num_inner, num_trees, num_leaf_nodes_per_tree)]
/ max(tree_prediction_prob_data[sub2ind_reg(i, k, t, 0, num_outer, num_inner, num_trees, num_classes)], Dtype(FLT_MIN));
mean_temp[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, 1)] += zeta*y;
zeta_sum += zeta;
}
}
}
mean_temp[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, 1)] *= (Dtype)1.0 / max(zeta_sum, Dtype(FLT_MIN));
for (int iter = 0; iter < iter_times; iter++){
const Dtype* label_data = label_vec[iter];
const Dtype* tree_prediction_prob_data = tree_prediction_vec[iter];
const Dtype* routing_leaf_prob_data = routing_vec[iter];
for (int i = 0; i < num_outer; i++){
for (int k = 0; k < num_inner; k++){
const Dtype y = label_data[sub2ind_reg(i, k, 0, 0, num_outer, num_inner, num_classes, 1)];
const Dtype mu = mu_all[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, 1)];
const Dtype sigma_square = sigma_all[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, 1)];
Dtype zeta= max(multivariate_gaussian_gpu(y, mu, sigma_square, num_classes), Dtype(FLT_MIN))
* routing_leaf_prob_data[sub2ind_reg(i, k, t, j, num_outer, num_inner, num_trees, num_leaf_nodes_per_tree)]
/ max(tree_prediction_prob_data[sub2ind_reg(i, k, t, 0, num_outer, num_inner, num_trees, num_classes)], Dtype(FLT_MIN));
mu_new = mean_temp[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, 1)];
mu_new = y - mu_new;
sigma_temp[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, num_classes)] += zeta*mu_new*mu_new;
}
}
}
sigma_temp[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, num_classes)] *= (Dtype)1.0 / max(zeta_sum, Dtype(FLT_MIN));
sigma_temp[sub2ind_reg(t, j, 0, 0, num_trees, num_leaf_nodes_per_tree, num_classes, num_classes)] += (Dtype) FLT_EPSILON;
}
}
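/* Backward pass, one thread per sample: leaf terms (label weight * residual *
 * leaf mean, scaled by the leaf routing probability) are summed bottom-up
 * through each tree so that bottom_diff receives the gradient with respect to
 * the split-node activations of the selected input dimensions. */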
template <typename Dtype>
__global__ void kernel_backward_all_reg(Dtype* bottom_diff, Dtype* inter_data, const Dtype* tree_pred, const Dtype* mean_data,const Dtype* label_data,
const Dtype* labelweight, const Dtype* routing_lf, const Dtype* dn_data, const Dtype* dim_offset,
int num_outer, int num_inner, int num_trees, int num_leaf, int num_split,
int h, int w, int num_classes, int num_dims_, const Dtype scale_) {
int num_nodes = num_split + num_leaf;
CUDA_KERNEL_LOOP(index, num_outer) {
for (int i=0; i<num_inner; ++i) {
for(int t= 0; t < num_trees; ++t) {
for (int l=0; l<num_leaf; ++l) {
int inter_idx = sub2ind_reg(index,i,t,num_split+l, num_outer, num_inner, num_trees,num_nodes);
int routing_lf_idx = sub2ind_reg(index, i, t, l, num_outer, num_inner, num_trees, num_leaf);
for (int c=0; c<num_classes; ++c) {
int lb_idx = sub2ind_reg(index,c,i/w,i%w, num_outer,num_classes,h,w);
const Dtype label_value=label_data[lb_idx]/scale_;
const Dtype label_weight=0.0001 * (labelweight[lb_idx]);
int tree_pred_idx = sub2ind_reg(index, i, t, c, num_outer, num_inner, num_trees, num_classes);
int mean_idx = sub2ind_reg(t, l, c, 0, num_trees, num_leaf, num_classes, 1);
inter_data[inter_idx] += label_weight * (label_value - tree_pred[tree_pred_idx]) * mean_data[mean_idx];
}
inter_data[inter_idx] *= routing_lf[routing_lf_idx];
}
}
for (int n=num_split-1; n>=0; --n) {
for(int t = 0; t < num_trees; t++) {
int dim_offset_idx = sub2ind_reg(t,n,0,0, num_trees,num_split,1,1);
int diff_idx = sub2ind_reg(index,dim_offset[dim_offset_idx],i/w,i%w, num_outer,num_dims_,h,w);
int inter_left_idx = sub2ind_reg(index,i,t,2*n+1,num_outer,num_inner,num_trees,num_nodes);
int inter_right_idx = inter_left_idx + 1;
bottom_diff[diff_idx] = (
dn_data[diff_idx] * inter_data[inter_right_idx] -
(Dtype(1.0) - dn_data[diff_idx]) * inter_data[inter_left_idx]);
int inter_parent_idx = sub2ind_reg(index,i,t,n,num_outer,num_inner,num_trees,num_nodes);
inter_data[inter_parent_idx] = inter_data[inter_left_idx] + inter_data[inter_right_idx];
}
}
}
}
}
template <typename Dtype>
void NeuralDecisionRegForestWithLossLayer<Dtype>::UpdateTreePredictionAllDataGPU()
{
if (num_classes_==1){
int num_outer_iter = tree_prediction_all_data_prob_density_vec_[0].get()->shape(0);
int num_inner_iter = tree_prediction_all_data_prob_density_vec_[0].get()->shape(1);
Dtype ** tree_prediction_vec = new Dtype * [iter_times_in_epoch_];
Dtype const ** routing_vec = new Dtype const * [iter_times_in_epoch_];
Dtype const ** all_label_vec = new Dtype const * [iter_times_in_epoch_];
for (int iter = 0; iter < iter_times_in_epoch_; iter++) {
tree_prediction_vec[iter] = tree_prediction_all_data_prob_density_vec_[iter].get()->mutable_gpu_data();
cudaMemset(tree_prediction_vec[iter], 0, sizeof(Dtype)* tree_prediction_all_data_prob_density_vec_[iter].get()->count());
routing_vec[iter] = routing_leaf_all_data_prob_vec_[iter].get()->gpu_data();
all_label_vec[iter] = all_data_label_vec_[iter].get()->gpu_data();
}
Dtype ** gpu_tree_prediction_vec;
Dtype const ** gpu_routing_vec;
Dtype const ** gpu_all_label_vec;
cudaMalloc((void**)&gpu_tree_prediction_vec, sizeof(Dtype *)*iter_times_in_epoch_);
cudaMemcpy(gpu_tree_prediction_vec, tree_prediction_vec, sizeof(Dtype *)*iter_times_in_epoch_, cudaMemcpyHostToDevice);
cudaMalloc((void**)&gpu_routing_vec, sizeof(Dtype const *)*iter_times_in_epoch_);
cudaMemcpy(gpu_routing_vec, routing_vec, sizeof(Dtype const*)*iter_times_in_epoch_, cudaMemcpyHostToDevice);
cudaMalloc((void**)&gpu_all_label_vec, sizeof(Dtype const *)*iter_times_in_epoch_);
cudaMemcpy(gpu_all_label_vec, all_label_vec, sizeof(Dtype const *)*iter_times_in_epoch_, cudaMemcpyHostToDevice);
kernel_updata_all_reg<Dtype><<<CAFFE_GET_BLOCKS(num_outer_iter*num_inner_iter*iter_times_in_epoch_), CAFFE_CUDA_NUM_THREADS>>>(
num_outer_iter, num_inner_iter, num_trees_, num_leaf_nodes_per_tree_, num_classes_,iter_times_in_epoch_,
mean_->gpu_data(), sigma_square_->gpu_data(), gpu_routing_vec, gpu_all_label_vec, gpu_tree_prediction_vec);
} else {
for (int iter = 0; iter < iter_times_in_epoch_; iter++){
Dtype* tree_prediction_all_data_prob_density_data = tree_prediction_all_data_prob_density_vec_[iter].get()->mutable_cpu_data();
memset(tree_prediction_all_data_prob_density_data, 0, sizeof(Dtype)* tree_prediction_all_data_prob_density_vec_[iter].get()->count());
const Dtype* routing_leaf_all_data_prob_data = routing_leaf_all_data_prob_vec_[iter].get()->cpu_data();
int num_outer_iter = tree_prediction_all_data_prob_density_vec_[iter].get()->shape(0);
int num_inner_iter = tree_prediction_all_data_prob_density_vec_[iter].get()->shape(1);
for (int i = 0; i < num_outer_iter; i++){
for (int k = 0; k < num_inner_iter; k++){
const Dtype* y = all_data_label_vec_[iter].get()->cpu_data() + all_data_label_vec_[iter].get()->offset(i, k, 0, 0);
for (int t = 0; t < num_trees_; t++){
for (int j = 0; j < num_leaf_nodes_per_tree_; j++){
tree_prediction_all_data_prob_density_data[tree_prediction_all_data_prob_density_vec_[iter].get()->offset(i, k, t, 0)] +=
routing_leaf_all_data_prob_data[routing_leaf_all_data_prob_vec_[iter].get()->offset(i, k, t, j)] *
max(multivariate_gaussian(y, mean_->cpu_data() + mean_->offset(t, j, 0, 0), sigma_square_->cpu_data() + sigma_square_->offset(t, j, 0, 0), num_classes_),Dtype(FLT_MIN));
}
}
}
}
}
}
}
template <typename Dtype>
void NeuralDecisionRegForestWithLossLayer<Dtype>::UpdateClassLabelDistrGPU() {
num_epoch_++;
LOG(INFO) << "Epoch " << num_epoch_ <<": Start updating class label distribution";
int iter_times = 0;
Dtype* mu_new = new Dtype [num_classes_];
if (num_classes_==1){
Blob<Dtype> mean_temp(mean_->shape());
Dtype* mean_temp_data = mean_temp.mutable_gpu_data();
Blob<Dtype> sigma_square_temp(sigma_square_->shape());
Dtype* sigma_square_temp_data = sigma_square_temp.mutable_gpu_data();
while (iter_times < iter_times_class_label_distr_) {
LOG(INFO) << "Label distribution update iteration " << iter_times;
UpdateTreePredictionAllDataGPU();
cudaMemset(mean_temp.mutable_gpu_data(), 0, sizeof(Dtype)* mean_temp.count());
cudaMemset(sigma_square_temp.mutable_gpu_data(), 0, sizeof(Dtype)* sigma_square_temp.count());
Dtype const ** tree_prediction_vec = new Dtype const* [iter_times_in_epoch_];
Dtype const ** routing_vec = new Dtype const * [iter_times_in_epoch_];
Dtype const ** all_label_vec = new Dtype const * [iter_times_in_epoch_];
int num_outer_iter = tree_prediction_all_data_prob_density_vec_[0].get()->shape(0);
int num_inner_iter = tree_prediction_all_data_prob_density_vec_[0].get()->shape(1);
for (int iter = 0; iter < iter_times_in_epoch_; iter++) {
tree_prediction_vec[iter] = tree_prediction_all_data_prob_density_vec_[iter].get()->gpu_data();
routing_vec[iter] = routing_leaf_all_data_prob_vec_[iter].get()->gpu_data();
all_label_vec[iter] = all_data_label_vec_[iter].get()->gpu_data();
}
Dtype const ** gpu_tree_prediction_vec;
Dtype const ** gpu_routing_vec;
Dtype const ** gpu_all_label_vec;
cudaMalloc((void**)&gpu_tree_prediction_vec, sizeof(Dtype const *)*iter_times_in_epoch_);
cudaMemcpy(gpu_tree_prediction_vec, tree_prediction_vec, sizeof(Dtype const *)*iter_times_in_epoch_, cudaMemcpyHostToDevice);
cudaMalloc((void**)&gpu_routing_vec, sizeof(Dtype const *)*iter_times_in_epoch_);
cudaMemcpy(gpu_routing_vec, routing_vec, sizeof(Dtype const*)*iter_times_in_epoch_, cudaMemcpyHostToDevice);
cudaMalloc((void**)&gpu_all_label_vec, sizeof(Dtype const *)*iter_times_in_epoch_);
cudaMemcpy(gpu_all_label_vec, all_label_vec, sizeof(Dtype const *)*iter_times_in_epoch_, cudaMemcpyHostToDevice);
kernel_mean_sig_reg<Dtype><<<CAFFE_GET_BLOCKS(num_trees_ * num_leaf_nodes_per_tree_), CAFFE_CUDA_NUM_THREADS>>>(
num_trees_, num_leaf_nodes_per_tree_, num_outer_iter, num_inner_iter, iter_times_in_epoch_,
mean_->gpu_data(), sigma_square_->gpu_data(), gpu_all_label_vec,
gpu_routing_vec, gpu_tree_prediction_vec, mean_temp_data, sigma_square_temp_data);
delete [] tree_prediction_vec; tree_prediction_vec=NULL;
delete [] routing_vec; routing_vec=NULL;
delete [] all_label_vec; all_label_vec=NULL;
memcpy(mean_->mutable_cpu_data(), mean_temp.cpu_data(), sizeof(Dtype) * mean_->count());
memcpy(sigma_square_->mutable_cpu_data(), sigma_square_temp.cpu_data(), sizeof(Dtype) * sigma_square_->count());
iter_times++;
}
} else {
Blob<Dtype> mean_temp(mean_->shape());
Dtype* mean_temp_data = mean_temp.mutable_cpu_data();
Blob<Dtype> sigma_square_temp(sigma_square_->shape());
Dtype* sigma_square_temp_data = sigma_square_temp.mutable_cpu_data();
while (iter_times < iter_times_class_label_distr_){
LOG(INFO) << "Label distribution update iteration " << iter_times;
UpdateTreePredictionAllData();
memset(mean_temp_data, 0, sizeof(Dtype)* mean_temp.count());
memset(sigma_square_temp_data, 0, sizeof(Dtype) * sigma_square_temp.count());
of_ << "Iter " << iter_times <<":" << "\n";
for (int t = 0; t < num_trees_; t++){
for (int j = 0; j < num_leaf_nodes_per_tree_; j++){
Dtype zeta_sum = (Dtype) 0.0;
const Dtype* mu = mean_->cpu_data() + mean_->offset(t, j, 0, 0);
const Dtype* sigma_square = sigma_square_->cpu_data() + sigma_square_->offset(t, j, 0, 0);
for (int iter = 0; iter < iter_times_in_epoch_; iter++){
int num_outer_iter = tree_prediction_all_data_prob_density_vec_[iter].get()->shape(0);
int num_inner_iter = tree_prediction_all_data_prob_density_vec_[iter].get()->shape(1);
for (int i = 0; i < num_outer_iter; i++){
for (int k = 0; k < num_inner_iter; k++){
const Dtype* y = all_data_label_vec_[iter].get()->cpu_data() + all_data_label_vec_[iter].get()->offset(i, k, 0, 0);
Dtype zeta = max(multivariate_gaussian(y, mu, sigma_square, num_classes_), Dtype(FLT_MIN)) * routing_leaf_all_data_prob_vec_[iter].get()->data_at(i, k, t, j)
/ max(tree_prediction_all_data_prob_density_vec_[iter].get()->data_at(i, k, t, 0), Dtype(FLT_MIN));
caffe_axpy(num_classes_, zeta, y, mean_temp_data + mean_temp.offset(t, j, 0, 0));
zeta_sum += zeta;
}
}
}
caffe_scal(num_classes_, (Dtype)1.0 / max(zeta_sum, Dtype(FLT_MIN)), mean_temp_data + mean_temp.offset(t, j, 0, 0));
for (int iter = 0; iter < iter_times_in_epoch_; iter++){
int num_outer_iter = tree_prediction_all_data_prob_density_vec_[iter].get()->shape(0);
int num_inner_iter = tree_prediction_all_data_prob_density_vec_[iter].get()->shape(1);
for (int i = 0; i < num_outer_iter; i++){
for (int k = 0; k < num_inner_iter; k++){
const Dtype* y = all_data_label_vec_[iter].get()->cpu_data() + all_data_label_vec_[iter].get()->offset(i, k, 0, 0);
Dtype zeta = max(multivariate_gaussian(y, mu, sigma_square, num_classes_), Dtype(FLT_MIN)) * routing_leaf_all_data_prob_vec_[iter].get()->data_at(i, k, t, j)
/ max(tree_prediction_all_data_prob_density_vec_[iter].get()->data_at(i, k, t, 0), Dtype(FLT_MIN));
memcpy(mu_new, mean_temp_data + mean_temp.offset(t, j, 0, 0), sizeof(Dtype) * num_classes_);
caffe_sub(num_classes_, y, mu_new, mu_new);
caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num_classes_, num_classes_, 1, zeta, mu_new, mu_new, (Dtype) 1.0, sigma_square_temp_data + sigma_square_temp.offset(t, j, 0, 0));
}
}
}
caffe_scal(num_classes_ * num_classes_, (Dtype)1.0 / max(zeta_sum, Dtype(FLT_MIN)), sigma_square_temp_data + sigma_square_temp.offset(t, j, 0, 0));
caffe_add_scalar(num_classes_, (Dtype) FLT_EPSILON, sigma_square_temp_data + sigma_square_temp.offset(t, j, 0, 0));
}
}
memcpy(mean_->mutable_cpu_data(), mean_temp_data, sizeof(Dtype) * mean_->count());
memcpy(sigma_square_->mutable_cpu_data(), sigma_square_temp_data, sizeof(Dtype) * sigma_square_->count());
iter_times++;
}
}
LOG(INFO) << "Epoch" << num_epoch_ << ": End updating class label distribution";
delete [] mu_new; mu_new = NULL;
RecordClassLabelDistr();
}
template <typename Dtype>
void NeuralDecisionRegForestWithLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
tree_for_training_ = caffe_rng_rand() % num_trees_;
sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
Dtype* routing_split_prob_data = routing_split_prob_.mutable_gpu_data();
Dtype* routing_leaf_prob_data = routing_leaf_prob_.mutable_gpu_data();
const Dtype* dn_data = dn_->gpu_data();
const Dtype* sub_dimensions_data = sub_dimensions_->gpu_data();
kernel_routing<Dtype> <<<CAFFE_GET_BLOCKS(num_outer_ * num_inner_ * num_trees_),CAFFE_CUDA_NUM_THREADS >>>(
num_outer_, num_trees_, num_dims_, bottom[0]->height(), bottom[0]->width(), num_leaf_nodes_per_tree_, num_split_nodes_per_tree_, dn_data,
sub_dimensions_data, routing_split_prob_data, routing_leaf_prob_data);
const Dtype* mean_data = mean_->cpu_data();
Dtype* routing_leaf_all_data_prob_data = routing_leaf_all_data_prob_vec_[iter_times_ % all_data_vec_length_].get()->mutable_cpu_data();
Dtype* all_data_label_data = all_data_label_vec_[iter_times_ % all_data_vec_length_].get()->mutable_cpu_data();
Dtype* tree_prediction_data = tree_prediction_.mutable_cpu_data();
caffe_set(tree_prediction_.count(), (Dtype) 0.0, tree_prediction_data);
Dtype loss = (Dtype) 0.0;
int count = 0;
for (int i = 0; i < num_outer_; i++){
for (int k = 0; k < num_inner_; k++){
memcpy(routing_leaf_all_data_prob_data + routing_leaf_all_data_prob_vec_[iter_times_ % all_data_vec_length_].get()->offset(i, k, 0, 0),
routing_leaf_prob_.cpu_data() + routing_leaf_prob_.offset(i, k, 0, 0), sizeof(Dtype)* num_leaf_nodes_per_tree_ * num_trees_);
if(drop_out_)
{
caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, 1, num_classes_, num_leaf_nodes_per_tree_,
(Dtype)1.0, routing_leaf_prob_.cpu_data() + routing_leaf_prob_.offset(i, k, tree_for_training_, 0),
mean_data + mean_->offset(tree_for_training_, 0, 0, 0),
(Dtype)0.0, tree_prediction_data + tree_prediction_.offset(i, k, tree_for_training_, 0));
}
else
{
for(int t = 0; t < num_trees_; t++)
{
caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, 1, num_classes_, num_leaf_nodes_per_tree_,
(Dtype)1.0, routing_leaf_prob_.cpu_data() + routing_leaf_prob_.offset(i, k, t, 0),
mean_data + mean_->offset(t, 0, 0, 0),
(Dtype)0.0, tree_prediction_data + tree_prediction_.offset(i, k, t, 0));
}
}
for(int j = 0; j < num_classes_; ++j)
{
const Dtype label_value = bottom[1]->data_at(i, j, k / dn_->width(), k % dn_->width())/scale_;
const Dtype label_weight = 0.0001 * (bottom[2]->data_at(i, j, k / dn_->width(), k % dn_->width()));
all_data_label_data[all_data_label_vec_[iter_times_ % all_data_vec_length_].get()->offset(i, k, j, 0)]
= label_value;
if (drop_out_)
{
loss += 0.5 * label_weight * (label_value - tree_prediction_.data_at(i, k, tree_for_training_, j)) * (label_value - tree_prediction_.data_at(i, k, tree_for_training_, j));
}
else
{
for(int t = 0; t < num_trees_; t++)
{
loss += 0.5 * label_weight * (label_value - tree_prediction_.data_at(i, k, t, j)) * (label_value - tree_prediction_.data_at(i, k, t, j));
}
}
}
count++;
}
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, count);
}
template <typename Dtype>
void NeuralDecisionRegForestWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
if (propagate_down[1])
{
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0])
{
int count=0;
if (drop_out_) {
LOG(FATAL)<<"not implement";
caffe_set(mean_->count(), static_cast<Dtype>(0), mean_->mutable_cpu_diff());
caffe_set(sigma_square_->count(), static_cast<Dtype>(0), sigma_square_->mutable_cpu_diff());
caffe_set(sub_dimensions_->count(), static_cast<Dtype>(0), sub_dimensions_->mutable_cpu_diff());
Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
caffe_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff);
Dtype* inter_var_data = inter_var_.mutable_cpu_data();
memset(inter_var_data, (Dtype) 0.0, sizeof(Dtype) * inter_var_.count());
const Dtype* dn_data = dn_->cpu_data();
for (int i = 0; i < num_outer_; i++){
for (int k = 0; k < num_inner_; k++){
int t = tree_for_training_;{
for (int l = 0; l < num_leaf_nodes_per_tree_; l++){
for (int j = 0; j < num_classes_; j++){
const Dtype label_value = bottom[1]->data_at(i, j, k / dn_->width(), k % dn_->width())/scale_;
const Dtype label_weight = 0.0001 * (bottom[2]->data_at(i, j, k / dn_->width(), k % dn_->width()));
inter_var_data[inter_var_.offset(i, k, t, num_split_nodes_per_tree_ + l)] += label_weight * (label_value - tree_prediction_.data_at(i, k, t, j)) * mean_->data_at(t, l, j, 0);
}
inter_var_data[inter_var_.offset(i, k, t, num_split_nodes_per_tree_ + l)] *= routing_leaf_prob_.data_at(i, k, t, l);
}
for (int n = num_split_nodes_per_tree_ - 1; n >= 0; n--){
int dim_offset = (int)sub_dimensions_->data_at(t, n, 0, 0);
bottom_diff[bottom[0]->offset(i, dim_offset, k / bottom[0]->width(), k % bottom[0]->width())] =
dn_data[bottom[0]->offset(i, dim_offset, k / bottom[0]->width(), k % bottom[0]->width())] * inter_var_.data_at(i, k, t, 2 * n + 2)
- ((Dtype)1.0 - dn_data[bottom[0]->offset(i, dim_offset, k / bottom[0]->width(), k % bottom[0]->width())]) * inter_var_.data_at(i, k, t, 2 * n + 1);
inter_var_data[inter_var_.offset(i, k, t, n)] = inter_var_.data_at(i, k, t, 2 * n + 2) + inter_var_.data_at(i, k, t, 2 * n + 1);
}
count++;
}
}
}
} else {
cudaMemset(mean_->mutable_gpu_diff(), 0, sizeof(Dtype)*mean_->count());
cudaMemset(sigma_square_->mutable_gpu_diff(), 0, sizeof(Dtype)*sigma_square_->count());
cudaMemset(sub_dimensions_->mutable_gpu_diff(), 0, sizeof(Dtype)*sub_dimensions_->count());
cudaMemset(bottom[0]->mutable_gpu_diff(), 0, sizeof(Dtype)*bottom[0]->count());
cudaMemset(inter_var_.mutable_gpu_data(), 0, sizeof(Dtype)*inter_var_.count());
CHECK_EQ(dn_->width(), bottom[1]->width());
CHECK_EQ(dn_->height(), bottom[1]->height());
CHECK_EQ(dn_->width(), bottom[2]->width());
CHECK_EQ(dn_->height(), bottom[2]->height());
kernel_backward_all_reg<Dtype><<<CAFFE_GET_BLOCKS(num_outer_), CAFFE_CUDA_NUM_THREADS>>>(
bottom[0]->mutable_gpu_diff(), inter_var_.mutable_gpu_data(),tree_prediction_.gpu_data(),mean_->gpu_data(),
bottom[1]->gpu_data(), bottom[2]->gpu_data(), routing_leaf_prob_.gpu_data(), dn_->gpu_data(), sub_dimensions_->gpu_data(),
num_outer_, num_inner_,num_trees_, num_leaf_nodes_per_tree_, num_split_nodes_per_tree_, dn_->height(),
dn_->width(), num_classes_, num_dims_, scale_);
count = num_outer_*num_inner_*num_trees_;
}
// Scale down gradient
const Dtype loss_weight = top[0]->cpu_diff()[0];
caffe_scal(bottom[0]->count(), loss_weight / get_normalizer(normalization_, count), bottom[0]->mutable_cpu_diff());
}
if (iter_times_ && (iter_times_ + 1) % iter_times_in_epoch_ == 0)
UpdateClassLabelDistrGPU();
iter_times_++;
}
INSTANTIATE_LAYER_GPU_FUNCS(NeuralDecisionRegForestWithLossLayer);
}
|
fc222d19305ea3da6fc1a9578c52ce1611f45443.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUDAStdAfx.h"
#include "RT/Primitive/LightSource.hpp"
GLOBAL void normalizeIntensities(AreaLightSourceCollection aLights)
{
for(uint lightID = globalThreadId1D(); lightID < aLights.size(); lightID+= numThreads())
{
float probabilityRCP = aLights.getTotalWeight() / aLights.getWeight(lightID);
AreaLightSource ls = aLights.getLightWithID(lightID);
ls.intensity *= probabilityRCP;
}
}
HOST void AreaLightSourceCollection::normalizeALSIntensities()
{
if(mSize <= 1u)
return;
dim3 block ( 128 );
dim3 grid ( 180 );
hipLaunchKernelGGL(( normalizeIntensities), dim3(grid), dim3(block), 0, 0, *this);
hipDeviceSynchronize();
MY_CUT_CHECK_ERROR("Normalize LS intensity failed!\n");
} | fc222d19305ea3da6fc1a9578c52ce1611f45443.cu | #include "CUDAStdAfx.h"
#include "RT/Primitive/LightSource.hpp"
GLOBAL void normalizeIntensities(AreaLightSourceCollection aLights)
{
for(uint lightID = globalThreadId1D(); lightID < aLights.size(); lightID+= numThreads())
{
float probabilityRCP = aLights.getTotalWeight() / aLights.getWeight(lightID);
AreaLightSource ls = aLights.getLightWithID(lightID);
ls.intensity *= probabilityRCP;
}
}
HOST void AreaLightSourceCollection::normalizeALSIntensities()
{
if(mSize <= 1u)
return;
dim3 block ( 128 );
dim3 grid ( 180 );
normalizeIntensities<<< grid, block>>>(*this);
cudaDeviceSynchronize();
MY_CUT_CHECK_ERROR("Normalize LS intensity failed!\n");
} |
e16066a7b486dd60ce0c5598ca5d432ca91f6e5f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * Demo code for the CUDA programming lecture
 *
 * This program is a simple implementation of vector addition in CUDA
*
*
*/
#include <sys/time.h>
#include <cstdlib>
#include <cstdio>
// Device code
__global__ void VecAdd(int* A, int* B, int* C)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
C[i] = A[i] + B[i];
}
// Host code
int main()
{
int *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
int N = 33554432;
size_t size = N * sizeof(int);
int threadsPerBlock = 1024;
int blocksPerGrid = N / threadsPerBlock;
//Time measurement
timeval kernel_start, kernel_end;
timeval global_start, global_end;
float kernel_elapsed_time, global_elapsed_time;
// Allocate host memory
h_A = (int*)malloc(size);
h_B = (int*)malloc(size);
h_C = (int*)malloc(size);
//Initialization
for (int i = 0; i < N; i++)
{
h_A[i] = i;
h_B[i] = i;
}
// Allocate device memory
hipMalloc((void**)&d_A, size);
hipMalloc((void**)&d_B, size);
hipMalloc((void**)&d_C, size);
//Start global timer
gettimeofday(&global_start, NULL);
// Copy vectors from host memory to device memory
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
//Start kernel timer
gettimeofday(&kernel_start, NULL);
// Invoke kernel
hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C);
//Since kernel launch is asynchronous, block the host code until the kernel finishes
hipDeviceSynchronize();
//End kernel timer
gettimeofday(&kernel_end, NULL);
// Copy result from device memory to host memory
// h_C contains the result in host memory
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
//hipMemcpy is synchronous, no barrier is needed here
//Stop global timer
gettimeofday(&global_end, NULL);
//get kernel elapsed time
kernel_elapsed_time = 1000*(kernel_end.tv_sec - kernel_start.tv_sec) + (float)(kernel_end.tv_usec - kernel_start.tv_usec)/1000;
//get global elapsed time
global_elapsed_time = 1000*(global_end.tv_sec - global_start.tv_sec) + (float)(global_end.tv_usec - global_start.tv_usec)/1000;
printf("elapsed time of gpu vector addition(time cost by data transfer between host and device is excluded): %.2f ms\n", kernel_elapsed_time);
printf("elapsed time of gpu vector addition(time cost by data transfer between host and device is included): %.2f ms\n", global_elapsed_time);
//Free host memory
free(h_A);
free(h_B);
free(h_C);
//Free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
| e16066a7b486dd60ce0c5598ca5d432ca91f6e5f.cu | /**
 * Demo code for the CUDA programming lecture
 *
 * This program is a simple implementation of vector addition in CUDA
*
*
*/
#include <sys/time.h>
#include <cstdlib>
#include <cstdio>
// Device code
__global__ void VecAdd(int* A, int* B, int* C)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
C[i] = A[i] + B[i];
}
// Host code
int main()
{
int *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
int N = 33554432;
size_t size = N * sizeof(int);
int threadsPerBlock = 1024;
int blocksPerGrid = N / threadsPerBlock;
//Time measurement
timeval kernel_start, kernel_end;
timeval global_start, global_end;
float kernel_elapsed_time, global_elapsed_time;
// Allocate host memory
h_A = (int*)malloc(size);
h_B = (int*)malloc(size);
h_C = (int*)malloc(size);
//Initialization
for (int i = 0; i < N; i++)
{
h_A[i] = i;
h_B[i] = i;
}
// Allocate device memory
cudaMalloc((void**)&d_A, size);
cudaMalloc((void**)&d_B, size);
cudaMalloc((void**)&d_C, size);
//Start global timer
gettimeofday(&global_start, NULL);
// Copy vectors from host memory to device memory
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
//Start kernel timer
gettimeofday(&kernel_start, NULL);
// Invoke kernel
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C);
//Since the kernel launch is asynchronous, block the host code until the kernel finishes
cudaDeviceSynchronize();
//End kernel timer
gettimeofday(&kernel_end, NULL);
// Copy result from device memory to host memory
// h_C contains the result in host memory
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
//cudaMemcpy is synchronous, so no extra barrier is needed here
//Stop global timer
gettimeofday(&global_end, NULL);
//get kernel elapsed time
kernel_elapsed_time = 1000*(kernel_end.tv_sec - kernel_start.tv_sec) + (float)(kernel_end.tv_usec - kernel_start.tv_usec)/1000;
//get global elapsed time
global_elapsed_time = 1000*(global_end.tv_sec - global_start.tv_sec) + (float)(global_end.tv_usec - global_start.tv_usec)/1000;
printf("elapsed time of gpu vector addition(time cost by data transfer between host and device is excluded): %.2f ms\n", kernel_elapsed_time);
printf("elapsed time of gpu vector addition(time cost by data transfer between host and device is included): %.2f ms\n", global_elapsed_time);
//Free host memory
free(h_A);
free(h_B);
free(h_C);
//Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
|
697f5d2eab850afdb18077a82ec039dfcdaece63.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication which makes use of shared memory
* to ensure data reuse, the matrix multiplication is done using tiling approach.
* It has been written for clarity of exposition to illustrate various CUDA programming
* principles, not with the goal of providing the most performant generic kernel for matrix multiplication.
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
double gpu_start;
double gpu_stop;
double cpu_start;
double cpu_stop;
double application_start;
double application_stop;
double compute_migrate_start;
double compute_migrate_stop;
double malloc_start;
double malloc_stop;
double free_start;
double free_stop;
double cuda_malloc_start;
double cuda_malloc_stop;
double cuda_free_start;
double cuda_free_stop;
double init_data_start;
double init_data_stop;
double h2d_memcpy_start;
double h2d_memcpy_stop;
double d2h_memcpy_start;
double d2h_memcpy_stop;
double h2d_prefetch_start;
double h2d_prefetch_stop;
double d2h_prefetch_start;
double d2h_prefetch_stop;
double advise_start;
double advise_stop;
double advise_read_start;
double advise_read_stop;
double misc_start;
double misc_stop;
double misc_timer;
int nIter = 10;
bool validate = false;
double mysecond(){
struct timeval tp;
struct timezone tzp;
int i;
i = gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
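// Each thread block computes one BLOCK_SIZE x BLOCK_SIZE tile of C, sweeping over
// the wA / BLOCK_SIZE matching tile pairs of A and B; each tile pair is staged
// through shared memory (As, Bs) so its elements are reused BLOCK_SIZE times
// without being re-read from global memory.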
template <int BLOCK_SIZE> __global__ void MatrixMulCUDA(float *C, float *A,
float *B, int wA,
int wB) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
void ConstantInit(float *data, int size, float val) {
for (int i = 0; i < size; ++i) {
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int MatrixMultiply(int argc, char **argv,
int block_size, const dim3 &dimsA,
const dim3 &dimsB) {
int devID = findCudaDevice(argc, (const char **)argv);
application_start = mysecond();
cuda_malloc_start = application_start;
// Allocate host memory for matrices A and B
unsigned long int size_A = dimsA.x * dimsA.y;
unsigned long int mem_size_A = sizeof(float) * size_A;
// float *h_A = reinterpret_cast<float *>(malloc(mem_size_A));
float *h_A;
checkCudaErrors(hipMallocManaged(reinterpret_cast<void **>(&h_A), mem_size_A));
unsigned long int size_B = dimsB.x * dimsB.y;
unsigned long int mem_size_B = sizeof(float) * size_B;
// float *h_B = reinterpret_cast<float *>(malloc(mem_size_B));
float *h_B;
checkCudaErrors(hipMallocManaged(reinterpret_cast<void **>(&h_B), mem_size_B));
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned long int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
// float *h_C = reinterpret_cast<float *>(malloc(mem_size_C));
float *h_C;
checkCudaErrors(hipMallocManaged(reinterpret_cast<void **>(&h_C), mem_size_C));
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
// Allocate device memory
float *d_A, *d_B, *d_C;
d_A = h_A;
d_B = h_B;
d_C = h_C;
cuda_malloc_stop = mysecond();
malloc_start = cuda_malloc_stop;
float *h_C_host = (float*)malloc(mem_size_C);
assert(h_C_host);
malloc_stop = mysecond();
advise_start = malloc_stop;
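// Unified-memory hints: SetPreferredLocation asks the driver to keep these pages
// resident on GPU devID, SetAccessedBy keeps a CPU mapping so host accesses do not
// force migration, and SetReadMostly (applied to A and B after initialization)
// additionally allows read-only duplication of the pages.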
hipMemAdvise(h_A, mem_size_A, hipMemAdviseSetPreferredLocation, devID);
hipMemAdvise(h_A, mem_size_A, hipMemAdviseSetAccessedBy, hipCpuDeviceId);
hipMemAdvise(h_B, mem_size_B, hipMemAdviseSetPreferredLocation, devID);
hipMemAdvise(h_B, mem_size_B, hipMemAdviseSetAccessedBy, hipCpuDeviceId);
hipMemAdvise(h_C, mem_size_C, hipMemAdviseSetPreferredLocation, devID);
hipMemAdvise(h_C, mem_size_C, hipMemAdviseSetAccessedBy, hipCpuDeviceId);
advise_stop = mysecond();
// Initialize host memory
init_data_start = advise_stop;
const float valB = 0.01f;
ConstantInit(h_A, size_A, 1.0f);
ConstantInit(h_B, size_B, valB);
init_data_stop = mysecond();
advise_read_start = init_data_stop;
hipMemAdvise(h_A, mem_size_A, hipMemAdviseSetReadMostly, devID);
hipMemAdvise(h_B, mem_size_B, hipMemAdviseSetReadMostly, devID);
advise_read_stop = mysecond();
// copy host memory to device
//checkCudaErrors(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice));
//checkCudaErrors(hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice));
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
compute_migrate_start = mysecond();
gpu_start = compute_migrate_start;
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16) {
hipLaunchKernelGGL(( MatrixMulCUDA<16>) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B,
dimsA.x, dimsB.x);
} else {
hipLaunchKernelGGL(( MatrixMulCUDA<32>) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B,
dimsA.x, dimsB.x);
}
printf("done\n");
hipDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
hipEvent_t start;
checkCudaErrors(hipEventCreate(&start));
hipEvent_t stop;
checkCudaErrors(hipEventCreate(&stop));
// Record the start event
checkCudaErrors(hipEventRecord(start, NULL));
// Execute the kernel
//int nIter = 300;
for (int j = 0; j < nIter; j++) {
if (block_size == 16) {
hipLaunchKernelGGL(( MatrixMulCUDA<16>) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B,
dimsA.x, dimsB.x);
} else {
hipLaunchKernelGGL(( MatrixMulCUDA<32>) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B,
dimsA.x, dimsB.x);
}
}
//hipMemAdvise(h_C, mem_size_C, hipMemAdviseUnsetAccessedBy, devID);
//hipMemAdvise(h_C, mem_size_C, hipMemAdviseSetAccessedBy, hipCpuDeviceId);
// Record the stop event
checkCudaErrors(hipEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
gpu_stop = mysecond();
misc_start = gpu_stop;
float msecTotal = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
" WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
//checkCudaErrors(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost));
bool correct = true;
misc_stop = mysecond();
misc_timer = misc_stop - misc_start;
if (validate) {
printf("Checking computed result for correctness: ");
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6; // machine zero
for (int i = 0; i < static_cast<int>(dimsC.x * dimsC.y); i++) {
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
i, h_C[i], dimsA.x * valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
else {
d2h_memcpy_start = mysecond();
memcpy(h_C_host, h_C, mem_size_C);
d2h_memcpy_stop = mysecond();
}
compute_migrate_stop = mysecond();
// Clean up memory
//free(h_A);
//free(h_B);
//free(h_C);
cuda_free_start = compute_migrate_stop;
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
cuda_free_stop = mysecond();
free_start = cuda_free_stop;
free(h_C_host);
free_stop = mysecond();
application_stop = free_stop;
printf("\nGPU Time: %f\n", gpu_stop - gpu_start);
printf("CPU Time: %f\n", cpu_stop - cpu_start);
printf("malloc timer: %f\n", malloc_stop - malloc_start);
printf("free timer: %f\n", free_stop - free_start);
printf("cuda malloc timer: %f\n", cuda_malloc_stop - cuda_malloc_start);
printf("cuda free timer: %f\n", cuda_free_stop - cuda_free_start);
printf("Init data timer: %f\n", init_data_stop - init_data_start);
printf("\nAdvise timer: %f\n", (advise_stop - advise_start) + (advise_read_stop - advise_read_start));
printf("\nH2D async prefetch timer: %f\n", h2d_prefetch_stop - h2d_prefetch_start);
printf("D2H async prefetch timer: %f\n", d2h_prefetch_stop - d2h_prefetch_start);
printf("misc timer: %f\n", misc_timer);
//printf("\nH2D timer: %f\n", h2d_memcpy_stop - h2d_memcpy_start);
printf("\nD2H timer: %f\n", d2h_memcpy_stop - d2h_memcpy_start);
printf("\ncompute migrate timer: %f\n", compute_migrate_stop - compute_migrate_start);
printf("application timer: %f\n\n", application_stop - application_start);
printf("\nNOTE: The CUDA Samples are not meant for performance"\
"measurements. Results may vary when GPU Boost is enabled.\n");
if (correct) {
return EXIT_SUCCESS;
} else {
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv) {
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?")) {
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices" \
" must be equal.\n");
exit(EXIT_SUCCESS);
}
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
int dev = findCudaDevice(argc, (const char **)argv);
int block_size = 32;
dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1);
dim3 dimsB(5 * 4 * block_size, 5 * 2 * block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA")) {
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA")) {
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB")) {
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB")) {
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (checkCmdLineFlag(argc, (const char **)argv, "iterations")) {
nIter = getCmdLineArgumentInt(argc, (const char **)argv, "iterations");
}
if (checkCmdLineFlag(argc, (const char **)argv, "validate")) {
validate = true;
}
if (dimsA.x != dimsB.y) {
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y,
dimsB.x, dimsB.y);
double start_time = mysecond();
int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB);
double end_time = mysecond();
double elapsedTime = end_time - start_time;
printf("runtime: %f\n", elapsedTime);
exit(matrix_result);
}
| 697f5d2eab850afdb18077a82ec039dfcdaece63.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication which makes use of shared memory
* to ensure data reuse, the matrix multiplication is done using tiling approach.
* It has been written for clarity of exposition to illustrate various CUDA programming
* principles, not with the goal of providing the most performant generic kernel for matrix multiplication.
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
double gpu_start;
double gpu_stop;
double cpu_start;
double cpu_stop;
double application_start;
double application_stop;
double compute_migrate_start;
double compute_migrate_stop;
double malloc_start;
double malloc_stop;
double free_start;
double free_stop;
double cuda_malloc_start;
double cuda_malloc_stop;
double cuda_free_start;
double cuda_free_stop;
double init_data_start;
double init_data_stop;
double h2d_memcpy_start;
double h2d_memcpy_stop;
double d2h_memcpy_start;
double d2h_memcpy_stop;
double h2d_prefetch_start;
double h2d_prefetch_stop;
double d2h_prefetch_start;
double d2h_prefetch_stop;
double advise_start;
double advise_stop;
double advise_read_start;
double advise_read_stop;
double misc_start;
double misc_stop;
double misc_timer;
int nIter = 10;
bool validate = false;
double mysecond(){
struct timeval tp;
struct timezone tzp;
int i;
i = gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
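// Each thread block computes one BLOCK_SIZE x BLOCK_SIZE tile of C, sweeping over
// the wA / BLOCK_SIZE matching tile pairs of A and B; each tile pair is staged
// through shared memory (As, Bs) so its elements are reused BLOCK_SIZE times
// without being re-read from global memory.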
template <int BLOCK_SIZE> __global__ void MatrixMulCUDA(float *C, float *A,
float *B, int wA,
int wB) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k) {
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
void ConstantInit(float *data, int size, float val) {
for (int i = 0; i < size; ++i) {
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int MatrixMultiply(int argc, char **argv,
int block_size, const dim3 &dimsA,
const dim3 &dimsB) {
int devID = findCudaDevice(argc, (const char **)argv);
application_start = mysecond();
cuda_malloc_start = application_start;
// Allocate host memory for matrices A and B
unsigned long int size_A = dimsA.x * dimsA.y;
unsigned long int mem_size_A = sizeof(float) * size_A;
// float *h_A = reinterpret_cast<float *>(malloc(mem_size_A));
float *h_A;
checkCudaErrors(cudaMallocManaged(reinterpret_cast<void **>(&h_A), mem_size_A));
unsigned long int size_B = dimsB.x * dimsB.y;
unsigned long int mem_size_B = sizeof(float) * size_B;
// float *h_B = reinterpret_cast<float *>(malloc(mem_size_B));
float *h_B;
checkCudaErrors(cudaMallocManaged(reinterpret_cast<void **>(&h_B), mem_size_B));
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned long int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
// float *h_C = reinterpret_cast<float *>(malloc(mem_size_C));
float *h_C;
checkCudaErrors(cudaMallocManaged(reinterpret_cast<void **>(&h_C), mem_size_C));
if (h_C == NULL) {
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
// Allocate device memory
float *d_A, *d_B, *d_C;
d_A = h_A;
d_B = h_B;
d_C = h_C;
cuda_malloc_stop = mysecond();
malloc_start = cuda_malloc_stop;
float *h_C_host = (float*)malloc(mem_size_C);
assert(h_C_host);
malloc_stop = mysecond();
advise_start = malloc_stop;
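// Unified-memory hints: SetPreferredLocation asks the driver to keep these pages
// resident on GPU devID, SetAccessedBy keeps a CPU mapping so host accesses do not
// force migration, and SetReadMostly (applied to A and B after initialization)
// additionally allows read-only duplication of the pages.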
cudaMemAdvise(h_A, mem_size_A, cudaMemAdviseSetPreferredLocation, devID);
cudaMemAdvise(h_A, mem_size_A, cudaMemAdviseSetAccessedBy, cudaCpuDeviceId);
cudaMemAdvise(h_B, mem_size_B, cudaMemAdviseSetPreferredLocation, devID);
cudaMemAdvise(h_B, mem_size_B, cudaMemAdviseSetAccessedBy, cudaCpuDeviceId);
cudaMemAdvise(h_C, mem_size_C, cudaMemAdviseSetPreferredLocation, devID);
cudaMemAdvise(h_C, mem_size_C, cudaMemAdviseSetAccessedBy, cudaCpuDeviceId);
advise_stop = mysecond();
// Initialize host memory
init_data_start = advise_stop;
const float valB = 0.01f;
ConstantInit(h_A, size_A, 1.0f);
ConstantInit(h_B, size_B, valB);
init_data_stop = mysecond();
advise_read_start = init_data_stop;
cudaMemAdvise(h_A, mem_size_A, cudaMemAdviseSetReadMostly, devID);
cudaMemAdvise(h_B, mem_size_B, cudaMemAdviseSetReadMostly, devID);
advise_read_stop = mysecond();
// copy host memory to device
//checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice));
//checkCudaErrors(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice));
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
compute_migrate_start = mysecond();
gpu_start = compute_migrate_start;
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16) {
MatrixMulCUDA<16> <<< grid, threads >>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
} else {
MatrixMulCUDA<32> <<< grid, threads >>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
}
printf("done\n");
cudaDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
checkCudaErrors(cudaEventCreate(&start));
cudaEvent_t stop;
checkCudaErrors(cudaEventCreate(&stop));
// Record the start event
checkCudaErrors(cudaEventRecord(start, NULL));
// Execute the kernel
//int nIter = 300;
for (int j = 0; j < nIter; j++) {
if (block_size == 16) {
MatrixMulCUDA<16> <<< grid, threads >>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
} else {
MatrixMulCUDA<32> <<< grid, threads >>>(d_C, d_A, d_B,
dimsA.x, dimsB.x);
}
}
//cudaMemAdvise(h_C, mem_size_C, cudaMemAdviseUnsetAccessedBy, devID);
//cudaMemAdvise(h_C, mem_size_C, cudaMemAdviseSetAccessedBy, cudaCpuDeviceId);
// Record the stop event
checkCudaErrors(cudaEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
gpu_stop = mysecond();
misc_start = gpu_stop;
float msecTotal = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
" WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
//checkCudaErrors(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost));
bool correct = true;
misc_stop = mysecond();
misc_timer = misc_stop - misc_start;
if (validate) {
printf("Checking computed result for correctness: ");
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6; // machine zero
for (int i = 0; i < static_cast<int>(dimsC.x * dimsC.y); i++) {
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps) {
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
i, h_C[i], dimsA.x * valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
}
else {
d2h_memcpy_start = mysecond();
memcpy(h_C_host, h_C, mem_size_C);
d2h_memcpy_stop = mysecond();
}
compute_migrate_stop = mysecond();
// Clean up memory
//free(h_A);
//free(h_B);
//free(h_C);
cuda_free_start = compute_migrate_stop;
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
cuda_free_stop = mysecond();
free_start = cuda_free_stop;
free(h_C_host);
free_stop = mysecond();
application_stop = free_stop;
printf("\nGPU Time: %f\n", gpu_stop - gpu_start);
printf("CPU Time: %f\n", cpu_stop - cpu_start);
printf("malloc timer: %f\n", malloc_stop - malloc_start);
printf("free timer: %f\n", free_stop - free_start);
printf("cuda malloc timer: %f\n", cuda_malloc_stop - cuda_malloc_start);
printf("cuda free timer: %f\n", cuda_free_stop - cuda_free_start);
printf("Init data timer: %f\n", init_data_stop - init_data_start);
printf("\nAdvise timer: %f\n", (advise_stop - advise_start) + (advise_read_stop - advise_read_start));
printf("\nH2D async prefetch timer: %f\n", h2d_prefetch_stop - h2d_prefetch_start);
printf("D2H async prefetch timer: %f\n", d2h_prefetch_stop - d2h_prefetch_start);
printf("misc timer: %f\n", misc_timer);
//printf("\nH2D timer: %f\n", h2d_memcpy_stop - h2d_memcpy_start);
printf("\nD2H timer: %f\n", d2h_memcpy_stop - d2h_memcpy_start);
printf("\ncompute migrate timer: %f\n", compute_migrate_stop - compute_migrate_start);
printf("application timer: %f\n\n", application_stop - application_start);
printf("\nNOTE: The CUDA Samples are not meant for performance"\
"measurements. Results may vary when GPU Boost is enabled.\n");
if (correct) {
return EXIT_SUCCESS;
} else {
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv) {
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?")) {
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf(" Note: Outer matrix dimensions of A & B matrices" \
" must be equal.\n");
exit(EXIT_SUCCESS);
}
// This will pick the best possible CUDA capable device, otherwise
// override the device ID based on input provided at the command line
int dev = findCudaDevice(argc, (const char **)argv);
int block_size = 32;
dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1);
dim3 dimsB(5 * 4 * block_size, 5 * 2 * block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA")) {
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA")) {
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB")) {
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB")) {
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (checkCmdLineFlag(argc, (const char **)argv, "iterations")) {
nIter = getCmdLineArgumentInt(argc, (const char **)argv, "iterations");
}
if (checkCmdLineFlag(argc, (const char **)argv, "validate")) {
validate = true;
}
if (dimsA.x != dimsB.y) {
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y,
dimsB.x, dimsB.y);
double start_time = mysecond();
int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB);
double end_time = mysecond();
double elapsedTime = end_time - start_time;
printf("runtime: %f\n", elapsedTime);
exit(matrix_result);
}
|
d02e2e9861f15ac4f60e488e571a676718cb9358.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudaParticleVV.hh"
#include "kernelfuncs.h"
#include "kerneltemplate.hh"
void cudaParticleVV::setup(int n) {
cudaParticleBase::setup(n);
Fold = a;
}
void cudaParticleVV::clearForce(void) {
cudaParticleBase::clearForce();
hipLaunchKernelGGL(( clearArray_F4), dim3(MPnum), dim3(THnum1D), 0, 0, Fold, N);
if (withInfo) ErrorInfo("cudaParticleVV::clearForce");
}
void cudaParticleVV::TimeEvolution(real dt) {
/**
*
* full scheme of Velocity Verlet at time t in our way is
*
* + SHAKE constraints
* + calc F(t) from r(t)
* + calc v(t) from v(t-dt/2), F(t)
* + RATTLE constraints
* + calc v(t+dt/2) from v(t), F(t)
* + calc r(t+dt) from v(t+dt/2)
*
 * in another style, the iteration loop starts from calc v(t+dt/2)
*/
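// As a gloss (the kernel body lives in kerneltemplate.hh, not shown here): with
// Fold = F(t-dt) and F = F(t), standard velocity Verlet updates
//   v(t)    = v(t-dt) + dt * (F + Fold) / (2 m)
//   r(t+dt) = r(t) + dt * v(t) + dt^2 * F / (2 m)
// which is presumably what propagateVelocityVerlet_F4 applies, using minv = 1/m.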
hipLaunchKernelGGL(( propagateVelocityVerlet_F4), dim3(MPnum), dim3(THnum1D), 0, 0, r, dt, v, F, Fold, minv, N);
if (withInfo) ErrorInfo("cudaParticleVV::TimeEvolution");
}
| d02e2e9861f15ac4f60e488e571a676718cb9358.cu | #include "cudaParticleVV.hh"
#include "kernelfuncs.h"
#include "kerneltemplate.hh"
void cudaParticleVV::setup(int n) {
cudaParticleBase::setup(n);
Fold = a;
}
void cudaParticleVV::clearForce(void) {
cudaParticleBase::clearForce();
clearArray_F4<<<MPnum, THnum1D>>>(Fold, N);
if (withInfo) ErrorInfo("cudaParticleVV::clearForce");
}
void cudaParticleVV::TimeEvolution(real dt) {
/**
*
* full scheme of Velocity Verlet at time t in our way is
*
* + SHAKE constraints
* + calc F(t) from r(t)
* + calc v(t) from v(t-dt/2), F(t)
* + RATTLE constraints
* + calc v(t+dt/2) from v(t), F(t)
* + calc r(t+dt) from v(t+dt/2)
*
 * in another style, the iteration loop starts from calc v(t+dt/2)
*/
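// As a gloss (the kernel body lives in kerneltemplate.hh, not shown here): with
// Fold = F(t-dt) and F = F(t), standard velocity Verlet updates
//   v(t)    = v(t-dt) + dt * (F + Fold) / (2 m)
//   r(t+dt) = r(t) + dt * v(t) + dt^2 * F / (2 m)
// which is presumably what propagateVelocityVerlet_F4 applies, using minv = 1/m.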
propagateVelocityVerlet_F4<<<MPnum, THnum1D>>>(r, dt, v, F, Fold, minv, N);
if (withInfo) ErrorInfo("cudaParticleVV::TimeEvolution");
}
|
73c9eee0d899db730fee97d71bc3020bad077e13.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Indice1D.h"
#include "reductionADDLock.h"
#include <hiprand/hiprand_kernel.h>
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void montecarlo(hiprandState_t* tabDevGeneratorGM, long nbDarts, uint m, long* ptrDevNxTotal);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
__device__ float f(float x);
__device__ int mutex=0;
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
* output : void required !!
*/
__global__ void montecarlo(hiprandState_t* tabDevGeneratorGM, long nbDarts, uint m, long* ptrDevNxTotal)
{
extern __shared__ long TAB_SM[];
//reductionIntraThread(TAB_SM, nbSlice);
// throw the darts
// check whether each one lands below the curve
// accumulate the hit count myNx
const int NB_THREAD=Indice1D::nbThread();
const int TID=Indice1D::tid();
const int TIDLocal = Indice1D::tidLocal();
hiprandState_t generator = tabDevGeneratorGM[TID];
long localNx = 0;
float x;
float y;
for(long i = 0; i < nbDarts; i++)
{
x = hiprand_uniform(&generator);
// scaled by m, as done on the line below
y = hiprand_uniform(&generator) * m;
if ( y < f(x) )
{
localNx ++;
}
}
TAB_SM[TIDLocal] = localNx;
__syncthreads();
Lock l = Lock(&mutex);
reductionADD<long>(TAB_SM, ptrDevNxTotal, &l);
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
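// Note: integral_0^1 4/(1+x^2) dx = pi, so with darts drawn uniformly from
// [0,1] x [0,m] the estimator m * (hits / darts) converges to pi.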
__device__ float f(float x)
{
return 4/(1+x*x);
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 73c9eee0d899db730fee97d71bc3020bad077e13.cu | #include "Indice1D.h"
#include "reductionADDLock.h"
#include <curand_kernel.h>
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void montecarlo(curandState* tabDevGeneratorGM, long nbDarts, uint m, long* ptrDevNxTotal);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
__device__ float f(float x);
__device__ int mutex=0;
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
* output : void required !!
*/
__global__ void montecarlo(curandState* tabDevGeneratorGM, long nbDarts, uint m, long* ptrDevNxTotal)
{
extern __shared__ long TAB_SM[];
//reductionIntraThread(TAB_SM, nbSlice);
// throw the darts
// check whether each one lands below the curve
// accumulate the hit count myNx
const int NB_THREAD=Indice1D::nbThread();
const int TID=Indice1D::tid();
const int TIDLocal = Indice1D::tidLocal();
curandState generator = tabDevGeneratorGM[TID];
long localNx = 0;
float x;
float y;
for(long i = 0; i < nbDarts; i++)
{
x = curand_uniform(&generator);
// scaled by m, as done on the line below
y = curand_uniform(&generator) * m;
if ( y < f(x) )
{
localNx ++;
}
}
TAB_SM[TIDLocal] = localNx;
__syncthreads();
Lock l = Lock(&mutex);
reductionADD<long>(TAB_SM, ptrDevNxTotal, &l);
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
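// Note: integral_0^1 4/(1+x^2) dx = pi, so with darts drawn uniformly from
// [0,1] x [0,m] the estimator m * (hits / darts) converges to pi.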
__device__ float f(float x)
{
return 4/(1+x*x);
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
fbad09c9918c2b962d3a340801d0781c7e94af2d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* HydroGPU.cu
*
* Created on: Aug 2, 2012
* Author: cferenba
*
* Copyright (c) 2012, Los Alamos National Security, LLC.
* All rights reserved.
* Use of this source code is governed by a BSD-style open-source
* license; see top-level LICENSE file for full license text.
*/
#include "HydroGPU.hh"
#include <cmath>
#include <cstdio>
#include <algorithm>
#include <hip/hip_runtime.h>
#include <thrust/copy.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include "Memory.hh"
#include "Vec2.hh"
using namespace std;
const int CHUNK_SIZE = 64;
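// CHUNK_SIZE doubles as the CUDA thread-block size and as the maximum number of
// sides (or points/zones) processed per block; the static __shared__ scratch
// arrays below (dss3, dss4, ctemp, ctemp2) are sized to match.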
static __constant__ int gpuinit;
static __constant__ int numsch;
static __constant__ int nump;
static __constant__ int numz;
static __constant__ int nums;
static __constant__ int numc;
static __constant__ double dt;
static __constant__ double pgamma, pssmin;
static __constant__ double talfa, tssmin;
static __constant__ double qgamma, q1, q2;
static __constant__ double hcfl, hcflv;
static __constant__ double2 vfixx, vfixy;
static __constant__ int numbcx, numbcy;
static __constant__ double bcx[2], bcy[2];
static __device__ int numsbad;
static __device__ double dtnext;
static __device__ int idtnext;
static __constant__ const int* schsfirst;
static __constant__ const int* schslast;
static __constant__ const int* schzfirst;
static __constant__ const int* schzlast;
static __constant__ const int* mapsp1;
static __constant__ const int* mapsp2;
static __constant__ const int* mapsz;
static __constant__ const int* mapss4;
static __constant__ const int *mapspkey, *mapspval;
static __constant__ const int *mappsfirst, *mapssnext;
static __constant__ const int* znump;
static __constant__ double2 *px, *pxp, *px0;
static __constant__ double2 *zx, *zxp;
static __constant__ double2 *pu, *pu0;
static __constant__ double2* pap;
static __constant__ double2* ssurf;
static __constant__ const double* zm;
static __constant__ double *zr, *zrp;
static __constant__ double *ze, *zetot;
static __constant__ double *zw, *zwrate;
static __constant__ double *zp, *zss;
static __constant__ const double* smf;
static __constant__ double *careap, *sareap, *svolp, *zareap, *zvolp;
static __constant__ double *sarea, *svol, *zarea, *zvol, *zvol0;
static __constant__ double *zdl, *zdu;
static __constant__ double *cmaswt, *pmaswt;
static __constant__ double2 *sfp, *sft, *sfq, *cftot, *pf;
static __constant__ double* cevol;
static __constant__ double* cdu;
static __constant__ double* cdiv;
static __constant__ double2* zuc;
static __constant__ double* crmu;
static __constant__ double2* cqe;
static __constant__ double* ccos;
static __constant__ double* cw;
static __shared__ int dss3[CHUNK_SIZE];
static __shared__ int dss4[CHUNK_SIZE];
static __shared__ double ctemp[CHUNK_SIZE];
static __shared__ double2 ctemp2[CHUNK_SIZE];
static int numschH, numpchH, numzchH;
static int *schsfirstH, *schslastH, *schzfirstH, *schzlastH;
static int *schsfirstD, *schslastD, *schzfirstD, *schzlastD;
static int *mapsp1D, *mapsp2D, *mapszD, *mapss4D, *znumpD;
static int *mapspkeyD, *mapspvalD;
static int *mappsfirstD, *mapssnextD;
static double2 *pxD, *pxpD, *px0D, *zxD, *zxpD, *puD, *pu0D, *papD,
*ssurfD, *sfpD, *sftD, *sfqD, *cftotD, *pfD, *zucD, *cqeD;
static double *zmD, *zrD, *zrpD,
*sareaD, *svolD, *zareaD, *zvolD, *zvol0D, *zdlD, *zduD,
*zeD, *zetot0D, *zetotD, *zwD, *zwrateD,
*zpD, *zssD, *smfD, *careapD, *sareapD, *svolpD, *zareapD, *zvolpD;
static double *cmaswtD, *pmaswtD;
static double *cevolD, *cduD, *cdivD, *crmuD, *ccosD, *cwD;
int checkCudaError(const hipError_t err, const char* cmd)
{
if(err) {
printf("CUDA error in command '%s'\n", cmd); \
printf("Error message: %s\n", hipGetErrorString(err)); \
}
return err;
}
#define CHKERR(cmd) checkCudaError(cmd, #cmd)
static __device__ void advPosHalf(
const int p,
const double2* __restrict__ px0,
const double2* __restrict__ pu0,
const double dt,
double2* __restrict__ pxp) {
pxp[p] = px0[p] + pu0[p] * dt;
}
static __device__ void calcZoneCtrs(
const int s,
const int s0,
const int z,
const int p1,
const double2* __restrict__ px,
double2* __restrict__ zx) {
ctemp2[s0] = px[p1];
__syncthreads();
double2 zxtot = ctemp2[s0];
double zct = 1.;
for (int sn = s0 + dss4[s0]; sn != s0; sn += dss4[sn]) {
zxtot += ctemp2[sn];
zct += 1.;
}
zx[z] = zxtot / zct;
}
static __device__ void calcSideVols(
const int s,
const int z,
const int p1,
const int p2,
const double2* __restrict__ px,
const double2* __restrict__ zx,
double* __restrict__ sarea,
double* __restrict__ svol)
{
const double third = 1. / 3.;
double sa = 0.5 * cross(px[p2] - px[p1], zx[z] - px[p1]);
double sv = third * sa * (px[p1].x + px[p2].x + zx[z].x);
sarea[s] = sa;
svol[s] = sv;
if (sv <= 0.) atomicAdd(&numsbad, 1);
}
static __device__ void calcZoneVols(
const int s,
const int s0,
const int z,
const double* __restrict__ sarea,
const double* __restrict__ svol,
double* __restrict__ zarea,
double* __restrict__ zvol)
{
// make sure all side volumes have been stored
__syncthreads();
double zatot = sarea[s];
double zvtot = svol[s];
for (int sn = mapss4[s]; sn != s; sn = mapss4[sn]) {
zatot += sarea[sn];
zvtot += svol[sn];
}
zarea[z] = zatot;
zvol[z] = zvtot;
}
static __device__ void meshCalcCharLen(
const int s,
const int s0,
const int s3,
const int z,
const int p1,
const int p2,
const int* __restrict__ znump,
const double2* __restrict__ px,
const double2* __restrict__ zx,
double* __restrict__ zdl) {
double area = 0.5 * cross(px[p2] - px[p1], zx[z] - px[p1]);
double base = length(px[p2] - px[p1]);
double fac = (znump[z] == 3 ? 3. : 4.);
double sdl = fac * area / base;
ctemp[s0] = sdl;
__syncthreads();
double sdlmin = ctemp[s0];
for (int sn = s0 + dss4[s0]; sn != s0; sn += dss4[sn]) {
sdlmin = min(sdlmin, ctemp[sn]);
}
zdl[z] = sdlmin;
}
static __device__ void hydroCalcRho(const int z,
const double* __restrict__ zm,
const double* __restrict__ zvol,
double* __restrict__ zr)
{
zr[z] = zm[z] / zvol[z];
}
static __device__ void pgasCalcForce(
const int s,
const int z,
const double* __restrict__ zp,
const double2* __restrict__ ssurf,
double2* __restrict__ sf) {
sf[s] = -zp[z] * ssurf[s];
}
static __device__ void ttsCalcForce(
const int s,
const int z,
const double* __restrict__ zarea,
const double* __restrict__ zr,
const double* __restrict__ zss,
const double* __restrict__ sarea,
const double* __restrict__ smf,
const double2* __restrict__ ssurf,
double2* __restrict__ sf) {
double svfacinv = zarea[z] / sarea[s];
double srho = zr[z] * smf[s] * svfacinv;
double sstmp = max(zss[z], tssmin);
sstmp = talfa * sstmp * sstmp;
double sdp = sstmp * (srho - zr[z]);
sf[s] = -sdp * ssurf[s];
}
// Routine number [2] in the full algorithm
// [2.1] Find the corner divergence
// [2.2] Compute the cos angle for c
// [2.3] Find the evolution factor cevol(c)
// and the Delta u(c) = du(c)
static __device__ void qcsSetCornerDiv(
const int s,
const int s0,
const int s3,
const int z,
const int p1,
const int p2) {
// [1] Compute a zone-centered velocity
ctemp2[s0] = pu[p1];
__syncthreads();
double2 zutot = ctemp2[s0];
double zct = 1.;
for (int sn = s0 + dss4[s0]; sn != s0; sn += dss4[sn]) {
zutot += ctemp2[sn];
zct += 1.;
}
zuc[z] = zutot / zct;
// [2] Divergence at the corner
// Associated zone, corner, point
const int p0 = mapsp1[s3];
double2 up0 = pu[p1];
double2 xp0 = pxp[p1];
double2 up1 = 0.5 * (pu[p1] + pu[p2]);
double2 xp1 = 0.5 * (pxp[p1] + pxp[p2]);
double2 up2 = zuc[z];
double2 xp2 = zxp[z];
double2 up3 = 0.5 * (pu[p0] + pu[p1]);
double2 xp3 = 0.5 * (pxp[p0] + pxp[p1]);
// position, velocity diffs along diagonals
double2 up2m0 = up2 - up0;
double2 xp2m0 = xp2 - xp0;
double2 up3m1 = up3 - up1;
double2 xp3m1 = xp3 - xp1;
// average corner-centered velocity
double2 duav = 0.25 * (up0 + up1 + up2 + up3);
// compute cosine angle
double2 v1 = xp1 - xp0;
double2 v2 = xp3 - xp0;
double de1 = length(v1);
double de2 = length(v2);
double minelen = 2.0 * min(de1, de2);
ccos[s] = (minelen < 1.e-12 ? 0. : dot(v1, v2) / (de1 * de2));
// compute 2d cartesian volume of corner
double cvolume = 0.5 * cross(xp2m0, xp3m1);
careap[s] = cvolume;
// compute velocity divergence of corner
cdiv[s] = (cross(up2m0, xp3m1) - cross(up3m1, xp2m0)) /
(2.0 * cvolume);
// compute delta velocity
double dv1 = length2(up2m0 - up3m1);
double dv2 = length2(up2m0 + up3m1);
double du = sqrt(max(dv1, dv2));
cdu[s] = (cdiv[s] < 0.0 ? du : 0.);
// compute evolution factor
double2 dxx1 = 0.5 * (xp2m0 - xp3m1);
double2 dxx2 = 0.5 * (xp2m0 + xp3m1);
double dx1 = length(dxx1);
double dx2 = length(dxx2);
double test1 = abs(dot(dxx1, duav) * dx2);
double test2 = abs(dot(dxx2, duav) * dx1);
double num = (test1 > test2 ? dx1 : dx2);
double den = (test1 > test2 ? dx2 : dx1);
double r = num / den;
double evol = sqrt(4.0 * cvolume * r);
evol = min(evol, 2.0 * minelen);
cevol[s] = (cdiv[s] < 0.0 ? evol : 0.);
}
// Routine number [4] in the full algorithm CS2DQforce(...)
static __device__ void qcsSetQCnForce(
const int s,
const int s3,
const int z,
const int p1,
const int p2) {
const double gammap1 = qgamma + 1.0;
// [4.1] Compute the rmu (real Kurapatenko viscous scalar)
// Kurapatenko form of the viscosity
double ztmp2 = q2 * 0.25 * gammap1 * cdu[s];
double ztmp1 = q1 * zss[z];
double zkur = ztmp2 + sqrt(ztmp2 * ztmp2 + ztmp1 * ztmp1);
// Compute rmu for each corner
double rmu = zkur * zrp[z] * cevol[s];
rmu = (cdiv[s] > 0. ? 0. : rmu);
// [4.2] Compute the cqe for each corner
const int p0 = mapsp1[s3];
const double elen1 = length(pxp[p1] - pxp[p0]);
const double elen2 = length(pxp[p2] - pxp[p1]);
// Compute: cqe(1,2,3)=edge 1, y component (2nd), 3rd corner
// cqe(2,1,3)=edge 2, x component (1st)
cqe[2 * s] = rmu * (pu[p1] - pu[p0]) / elen1;
cqe[2 * s + 1] = rmu * (pu[p2] - pu[p1]) / elen2;
}
// Routine number [5] in the full algorithm CS2DQforce(...)
static __device__ void qcsSetForce(
const int s,
const int s4,
const int p1,
const int p2) {
// [5.1] Preparation of extra variables
double csin2 = 1. - ccos[s] * ccos[s];
cw[s] = ((csin2 < 1.e-4) ? 0. : careap[s] / csin2);
ccos[s] = ((csin2 < 1.e-4) ? 0. : ccos[s]);
__syncthreads();
// [5.2] Set-Up the forces on corners
const double2 x1 = pxp[p1];
const double2 x2 = pxp[p2];
// Edge length for c1, c2 contribution to s
double elen = length(x1 - x2);
sfq[s] = (cw[s] * (cqe[2*s+1] + ccos[s] * cqe[2*s]) +
cw[s4] * (cqe[2*s4] + ccos[s4] * cqe[2*s4+1]))
/ elen;
}
// Routine number [6] in the full algorithm
static __device__ void qcsSetVelDiff(
const int s,
const int s0,
const int p1,
const int p2,
const int z) {
double2 dx = pxp[p2] - pxp[p1];
double2 du = pu[p2] - pu[p1];
double lenx = length(dx);
double dux = dot(du, dx);
dux = (lenx > 0. ? abs(dux) / lenx : 0.);
ctemp[s0] = dux;
__syncthreads();
double ztmp = ctemp[s0];
for (int sn = s0 + dss4[s0]; sn != s0; sn += dss4[sn]) {
ztmp = max(ztmp, ctemp[sn]);
}
__syncthreads();
zdu[z] = q1 * zss[z] + 2. * q2 * ztmp;
}
static __device__ void qcsCalcForce(
const int s,
const int s0,
const int s3,
const int s4,
const int z,
const int p1,
const int p2) {
// [1] Find the right, left, top, bottom edges to use for the
// limiters
// *** NOT IMPLEMENTED IN PENNANT ***
// [2] Compute corner divergence and related quantities
qcsSetCornerDiv(s, s0, s3, z, p1, p2);
// [3] Find the limiters Psi(c)
// *** NOT IMPLEMENTED IN PENNANT ***
// [4] Compute the Q vector (corner based)
qcsSetQCnForce(s, s3, z, p1, p2);
// [5] Compute the Q forces
qcsSetForce(s, s4, p1, p2);
ctemp2[s0] = sfp[s] + sft[s] + sfq[s];
__syncthreads();
cftot[s] = ctemp2[s0] - ctemp2[s0 + dss3[s0]];
// [6] Set velocity difference to use to compute timestep
qcsSetVelDiff(s, s0, p1, p2, z);
}
static __device__ void calcCrnrMass(
const int s,
const int s3,
const int z,
const double* __restrict__ zr,
const double* __restrict__ zarea,
const double* __restrict__ smf,
double* __restrict__ cmaswt)
{
double m = zr[z] * zarea[z] * 0.5 * (smf[s] + smf[s3]);
cmaswt[s] = m;
}
static __device__ void pgasCalcEOS(
const int z,
const double* __restrict__ zr,
const double* __restrict__ ze,
double* __restrict__ zp,
double& zper,
double* __restrict__ zss)
{
const double gm1 = pgamma - 1.;
const double ss2 = max(pssmin * pssmin, 1.e-99);
double rx = zr[z];
double ex = max(ze[z], 0.0);
double px = gm1 * rx * ex;
double prex = gm1 * ex;
double perx = gm1 * rx;
double csqd = max(ss2, prex + perx * px / (rx * rx));
zp[z] = px;
zper = perx;
zss[z] = sqrt(csqd);
}
static __device__ void pgasCalcStateAtHalf(
const int z,
const double* __restrict__ zr0,
const double* __restrict__ zvolp,
const double* __restrict__ zvol0,
const double* __restrict__ ze,
const double* __restrict__ zwrate,
const double* __restrict__ zm,
const double dt,
double* __restrict__ zp,
double* __restrict__ zss)
{
double zper;
pgasCalcEOS(z, zr0, ze, zp, zper, zss);
const double dth = 0.5 * dt;
const double zminv = 1. / zm[z];
double dv = (zvolp[z] - zvol0[z]) * zminv;
double bulk = zr0[z] * zss[z] * zss[z];
double denom = 1. + 0.5 * zper * dv;
double src = zwrate[z] * dth * zminv;
zp[z] += (zper * src - zr0[z] * bulk * dv) / denom;
}
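// Build the inverse point-to-side map from (point, side) pairs sorted by point:
// mappsfirst[p] is the first side incident on point p and mapssnext[s] chains the
// remaining sides of the same point (-1 terminates the chain), which is what
// gatherToPoints walks to accumulate corner quantities at each point.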
static __global__ void gpuInvMap(
const int* mapspkey,
const int* mapspval,
int* mappsfirst,
int* mapssnext)
{
const int i = blockIdx.x * CHUNK_SIZE + threadIdx.x;
if (i >= nums) return;
int p = mapspkey[i];
int pp = mapspkey[i+1];
int pm = mapspkey[i-1];
int s = mapspval[i];
int sp = mapspval[i+1];
if (i == 0 || p != pm) mappsfirst[p] = s;
if (i+1 == nums || p != pp)
mapssnext[s] = -1;
else
mapssnext[s] = sp;
}
static __device__ void gatherToPoints(
const int p,
const double* __restrict__ cvar,
double* __restrict__ pvar)
{
double x = 0.;
for (int s = mappsfirst[p]; s >= 0; s = mapssnext[s]) {
x += cvar[s];
}
pvar[p] = x;
}
static __device__ void gatherToPoints(
const int p,
const double2* __restrict__ cvar,
double2* __restrict__ pvar)
{
double2 x = make_double2(0., 0.);
for (int s = mappsfirst[p]; s >= 0; s = mapssnext[s]) {
x += cvar[s];
}
pvar[p] = x;
}
static __device__ void applyFixedBC(
const int p,
const double2* __restrict__ px,
double2* __restrict__ pu,
double2* __restrict__ pf,
const double2 vfix,
const double bcconst) {
const double eps = 1.e-12;
double dp = dot(px[p], vfix);
if (fabs(dp - bcconst) < eps) {
pu[p] = project(pu[p], vfix);
pf[p] = project(pf[p], vfix);
}
}
static __device__ void calcAccel(
const int p,
const double2* __restrict__ pf,
const double* __restrict__ pmass,
double2* __restrict__ pa) {
const double fuzz = 1.e-99;
pa[p] = pf[p] / max(pmass[p], fuzz);
}
static __device__ void advPosFull(
const int p,
const double2* __restrict__ px0,
const double2* __restrict__ pu0,
const double2* __restrict__ pa,
const double dt,
double2* __restrict__ px,
double2* __restrict__ pu) {
pu[p] = pu0[p] + pa[p] * dt;
px[p] = px0[p] + 0.5 * (pu[p] + pu0[p]) * dt;
}
static __device__ void hydroCalcWork(
const int s,
const int s0,
const int s3,
const int z,
const int p1,
const int p2,
const double2* __restrict__ sf,
const double2* __restrict__ sf2,
const double2* __restrict__ pu0,
const double2* __restrict__ pu,
const double2* __restrict__ px,
const double dt,
double* __restrict__ zw,
double* __restrict__ zetot) {
// Compute the work done by finding, for each element/node pair
// dwork= force * vavg
// where force is the force of the element on the node
// and vavg is the average velocity of the node over the time period
double sd1 = dot( (sf[s] + sf2[s]), (pu0[p1] + pu[p1]));
double sd2 = dot(-(sf[s] + sf2[s]), (pu0[p2] + pu[p2]));
double dwork = -0.5 * dt * (sd1 * px[p1].x + sd2 * px[p2].x);
ctemp[s0] = dwork;
double etot = zetot[z];
__syncthreads();
double dwtot = ctemp[s0];
for (int sn = s0 + dss4[s0]; sn != s0; sn += dss4[sn]) {
dwtot += ctemp[sn];
}
zetot[z] = etot + dwtot;
zw[z] = dwtot;
}
static __device__ void hydroCalcWorkRate(
const int z,
const double* __restrict__ zvol0,
const double* __restrict__ zvol,
const double* __restrict__ zw,
const double* __restrict__ zp,
const double dt,
double* __restrict__ zwrate) {
double dvol = zvol[z] - zvol0[z];
zwrate[z] = (zw[z] + zp[z] * dvol) / dt;
}
static __device__ void hydroCalcEnergy(
const int z,
const double* __restrict__ zetot,
const double* __restrict__ zm,
double* __restrict__ ze) {
const double fuzz = 1.e-99;
ze[z] = zetot[z] / (zm[z] + fuzz);
}
static __device__ void hydroCalcDtCourant(
const int z,
const double* __restrict__ zdu,
const double* __restrict__ zss,
const double* __restrict__ zdl,
double& dtz,
int& idtz) {
const double fuzz = 1.e-99;
double cdu = max(zdu[z], max(zss[z], fuzz));
double dtzcour = zdl[z] * hcfl / cdu;
dtz = dtzcour;
idtz = z << 1;
}
static __device__ void hydroCalcDtVolume(
const int z,
const double* __restrict__ zvol,
const double* __restrict__ zvol0,
const double dtlast,
double& dtz,
int& idtz) {
double zdvov = abs((zvol[z] - zvol0[z]) / zvol0[z]);
double dtzvol = dtlast * hcflv / zdvov;
if (dtzvol < dtz) {
dtz = dtzvol;
idtz = (z << 1) | 1;
}
}
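// CUDA has no native atomicMin for double, so emulate one with an atomicCAS retry
// loop: reinterpret the 64-bit pattern, take the min in double precision, and
// retry until no other thread has changed the stored value in the meantime.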
static __device__ double atomicMin(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(min(val,
__longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
static __device__ void hydroFindMinDt(
const int z,
const int z0,
const int zlength,
const double dtz,
const int idtz,
double& dtnext,
int& idtnext) {
int* ctempi = (int*) ctemp2;
ctemp[z0] = dtz;
ctempi[z0] = idtz;
__syncthreads();
int len = zlength;
int half = len >> 1;
while (z0 < half) {
len = half + (len & 1);
if (ctemp[z0+len] < ctemp[z0]) {
ctemp[z0] = ctemp[z0+len];
ctempi[z0] = ctempi[z0+len];
}
__syncthreads();
half = len >> 1;
}
if (z0 == 0 && ctemp[0] < dtnext) {
atomicMin(&dtnext, ctemp[0]);
// This line isn't 100% thread-safe, but since it is only for
// a debugging aid, I'm not going to worry about it.
if (dtnext == ctemp[0]) idtnext = ctempi[0];
}
}
static __device__ void hydroCalcDt(
const int z,
const int z0,
const int zlength,
const double* __restrict__ zdu,
const double* __restrict__ zss,
const double* __restrict__ zdl,
const double* __restrict__ zvol,
const double* __restrict__ zvol0,
const double dtlast,
double& dtnext,
int& idtnext) {
double dtz;
int idtz;
hydroCalcDtCourant(z, zdu, zss, zdl, dtz, idtz);
hydroCalcDtVolume(z, zvol, zvol0, dt, dtz, idtz);
hydroFindMinDt(z, z0, zlength, dtz, idtz, dtnext, idtnext);
}
static __global__ void gpuMain1()
{
const int p = blockIdx.x * CHUNK_SIZE + threadIdx.x;
if (p >= nump) return;
double dth = 0.5 * dt;
// save off point variable values from previous cycle
px0[p] = px[p];
pu0[p] = pu[p];
// ===== Predictor step =====
// 1. advance mesh to center of time step
advPosHalf(p, px0, pu0, dth, pxp);
}
static __global__ void gpuMain2()
{
const int s0 = threadIdx.x;
const int sch = blockIdx.x;
const int s = schsfirst[sch] + s0;
if (s >= schslast[sch]) return;
const int p1 = mapsp1[s];
const int p2 = mapsp2[s];
const int z = mapsz[s];
const int s4 = mapss4[s];
const int s04 = s4 - schsfirst[sch];
dss4[s0] = s04 - s0;
dss3[s04] = s0 - s04;
__syncthreads();
const int s3 = s + dss3[s0];
// save off zone variable values from previous cycle
zvol0[z] = zvol[z];
// 1a. compute new mesh geometry
calcZoneCtrs(s, s0, z, p1, pxp, zxp);
meshCalcCharLen(s, s0, s3, z, p1, p2, znump, pxp, zxp, zdl);
ssurf[s] = rotateCCW(0.5 * (pxp[p1] + pxp[p2]) - zxp[z]);
calcSideVols(s, z, p1, p2, pxp, zxp, sareap, svolp);
calcZoneVols(s, s0, z, sareap, svolp, zareap, zvolp);
// 2. compute corner masses
hydroCalcRho(z, zm, zvolp, zrp);
calcCrnrMass(s, s3, z, zrp, zareap, smf, cmaswt);
// 3. compute material state (half-advanced)
// call this routine from only one thread per zone
if (s3 > s) pgasCalcStateAtHalf(z, zr, zvolp, zvol0, ze, zwrate,
zm, dt, zp, zss);
__syncthreads();
// 4. compute forces
pgasCalcForce(s, z, zp, ssurf, sfp);
ttsCalcForce(s, z, zareap, zrp, zss, sareap, smf, ssurf, sft);
qcsCalcForce(s, s0, s3, s4, z, p1, p2);
}
static __global__ void gpuMain3()
{
const int p = blockIdx.x * CHUNK_SIZE + threadIdx.x;
if (p >= nump) return;
// gather corner masses, forces to points
gatherToPoints(p, cmaswt, pmaswt);
gatherToPoints(p, cftot, pf);
// 4a. apply boundary conditions
for (int bc = 0; bc < numbcx; ++bc)
applyFixedBC(p, pxp, pu0, pf, vfixx, bcx[bc]);
for (int bc = 0; bc < numbcy; ++bc)
applyFixedBC(p, pxp, pu0, pf, vfixy, bcy[bc]);
// 5. compute accelerations
calcAccel(p, pf, pmaswt, pap);
// ===== Corrector step =====
// 6. advance mesh to end of time step
advPosFull(p, px0, pu0, pap, dt, px, pu);
}
static __global__ void gpuMain4()
{
const int s0 = threadIdx.x;
const int sch = blockIdx.x;
const int s = schsfirst[sch] + s0;
if (s >= schslast[sch]) return;
const int p1 = mapsp1[s];
const int p2 = mapsp2[s];
const int z = mapsz[s];
const int s4 = mapss4[s];
const int s04 = s4 - schsfirst[sch];
dss4[s0] = s04 - s0;
dss3[s04] = s0 - s04;
__syncthreads();
const int s3 = s + dss3[s0];
// 6a. compute new mesh geometry
calcZoneCtrs(s, s0, z, p1, px, zx);
calcSideVols(s, z, p1, p2, px, zx, sarea, svol);
calcZoneVols(s, s0, z, sarea, svol, zarea, zvol);
// 7. compute work
hydroCalcWork(s, s0, s3, z, p1, p2, sfp, sfq, pu0, pu, pxp, dt,
zw, zetot);
}
static __global__ void gpuMain5()
{
const int z = blockIdx.x * CHUNK_SIZE + threadIdx.x;
if (z >= numz) return;
const int z0 = threadIdx.x;
const int zlength = min(CHUNK_SIZE, numz - blockIdx.x * CHUNK_SIZE);
// 7. compute work
hydroCalcWorkRate(z, zvol0, zvol, zw, zp, dt, zwrate);
// 8. update state variables
hydroCalcEnergy(z, zetot, zm, ze);
hydroCalcRho(z, zm, zvol, zr);
// 9. compute timestep for next cycle
hydroCalcDt(z, z0, zlength, zdu, zss, zdl, zvol, zvol0, dt,
dtnext, idtnext);
}
void meshCheckBadSides() {
int numsbadH;
CHKERR(hipMemcpyFromSymbol(&numsbadH, numsbad, sizeof(int)));
// if there were negative side volumes, error exit
if (numsbadH > 0) {
cerr << "Error: " << numsbadH << " negative side volumes" << endl;
cerr << "Exiting..." << endl;
exit(1);
}
}
void computeChunks(
const int nums,
const int numz,
const int* mapsz,
const int chunksize,
int& numsch,
int*& schsfirst,
int*& schslast,
int*& schzfirst,
int*& schzlast) {
int* stemp1 = Memory::alloc<int>(nums/3+1);
int* stemp2 = Memory::alloc<int>(nums/3+1);
int* ztemp1 = Memory::alloc<int>(nums/3+1);
int* ztemp2 = Memory::alloc<int>(nums/3+1);
int nsch = 0;
int s1;
int s2 = 0;
while (s2 < nums) {
s1 = s2;
s2 = min(s2 + chunksize, nums);
if (s2 < nums) {
while (mapsz[s2] == mapsz[s2-1]) --s2;
}
stemp1[nsch] = s1;
stemp2[nsch] = s2;
ztemp1[nsch] = mapsz[s1];
ztemp2[nsch] = (s2 == nums ? numz : mapsz[s2]);
++nsch;
}
numsch = nsch;
schsfirst = Memory::alloc<int>(numsch);
schslast = Memory::alloc<int>(numsch);
schzfirst = Memory::alloc<int>(numsch);
schzlast = Memory::alloc<int>(numsch);
copy(stemp1, stemp1 + numsch, schsfirst);
copy(stemp2, stemp2 + numsch, schslast);
copy(ztemp1, ztemp1 + numsch, schzfirst);
copy(ztemp2, ztemp2 + numsch, schzlast);
Memory::free(stemp1);
Memory::free(stemp2);
Memory::free(ztemp1);
Memory::free(ztemp2);
}
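// Worked example (illustrative only; the values below are not taken from any real
// mesh): with nums = 10, numz = 3, chunksize = 4 and
//     mapsz = {0,0,0, 1,1,1, 2,2,2,2}
// the loop above trims each chunk back so that no zone's sides straddle a chunk
// boundary, producing
//     sides [0,3)  -> zones [0,1)
//     sides [3,6)  -> zones [1,2)
//     sides [6,10) -> zones [2,3)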
void hydroInit(
const int numpH,
const int numzH,
const int numsH,
const int numcH,
const int numeH,
const double pgammaH,
const double pssminH,
const double talfaH,
const double tssminH,
const double qgammaH,
const double q1H,
const double q2H,
const double hcflH,
const double hcflvH,
const int numbcxH,
const double* bcxH,
const int numbcyH,
const double* bcyH,
const double2* pxH,
const double2* puH,
const double* zmH,
const double* zrH,
const double* zvolH,
const double* zeH,
const double* zetotH,
const double* zwrateH,
const double* smfH,
const int* mapsp1H,
const int* mapsp2H,
const int* mapszH,
const int* mapss4H,
const int* mapseH,
const int* znumpH) {
printf("Running Hydro on device...\n");
computeChunks(numsH, numzH, mapszH, CHUNK_SIZE, numschH,
schsfirstH, schslastH, schzfirstH, schzlastH);
numpchH = (numpH+CHUNK_SIZE-1) / CHUNK_SIZE;
numzchH = (numzH+CHUNK_SIZE-1) / CHUNK_SIZE;
CHKERR(hipMemcpyToSymbol(numsch, &numschH, sizeof(int)));
CHKERR(hipMemcpyToSymbol(nump, &numpH, sizeof(int)));
CHKERR(hipMemcpyToSymbol(numz, &numzH, sizeof(int)));
CHKERR(hipMemcpyToSymbol(nums, &numsH, sizeof(int)));
CHKERR(hipMemcpyToSymbol(numc, &numcH, sizeof(int)));
CHKERR(hipMemcpyToSymbol(pgamma, &pgammaH, sizeof(double)));
CHKERR(hipMemcpyToSymbol(pssmin, &pssminH, sizeof(double)));
CHKERR(hipMemcpyToSymbol(talfa, &talfaH, sizeof(double)));
CHKERR(hipMemcpyToSymbol(tssmin, &tssminH, sizeof(double)));
CHKERR(hipMemcpyToSymbol(qgamma, &qgammaH, sizeof(double)));
CHKERR(hipMemcpyToSymbol(q1, &q1H, sizeof(double)));
CHKERR(hipMemcpyToSymbol(q2, &q2H, sizeof(double)));
CHKERR(hipMemcpyToSymbol(hcfl, &hcflH, sizeof(double)));
CHKERR(hipMemcpyToSymbol(hcflv, &hcflvH, sizeof(double)));
const double2 vfixxH = make_double2(1., 0.);
const double2 vfixyH = make_double2(0., 1.);
CHKERR(hipMemcpyToSymbol(vfixx, &vfixxH, sizeof(double2)));
CHKERR(hipMemcpyToSymbol(vfixy, &vfixyH, sizeof(double2)));
CHKERR(hipMemcpyToSymbol(numbcx, &numbcxH, sizeof(int)));
CHKERR(hipMemcpyToSymbol(numbcy, &numbcyH, sizeof(int)));
CHKERR(hipMemcpyToSymbol(bcx, bcxH, numbcxH*sizeof(double)));
CHKERR(hipMemcpyToSymbol(bcy, bcyH, numbcyH*sizeof(double)));
CHKERR(hipMalloc(&schsfirstD, numschH*sizeof(int)));
CHKERR(hipMalloc(&schslastD, numschH*sizeof(int)));
CHKERR(hipMalloc(&schzfirstD, numschH*sizeof(int)));
CHKERR(hipMalloc(&schzlastD, numschH*sizeof(int)));
CHKERR(hipMalloc(&mapsp1D, numsH*sizeof(int)));
CHKERR(hipMalloc(&mapsp2D, numsH*sizeof(int)));
CHKERR(hipMalloc(&mapszD, numsH*sizeof(int)));
CHKERR(hipMalloc(&mapss4D, numsH*sizeof(int)));
CHKERR(hipMalloc(&znumpD, numzH*sizeof(int)));
CHKERR(hipMalloc(&pxD, numpH*sizeof(double2)));
CHKERR(hipMalloc(&pxpD, numpH*sizeof(double2)));
CHKERR(hipMalloc(&px0D, numpH*sizeof(double2)));
CHKERR(hipMalloc(&zxD, numzH*sizeof(double2)));
CHKERR(hipMalloc(&zxpD, numzH*sizeof(double2)));
CHKERR(hipMalloc(&puD, numpH*sizeof(double2)));
CHKERR(hipMalloc(&pu0D, numpH*sizeof(double2)));
CHKERR(hipMalloc(&papD, numpH*sizeof(double2)));
CHKERR(hipMalloc(&ssurfD, numsH*sizeof(double2)));
CHKERR(hipMalloc(&zmD, numzH*sizeof(double)));
CHKERR(hipMalloc(&zrD, numzH*sizeof(double)));
CHKERR(hipMalloc(&zrpD, numzH*sizeof(double)));
CHKERR(hipMalloc(&sareaD, numsH*sizeof(double)));
CHKERR(hipMalloc(&svolD, numsH*sizeof(double)));
CHKERR(hipMalloc(&zareaD, numzH*sizeof(double)));
CHKERR(hipMalloc(&zvolD, numzH*sizeof(double)));
CHKERR(hipMalloc(&zvol0D, numzH*sizeof(double)));
CHKERR(hipMalloc(&zdlD, numzH*sizeof(double)));
CHKERR(hipMalloc(&zduD, numzH*sizeof(double)));
CHKERR(hipMalloc(&zeD, numzH*sizeof(double)));
CHKERR(hipMalloc(&zetot0D, numzH*sizeof(double)));
CHKERR(hipMalloc(&zetotD, numzH*sizeof(double)));
CHKERR(hipMalloc(&zwD, numzH*sizeof(double)));
CHKERR(hipMalloc(&zwrateD, numzH*sizeof(double)));
CHKERR(hipMalloc(&zpD, numzH*sizeof(double)));
CHKERR(hipMalloc(&zssD, numzH*sizeof(double)));
CHKERR(hipMalloc(&smfD, numsH*sizeof(double)));
CHKERR(hipMalloc(&careapD, numcH*sizeof(double)));
CHKERR(hipMalloc(&sareapD, numsH*sizeof(double)));
CHKERR(hipMalloc(&svolpD, numsH*sizeof(double)));
CHKERR(hipMalloc(&zareapD, numzH*sizeof(double)));
CHKERR(hipMalloc(&zvolpD, numzH*sizeof(double)));
CHKERR(hipMalloc(&cmaswtD, numsH*sizeof(double)));
CHKERR(hipMalloc(&pmaswtD, numpH*sizeof(double)));
CHKERR(hipMalloc(&sfpD, numsH*sizeof(double2)));
CHKERR(hipMalloc(&sftD, numsH*sizeof(double2)));
CHKERR(hipMalloc(&sfqD, numsH*sizeof(double2)));
CHKERR(hipMalloc(&cftotD, numcH*sizeof(double2)));
CHKERR(hipMalloc(&pfD, numpH*sizeof(double2)));
CHKERR(hipMalloc(&cevolD, numcH*sizeof(double)));
CHKERR(hipMalloc(&cduD, numcH*sizeof(double)));
CHKERR(hipMalloc(&cdivD, numcH*sizeof(double)));
CHKERR(hipMalloc(&zucD, numzH*sizeof(double2)));
CHKERR(hipMalloc(&crmuD, numcH*sizeof(double)));
CHKERR(hipMalloc(&cqeD, 2*numcH*sizeof(double2)));
CHKERR(hipMalloc(&ccosD, numcH*sizeof(double)));
CHKERR(hipMalloc(&cwD, numcH*sizeof(double)));
CHKERR(hipMalloc(&mapspkeyD, numsH*sizeof(int)));
CHKERR(hipMalloc(&mapspvalD, numsH*sizeof(int)));
CHKERR(hipMalloc(&mappsfirstD, numpH*sizeof(int)));
CHKERR(hipMalloc(&mapssnextD, numsH*sizeof(int)));
CHKERR(hipMemcpyToSymbol(schsfirst, &schsfirstD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(schslast, &schslastD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(schzfirst, &schzfirstD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(schzlast, &schzlastD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(mapsp1, &mapsp1D, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(mapsp2, &mapsp2D, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(mapsz, &mapszD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(mapss4, &mapss4D, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(mapspkey, &mapspkeyD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(mapspval, &mapspvalD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(mappsfirst, &mappsfirstD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(mapssnext, &mapssnextD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(znump, &znumpD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(px, &pxD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(pxp, &pxpD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(px0, &px0D, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(zx, &zxD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(zxp, &zxpD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(pu, &puD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(pu0, &pu0D, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(pap, &papD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(ssurf, &ssurfD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(zm, &zmD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(zr, &zrD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(zrp, &zrpD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(sarea, &sareaD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(svol, &svolD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(zarea, &zareaD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(zvol, &zvolD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(zvol0, &zvol0D, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(zdl, &zdlD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(zdu, &zduD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(ze, &zeD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(zetot, &zetotD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(zw, &zwD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(zwrate, &zwrateD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(zp, &zpD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(zss, &zssD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(smf, &smfD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(careap, &careapD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(sareap, &sareapD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(svolp, &svolpD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(zareap, &zareapD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(zvolp, &zvolpD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(cmaswt, &cmaswtD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(pmaswt, &pmaswtD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(sfp, &sfpD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(sft, &sftD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(sfq, &sfqD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(cftot, &cftotD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(pf, &pfD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(cevol, &cevolD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(cdu, &cduD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(cdiv, &cdivD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(zuc, &zucD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(crmu, &crmuD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(cqe, &cqeD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(ccos, &ccosD, sizeof(void*)));
CHKERR(hipMemcpyToSymbol(cw, &cwD, sizeof(void*)));
CHKERR(hipMemcpy(schsfirstD, schsfirstH, numschH*sizeof(int), hipMemcpyHostToDevice));
CHKERR(hipMemcpy(schslastD, schslastH, numschH*sizeof(int), hipMemcpyHostToDevice));
CHKERR(hipMemcpy(schzfirstD, schzfirstH, numschH*sizeof(int), hipMemcpyHostToDevice));
CHKERR(hipMemcpy(schzlastD, schzlastH, numschH*sizeof(int), hipMemcpyHostToDevice));
CHKERR(hipMemcpy(mapsp1D, mapsp1H, numsH*sizeof(int), hipMemcpyHostToDevice));
CHKERR(hipMemcpy(mapsp2D, mapsp2H, numsH*sizeof(int), hipMemcpyHostToDevice));
CHKERR(hipMemcpy(mapszD, mapszH, numsH*sizeof(int), hipMemcpyHostToDevice));
CHKERR(hipMemcpy(mapss4D, mapss4H, numsH*sizeof(int), hipMemcpyHostToDevice));
CHKERR(hipMemcpy(znumpD, znumpH, numzH*sizeof(int), hipMemcpyHostToDevice));
CHKERR(hipMemcpy(zmD, zmH, numzH*sizeof(double), hipMemcpyHostToDevice));
CHKERR(hipMemcpy(smfD, smfH, numsH*sizeof(double), hipMemcpyHostToDevice));
CHKERR(hipMemcpy(pxD, pxH, numpH*sizeof(double2), hipMemcpyHostToDevice));
CHKERR(hipMemcpy(puD, puH, numpH*sizeof(double2), hipMemcpyHostToDevice));
CHKERR(hipMemcpy(zrD, zrH, numzH*sizeof(double), hipMemcpyHostToDevice));
CHKERR(hipMemcpy(zvolD, zvolH, numzH*sizeof(double), hipMemcpyHostToDevice));
CHKERR(hipMemcpy(zeD, zeH, numzH*sizeof(double), hipMemcpyHostToDevice));
CHKERR(hipMemcpy(zetotD, zetotH, numzH*sizeof(double), hipMemcpyHostToDevice));
CHKERR(hipMemcpy(zwrateD, zwrateH, numzH*sizeof(double), hipMemcpyHostToDevice));
thrust::device_ptr<int> mapsp1T(mapsp1D);
thrust::device_ptr<int> mapspkeyT(mapspkeyD);
thrust::device_ptr<int> mapspvalT(mapspvalD);
thrust::copy(mapsp1T, mapsp1T + numsH, mapspkeyT);
thrust::sequence(mapspvalT, mapspvalT + numsH);
thrust::stable_sort_by_key(mapspkeyT, mapspkeyT + numsH, mapspvalT);
int gridSize = (numsH+CHUNK_SIZE-1) / CHUNK_SIZE;
int chunkSize = CHUNK_SIZE;
hipLaunchKernelGGL(( gpuInvMap), dim3(gridSize), dim3(chunkSize), 0, 0, mapspkeyD, mapspvalD,
mappsfirstD, mapssnextD);
hipDeviceSynchronize();
int zero = 0;
CHKERR(hipMemcpyToSymbol(numsbad, &zero, sizeof(int)));
}
void hydroDoCycle(
const double dtH,
double& dtnextH,
int& idtnextH) {
int gridSizeS, gridSizeP, gridSizeZ, chunkSize;
CHKERR(hipMemcpyToSymbol(dt, &dtH, sizeof(double)));
gridSizeS = numschH;
gridSizeP = numpchH;
gridSizeZ = numzchH;
chunkSize = CHUNK_SIZE;
hipLaunchKernelGGL(( gpuMain1), dim3(gridSizeP), dim3(chunkSize), 0, 0, );
hipDeviceSynchronize();
hipLaunchKernelGGL(( gpuMain2), dim3(gridSizeS), dim3(chunkSize), 0, 0, );
hipDeviceSynchronize();
meshCheckBadSides();
hipLaunchKernelGGL(( gpuMain3), dim3(gridSizeP), dim3(chunkSize), 0, 0, );
hipDeviceSynchronize();
double bigval = 1.e99;
CHKERR(hipMemcpyToSymbol(dtnext, &bigval, sizeof(double)));
hipLaunchKernelGGL(( gpuMain4), dim3(gridSizeS), dim3(chunkSize), 0, 0, );
hipDeviceSynchronize();
hipLaunchKernelGGL(( gpuMain5), dim3(gridSizeZ), dim3(chunkSize), 0, 0, );
hipDeviceSynchronize();
meshCheckBadSides();
CHKERR(hipMemcpyFromSymbol(&dtnextH, dtnext, sizeof(double)));
CHKERR(hipMemcpyFromSymbol(&idtnextH, idtnext, sizeof(int)));
}
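// Minimal sketch of how a host driver might use hydroDoCycle (hypothetical; the
// real driver lives outside this file, and dtInitial/numCycles/dtMax are assumed
// names, not symbols defined here):
//
//     double dtH = dtInitial;
//     double dtnextH;
//     int idtnextH;
//     for (int cycle = 0; cycle < numCycles; ++cycle) {
//         hydroDoCycle(dtH, dtnextH, idtnextH);
//         dtH = std::min(dtnextH, dtMax);   // next cycle uses the dt suggested by gpuMain5
//     }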
void hydroGetData(
const int numpH,
const int numzH,
double2* pxH,
double* zrH,
double* zeH,
double* zpH) {
CHKERR(hipMemcpy(pxH, pxD, numpH*sizeof(double2), hipMemcpyDeviceToHost));
CHKERR(hipMemcpy(zrH, zrD, numzH*sizeof(double), hipMemcpyDeviceToHost));
CHKERR(hipMemcpy(zeH, zeD, numzH*sizeof(double), hipMemcpyDeviceToHost));
CHKERR(hipMemcpy(zpH, zpD, numzH*sizeof(double), hipMemcpyDeviceToHost));
}
void hydroInitGPU()
{
int one = 1;
CHKERR(hipDeviceSetCacheConfig(hipFuncCachePreferL1));
CHKERR(hipMemcpyToSymbol(gpuinit, &one, sizeof(int)));
}
void hydroFinalGPU()
{
}
| fbad09c9918c2b962d3a340801d0781c7e94af2d.cu | /*
* HydroGPU.cu
*
* Created on: Aug 2, 2012
* Author: cferenba
*
* Copyright (c) 2012, Los Alamos National Security, LLC.
* All rights reserved.
* Use of this source code is governed by a BSD-style open-source
* license; see top-level LICENSE file for full license text.
*/
#include "HydroGPU.hh"
#include <cmath>
#include <cstdio>
#include <algorithm>
#include <cuda_runtime.h>
#include <thrust/copy.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include "Memory.hh"
#include "Vec2.hh"
using namespace std;
const int CHUNK_SIZE = 64;
static __constant__ int gpuinit;
static __constant__ int numsch;
static __constant__ int nump;
static __constant__ int numz;
static __constant__ int nums;
static __constant__ int numc;
static __constant__ double dt;
static __constant__ double pgamma, pssmin;
static __constant__ double talfa, tssmin;
static __constant__ double qgamma, q1, q2;
static __constant__ double hcfl, hcflv;
static __constant__ double2 vfixx, vfixy;
static __constant__ int numbcx, numbcy;
static __constant__ double bcx[2], bcy[2];
static __device__ int numsbad;
static __device__ double dtnext;
static __device__ int idtnext;
static __constant__ const int* schsfirst;
static __constant__ const int* schslast;
static __constant__ const int* schzfirst;
static __constant__ const int* schzlast;
static __constant__ const int* mapsp1;
static __constant__ const int* mapsp2;
static __constant__ const int* mapsz;
static __constant__ const int* mapss4;
static __constant__ const int *mapspkey, *mapspval;
static __constant__ const int *mappsfirst, *mapssnext;
static __constant__ const int* znump;
static __constant__ double2 *px, *pxp, *px0;
static __constant__ double2 *zx, *zxp;
static __constant__ double2 *pu, *pu0;
static __constant__ double2* pap;
static __constant__ double2* ssurf;
static __constant__ const double* zm;
static __constant__ double *zr, *zrp;
static __constant__ double *ze, *zetot;
static __constant__ double *zw, *zwrate;
static __constant__ double *zp, *zss;
static __constant__ const double* smf;
static __constant__ double *careap, *sareap, *svolp, *zareap, *zvolp;
static __constant__ double *sarea, *svol, *zarea, *zvol, *zvol0;
static __constant__ double *zdl, *zdu;
static __constant__ double *cmaswt, *pmaswt;
static __constant__ double2 *sfp, *sft, *sfq, *cftot, *pf;
static __constant__ double* cevol;
static __constant__ double* cdu;
static __constant__ double* cdiv;
static __constant__ double2* zuc;
static __constant__ double* crmu;
static __constant__ double2* cqe;
static __constant__ double* ccos;
static __constant__ double* cw;
static __shared__ int dss3[CHUNK_SIZE];
static __shared__ int dss4[CHUNK_SIZE];
static __shared__ double ctemp[CHUNK_SIZE];
static __shared__ double2 ctemp2[CHUNK_SIZE];
static int numschH, numpchH, numzchH;
static int *schsfirstH, *schslastH, *schzfirstH, *schzlastH;
static int *schsfirstD, *schslastD, *schzfirstD, *schzlastD;
static int *mapsp1D, *mapsp2D, *mapszD, *mapss4D, *znumpD;
static int *mapspkeyD, *mapspvalD;
static int *mappsfirstD, *mapssnextD;
static double2 *pxD, *pxpD, *px0D, *zxD, *zxpD, *puD, *pu0D, *papD,
*ssurfD, *sfpD, *sftD, *sfqD, *cftotD, *pfD, *zucD, *cqeD;
static double *zmD, *zrD, *zrpD,
*sareaD, *svolD, *zareaD, *zvolD, *zvol0D, *zdlD, *zduD,
*zeD, *zetot0D, *zetotD, *zwD, *zwrateD,
*zpD, *zssD, *smfD, *careapD, *sareapD, *svolpD, *zareapD, *zvolpD;
static double *cmaswtD, *pmaswtD;
static double *cevolD, *cduD, *cdivD, *crmuD, *ccosD, *cwD;
int checkCudaError(const cudaError_t err, const char* cmd)
{
if(err) {
printf("CUDA error in command '%s'\n", cmd); \
printf("Error message: %s\n", cudaGetErrorString(err)); \
}
return err;
}
#define CHKERR(cmd) checkCudaError(cmd, #cmd)
static __device__ void advPosHalf(
const int p,
const double2* __restrict__ px0,
const double2* __restrict__ pu0,
const double dt,
double2* __restrict__ pxp) {
pxp[p] = px0[p] + pu0[p] * dt;
}
static __device__ void calcZoneCtrs(
const int s,
const int s0,
const int z,
const int p1,
const double2* __restrict__ px,
double2* __restrict__ zx) {
ctemp2[s0] = px[p1];
__syncthreads();
double2 zxtot = ctemp2[s0];
double zct = 1.;
for (int sn = s0 + dss4[s0]; sn != s0; sn += dss4[sn]) {
zxtot += ctemp2[sn];
zct += 1.;
}
zx[z] = zxtot / zct;
}
static __device__ void calcSideVols(
const int s,
const int z,
const int p1,
const int p2,
const double2* __restrict__ px,
const double2* __restrict__ zx,
double* __restrict__ sarea,
double* __restrict__ svol)
{
const double third = 1. / 3.;
double sa = 0.5 * cross(px[p2] - px[p1], zx[z] - px[p1]);
double sv = third * sa * (px[p1].x + px[p2].x + zx[z].x);
sarea[s] = sa;
svol[s] = sv;
if (sv <= 0.) atomicAdd(&numsbad, 1);
}
static __device__ void calcZoneVols(
const int s,
const int s0,
const int z,
const double* __restrict__ sarea,
const double* __restrict__ svol,
double* __restrict__ zarea,
double* __restrict__ zvol)
{
// make sure all side volumes have been stored
__syncthreads();
double zatot = sarea[s];
double zvtot = svol[s];
for (int sn = mapss4[s]; sn != s; sn = mapss4[sn]) {
zatot += sarea[sn];
zvtot += svol[sn];
}
zarea[z] = zatot;
zvol[z] = zvtot;
}
static __device__ void meshCalcCharLen(
const int s,
const int s0,
const int s3,
const int z,
const int p1,
const int p2,
const int* __restrict__ znump,
const double2* __restrict__ px,
const double2* __restrict__ zx,
double* __restrict__ zdl) {
double area = 0.5 * cross(px[p2] - px[p1], zx[z] - px[p1]);
double base = length(px[p2] - px[p1]);
double fac = (znump[z] == 3 ? 3. : 4.);
double sdl = fac * area / base;
ctemp[s0] = sdl;
__syncthreads();
double sdlmin = ctemp[s0];
for (int sn = s0 + dss4[s0]; sn != s0; sn += dss4[sn]) {
sdlmin = min(sdlmin, ctemp[sn]);
}
zdl[z] = sdlmin;
}
static __device__ void hydroCalcRho(const int z,
const double* __restrict__ zm,
const double* __restrict__ zvol,
double* __restrict__ zr)
{
zr[z] = zm[z] / zvol[z];
}
static __device__ void pgasCalcForce(
const int s,
const int z,
const double* __restrict__ zp,
const double2* __restrict__ ssurf,
double2* __restrict__ sf) {
sf[s] = -zp[z] * ssurf[s];
}
static __device__ void ttsCalcForce(
const int s,
const int z,
const double* __restrict__ zarea,
const double* __restrict__ zr,
const double* __restrict__ zss,
const double* __restrict__ sarea,
const double* __restrict__ smf,
const double2* __restrict__ ssurf,
double2* __restrict__ sf) {
double svfacinv = zarea[z] / sarea[s];
double srho = zr[z] * smf[s] * svfacinv;
double sstmp = max(zss[z], tssmin);
sstmp = talfa * sstmp * sstmp;
double sdp = sstmp * (srho - zr[z]);
sf[s] = -sdp * ssurf[s];
}
// Routine number [2] in the full algorithm
// [2.1] Find the corner divergence
// [2.2] Compute the cos angle for c
// [2.3] Find the evolution factor cevol(c)
// and the Delta u(c) = du(c)
static __device__ void qcsSetCornerDiv(
const int s,
const int s0,
const int s3,
const int z,
const int p1,
const int p2) {
// [1] Compute a zone-centered velocity
ctemp2[s0] = pu[p1];
__syncthreads();
double2 zutot = ctemp2[s0];
double zct = 1.;
for (int sn = s0 + dss4[s0]; sn != s0; sn += dss4[sn]) {
zutot += ctemp2[sn];
zct += 1.;
}
zuc[z] = zutot / zct;
// [2] Divergence at the corner
// Associated zone, corner, point
const int p0 = mapsp1[s3];
double2 up0 = pu[p1];
double2 xp0 = pxp[p1];
double2 up1 = 0.5 * (pu[p1] + pu[p2]);
double2 xp1 = 0.5 * (pxp[p1] + pxp[p2]);
double2 up2 = zuc[z];
double2 xp2 = zxp[z];
double2 up3 = 0.5 * (pu[p0] + pu[p1]);
double2 xp3 = 0.5 * (pxp[p0] + pxp[p1]);
// position, velocity diffs along diagonals
double2 up2m0 = up2 - up0;
double2 xp2m0 = xp2 - xp0;
double2 up3m1 = up3 - up1;
double2 xp3m1 = xp3 - xp1;
// average corner-centered velocity
double2 duav = 0.25 * (up0 + up1 + up2 + up3);
// compute cosine angle
double2 v1 = xp1 - xp0;
double2 v2 = xp3 - xp0;
double de1 = length(v1);
double de2 = length(v2);
double minelen = 2.0 * min(de1, de2);
ccos[s] = (minelen < 1.e-12 ? 0. : dot(v1, v2) / (de1 * de2));
// compute 2d cartesian volume of corner
double cvolume = 0.5 * cross(xp2m0, xp3m1);
careap[s] = cvolume;
// compute velocity divergence of corner
cdiv[s] = (cross(up2m0, xp3m1) - cross(up3m1, xp2m0)) /
(2.0 * cvolume);
// compute delta velocity
double dv1 = length2(up2m0 - up3m1);
double dv2 = length2(up2m0 + up3m1);
double du = sqrt(max(dv1, dv2));
cdu[s] = (cdiv[s] < 0.0 ? du : 0.);
// compute evolution factor
double2 dxx1 = 0.5 * (xp2m0 - xp3m1);
double2 dxx2 = 0.5 * (xp2m0 + xp3m1);
double dx1 = length(dxx1);
double dx2 = length(dxx2);
double test1 = abs(dot(dxx1, duav) * dx2);
double test2 = abs(dot(dxx2, duav) * dx1);
double num = (test1 > test2 ? dx1 : dx2);
double den = (test1 > test2 ? dx2 : dx1);
double r = num / den;
double evol = sqrt(4.0 * cvolume * r);
evol = min(evol, 2.0 * minelen);
cevol[s] = (cdiv[s] < 0.0 ? evol : 0.);
}
// Routine number [4] in the full algorithm CS2DQforce(...)
static __device__ void qcsSetQCnForce(
const int s,
const int s3,
const int z,
const int p1,
const int p2) {
const double gammap1 = qgamma + 1.0;
// [4.1] Compute the rmu (real Kurapatenko viscous scalar)
// Kurapatenko form of the viscosity
double ztmp2 = q2 * 0.25 * gammap1 * cdu[s];
double ztmp1 = q1 * zss[z];
double zkur = ztmp2 + sqrt(ztmp2 * ztmp2 + ztmp1 * ztmp1);
// Compute rmu for each corner
double rmu = zkur * zrp[z] * cevol[s];
rmu = (cdiv[s] > 0. ? 0. : rmu);
// [4.2] Compute the cqe for each corner
const int p0 = mapsp1[s3];
const double elen1 = length(pxp[p1] - pxp[p0]);
const double elen2 = length(pxp[p2] - pxp[p1]);
// Compute: cqe(1,2,3)=edge 1, y component (2nd), 3rd corner
// cqe(2,1,3)=edge 2, x component (1st)
cqe[2 * s] = rmu * (pu[p1] - pu[p0]) / elen1;
cqe[2 * s + 1] = rmu * (pu[p2] - pu[p1]) / elen2;
}
// Routine number [5] in the full algorithm CS2DQforce(...)
static __device__ void qcsSetForce(
const int s,
const int s4,
const int p1,
const int p2) {
// [5.1] Preparation of extra variables
double csin2 = 1. - ccos[s] * ccos[s];
cw[s] = ((csin2 < 1.e-4) ? 0. : careap[s] / csin2);
ccos[s] = ((csin2 < 1.e-4) ? 0. : ccos[s]);
__syncthreads();
// [5.2] Set-Up the forces on corners
const double2 x1 = pxp[p1];
const double2 x2 = pxp[p2];
// Edge length for c1, c2 contribution to s
double elen = length(x1 - x2);
sfq[s] = (cw[s] * (cqe[2*s+1] + ccos[s] * cqe[2*s]) +
cw[s4] * (cqe[2*s4] + ccos[s4] * cqe[2*s4+1]))
/ elen;
}
// Routine number [6] in the full algorithm
static __device__ void qcsSetVelDiff(
const int s,
const int s0,
const int p1,
const int p2,
const int z) {
double2 dx = pxp[p2] - pxp[p1];
double2 du = pu[p2] - pu[p1];
double lenx = length(dx);
double dux = dot(du, dx);
dux = (lenx > 0. ? abs(dux) / lenx : 0.);
ctemp[s0] = dux;
__syncthreads();
double ztmp = ctemp[s0];
for (int sn = s0 + dss4[s0]; sn != s0; sn += dss4[sn]) {
ztmp = max(ztmp, ctemp[sn]);
}
__syncthreads();
zdu[z] = q1 * zss[z] + 2. * q2 * ztmp;
}
static __device__ void qcsCalcForce(
const int s,
const int s0,
const int s3,
const int s4,
const int z,
const int p1,
const int p2) {
// [1] Find the right, left, top, bottom edges to use for the
// limiters
// *** NOT IMPLEMENTED IN PENNANT ***
// [2] Compute corner divergence and related quantities
qcsSetCornerDiv(s, s0, s3, z, p1, p2);
// [3] Find the limiters Psi(c)
// *** NOT IMPLEMENTED IN PENNANT ***
// [4] Compute the Q vector (corner based)
qcsSetQCnForce(s, s3, z, p1, p2);
// [5] Compute the Q forces
qcsSetForce(s, s4, p1, p2);
ctemp2[s0] = sfp[s] + sft[s] + sfq[s];
__syncthreads();
cftot[s] = ctemp2[s0] - ctemp2[s0 + dss3[s0]];
// [6] Set velocity difference to use to compute timestep
qcsSetVelDiff(s, s0, p1, p2, z);
}
static __device__ void calcCrnrMass(
const int s,
const int s3,
const int z,
const double* __restrict__ zr,
const double* __restrict__ zarea,
const double* __restrict__ smf,
double* __restrict__ cmaswt)
{
double m = zr[z] * zarea[z] * 0.5 * (smf[s] + smf[s3]);
cmaswt[s] = m;
}
static __device__ void pgasCalcEOS(
const int z,
const double* __restrict__ zr,
const double* __restrict__ ze,
double* __restrict__ zp,
double& zper,
double* __restrict__ zss)
{
const double gm1 = pgamma - 1.;
const double ss2 = max(pssmin * pssmin, 1.e-99);
double rx = zr[z];
double ex = max(ze[z], 0.0);
double px = gm1 * rx * ex;
double prex = gm1 * ex;
double perx = gm1 * rx;
double csqd = max(ss2, prex + perx * px / (rx * rx));
zp[z] = px;
zper = perx;
zss[z] = sqrt(csqd);
}
static __device__ void pgasCalcStateAtHalf(
const int z,
const double* __restrict__ zr0,
const double* __restrict__ zvolp,
const double* __restrict__ zvol0,
const double* __restrict__ ze,
const double* __restrict__ zwrate,
const double* __restrict__ zm,
const double dt,
double* __restrict__ zp,
double* __restrict__ zss)
{
double zper;
pgasCalcEOS(z, zr0, ze, zp, zper, zss);
const double dth = 0.5 * dt;
const double zminv = 1. / zm[z];
double dv = (zvolp[z] - zvol0[z]) * zminv;
double bulk = zr0[z] * zss[z] * zss[z];
double denom = 1. + 0.5 * zper * dv;
double src = zwrate[z] * dth * zminv;
zp[z] += (zper * src - zr0[z] * bulk * dv) / denom;
}
static __global__ void gpuInvMap(
const int* mapspkey,
const int* mapspval,
int* mappsfirst,
int* mapssnext)
{
const int i = blockIdx.x * CHUNK_SIZE + threadIdx.x;
if (i >= nums) return;
int p = mapspkey[i];
int pp = mapspkey[i+1];
int pm = mapspkey[i-1];
int s = mapspval[i];
int sp = mapspval[i+1];
if (i == 0 || p != pm) mappsfirst[p] = s;
if (i+1 == nums || p != pp)
mapssnext[s] = -1;
else
mapssnext[s] = sp;
}
static __device__ void gatherToPoints(
const int p,
const double* __restrict__ cvar,
double* __restrict__ pvar)
{
double x = 0.;
for (int s = mappsfirst[p]; s >= 0; s = mapssnext[s]) {
x += cvar[s];
}
pvar[p] = x;
}
static __device__ void gatherToPoints(
const int p,
const double2* __restrict__ cvar,
double2* __restrict__ pvar)
{
double2 x = make_double2(0., 0.);
for (int s = mappsfirst[p]; s >= 0; s = mapssnext[s]) {
x += cvar[s];
}
pvar[p] = x;
}
static __device__ void applyFixedBC(
const int p,
const double2* __restrict__ px,
double2* __restrict__ pu,
double2* __restrict__ pf,
const double2 vfix,
const double bcconst) {
const double eps = 1.e-12;
double dp = dot(px[p], vfix);
if (fabs(dp - bcconst) < eps) {
pu[p] = project(pu[p], vfix);
pf[p] = project(pf[p], vfix);
}
}
static __device__ void calcAccel(
const int p,
const double2* __restrict__ pf,
const double* __restrict__ pmass,
double2* __restrict__ pa) {
const double fuzz = 1.e-99;
pa[p] = pf[p] / max(pmass[p], fuzz);
}
static __device__ void advPosFull(
const int p,
const double2* __restrict__ px0,
const double2* __restrict__ pu0,
const double2* __restrict__ pa,
const double dt,
double2* __restrict__ px,
double2* __restrict__ pu) {
pu[p] = pu0[p] + pa[p] * dt;
px[p] = px0[p] + 0.5 * (pu[p] + pu0[p]) * dt;
}
static __device__ void hydroCalcWork(
const int s,
const int s0,
const int s3,
const int z,
const int p1,
const int p2,
const double2* __restrict__ sf,
const double2* __restrict__ sf2,
const double2* __restrict__ pu0,
const double2* __restrict__ pu,
const double2* __restrict__ px,
const double dt,
double* __restrict__ zw,
double* __restrict__ zetot) {
// Compute the work done by finding, for each element/node pair
// dwork= force * vavg
// where force is the force of the element on the node
// and vavg is the average velocity of the node over the time period
double sd1 = dot( (sf[s] + sf2[s]), (pu0[p1] + pu[p1]));
double sd2 = dot(-(sf[s] + sf2[s]), (pu0[p2] + pu[p2]));
double dwork = -0.5 * dt * (sd1 * px[p1].x + sd2 * px[p2].x);
ctemp[s0] = dwork;
double etot = zetot[z];
__syncthreads();
double dwtot = ctemp[s0];
for (int sn = s0 + dss4[s0]; sn != s0; sn += dss4[sn]) {
dwtot += ctemp[sn];
}
zetot[z] = etot + dwtot;
zw[z] = dwtot;
}
static __device__ void hydroCalcWorkRate(
const int z,
const double* __restrict__ zvol0,
const double* __restrict__ zvol,
const double* __restrict__ zw,
const double* __restrict__ zp,
const double dt,
double* __restrict__ zwrate) {
double dvol = zvol[z] - zvol0[z];
zwrate[z] = (zw[z] + zp[z] * dvol) / dt;
}
static __device__ void hydroCalcEnergy(
const int z,
const double* __restrict__ zetot,
const double* __restrict__ zm,
double* __restrict__ ze) {
const double fuzz = 1.e-99;
ze[z] = zetot[z] / (zm[z] + fuzz);
}
static __device__ void hydroCalcDtCourant(
const int z,
const double* __restrict__ zdu,
const double* __restrict__ zss,
const double* __restrict__ zdl,
double& dtz,
int& idtz) {
const double fuzz = 1.e-99;
double cdu = max(zdu[z], max(zss[z], fuzz));
double dtzcour = zdl[z] * hcfl / cdu;
dtz = dtzcour;
idtz = z << 1;
}
static __device__ void hydroCalcDtVolume(
const int z,
const double* __restrict__ zvol,
const double* __restrict__ zvol0,
const double dtlast,
double& dtz,
int& idtz) {
double zdvov = abs((zvol[z] - zvol0[z]) / zvol0[z]);
double dtzvol = dtlast * hcflv / zdvov;
if (dtzvol < dtz) {
dtz = dtzvol;
idtz = (z << 1) | 1;
}
}
static __device__ double atomicMin(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(min(val,
__longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
static __device__ void hydroFindMinDt(
const int z,
const int z0,
const int zlength,
const double dtz,
const int idtz,
double& dtnext,
int& idtnext) {
int* ctempi = (int*) ctemp2;
ctemp[z0] = dtz;
ctempi[z0] = idtz;
__syncthreads();
int len = zlength;
int half = len >> 1;
while (z0 < half) {
len = half + (len & 1);
if (ctemp[z0+len] < ctemp[z0]) {
ctemp[z0] = ctemp[z0+len];
ctempi[z0] = ctempi[z0+len];
}
__syncthreads();
half = len >> 1;
}
if (z0 == 0 && ctemp[0] < dtnext) {
atomicMin(&dtnext, ctemp[0]);
// This line isn't 100% thread-safe, but since it is only for
// a debugging aid, I'm not going to worry about it.
if (dtnext == ctemp[0]) idtnext = ctempi[0];
}
}
static __device__ void hydroCalcDt(
const int z,
const int z0,
const int zlength,
const double* __restrict__ zdu,
const double* __restrict__ zss,
const double* __restrict__ zdl,
const double* __restrict__ zvol,
const double* __restrict__ zvol0,
const double dtlast,
double& dtnext,
int& idtnext) {
double dtz;
int idtz;
hydroCalcDtCourant(z, zdu, zss, zdl, dtz, idtz);
hydroCalcDtVolume(z, zvol, zvol0, dt, dtz, idtz);
hydroFindMinDt(z, z0, zlength, dtz, idtz, dtnext, idtnext);
}
static __global__ void gpuMain1()
{
const int p = blockIdx.x * CHUNK_SIZE + threadIdx.x;
if (p >= nump) return;
double dth = 0.5 * dt;
// save off point variable values from previous cycle
px0[p] = px[p];
pu0[p] = pu[p];
// ===== Predictor step =====
// 1. advance mesh to center of time step
advPosHalf(p, px0, pu0, dth, pxp);
}
static __global__ void gpuMain2()
{
const int s0 = threadIdx.x;
const int sch = blockIdx.x;
const int s = schsfirst[sch] + s0;
if (s >= schslast[sch]) return;
const int p1 = mapsp1[s];
const int p2 = mapsp2[s];
const int z = mapsz[s];
const int s4 = mapss4[s];
const int s04 = s4 - schsfirst[sch];
dss4[s0] = s04 - s0;
dss3[s04] = s0 - s04;
__syncthreads();
const int s3 = s + dss3[s0];
// save off zone variable values from previous cycle
zvol0[z] = zvol[z];
// 1a. compute new mesh geometry
calcZoneCtrs(s, s0, z, p1, pxp, zxp);
meshCalcCharLen(s, s0, s3, z, p1, p2, znump, pxp, zxp, zdl);
ssurf[s] = rotateCCW(0.5 * (pxp[p1] + pxp[p2]) - zxp[z]);
calcSideVols(s, z, p1, p2, pxp, zxp, sareap, svolp);
calcZoneVols(s, s0, z, sareap, svolp, zareap, zvolp);
// 2. compute corner masses
hydroCalcRho(z, zm, zvolp, zrp);
calcCrnrMass(s, s3, z, zrp, zareap, smf, cmaswt);
// 3. compute material state (half-advanced)
// call this routine from only one thread per zone
if (s3 > s) pgasCalcStateAtHalf(z, zr, zvolp, zvol0, ze, zwrate,
zm, dt, zp, zss);
__syncthreads();
// 4. compute forces
pgasCalcForce(s, z, zp, ssurf, sfp);
ttsCalcForce(s, z, zareap, zrp, zss, sareap, smf, ssurf, sft);
qcsCalcForce(s, s0, s3, s4, z, p1, p2);
}
static __global__ void gpuMain3()
{
const int p = blockIdx.x * CHUNK_SIZE + threadIdx.x;
if (p >= nump) return;
// gather corner masses, forces to points
gatherToPoints(p, cmaswt, pmaswt);
gatherToPoints(p, cftot, pf);
// 4a. apply boundary conditions
for (int bc = 0; bc < numbcx; ++bc)
applyFixedBC(p, pxp, pu0, pf, vfixx, bcx[bc]);
for (int bc = 0; bc < numbcy; ++bc)
applyFixedBC(p, pxp, pu0, pf, vfixy, bcy[bc]);
// 5. compute accelerations
calcAccel(p, pf, pmaswt, pap);
// ===== Corrector step =====
// 6. advance mesh to end of time step
advPosFull(p, px0, pu0, pap, dt, px, pu);
}
static __global__ void gpuMain4()
{
const int s0 = threadIdx.x;
const int sch = blockIdx.x;
const int s = schsfirst[sch] + s0;
if (s >= schslast[sch]) return;
const int p1 = mapsp1[s];
const int p2 = mapsp2[s];
const int z = mapsz[s];
const int s4 = mapss4[s];
const int s04 = s4 - schsfirst[sch];
dss4[s0] = s04 - s0;
dss3[s04] = s0 - s04;
__syncthreads();
const int s3 = s + dss3[s0];
// 6a. compute new mesh geometry
calcZoneCtrs(s, s0, z, p1, px, zx);
calcSideVols(s, z, p1, p2, px, zx, sarea, svol);
calcZoneVols(s, s0, z, sarea, svol, zarea, zvol);
// 7. compute work
hydroCalcWork(s, s0, s3, z, p1, p2, sfp, sfq, pu0, pu, pxp, dt,
zw, zetot);
}
static __global__ void gpuMain5()
{
const int z = blockIdx.x * CHUNK_SIZE + threadIdx.x;
if (z >= numz) return;
const int z0 = threadIdx.x;
const int zlength = min(CHUNK_SIZE, numz - blockIdx.x * CHUNK_SIZE);
// 7. compute work
hydroCalcWorkRate(z, zvol0, zvol, zw, zp, dt, zwrate);
// 8. update state variables
hydroCalcEnergy(z, zetot, zm, ze);
hydroCalcRho(z, zm, zvol, zr);
// 9. compute timestep for next cycle
hydroCalcDt(z, z0, zlength, zdu, zss, zdl, zvol, zvol0, dt,
dtnext, idtnext);
}
void meshCheckBadSides() {
int numsbadH;
CHKERR(cudaMemcpyFromSymbol(&numsbadH, numsbad, sizeof(int)));
// if there were negative side volumes, error exit
if (numsbadH > 0) {
cerr << "Error: " << numsbadH << " negative side volumes" << endl;
cerr << "Exiting..." << endl;
exit(1);
}
}
void computeChunks(
const int nums,
const int numz,
const int* mapsz,
const int chunksize,
int& numsch,
int*& schsfirst,
int*& schslast,
int*& schzfirst,
int*& schzlast) {
int* stemp1 = Memory::alloc<int>(nums/3+1);
int* stemp2 = Memory::alloc<int>(nums/3+1);
int* ztemp1 = Memory::alloc<int>(nums/3+1);
int* ztemp2 = Memory::alloc<int>(nums/3+1);
int nsch = 0;
int s1;
int s2 = 0;
while (s2 < nums) {
s1 = s2;
s2 = min(s2 + chunksize, nums);
if (s2 < nums) {
while (mapsz[s2] == mapsz[s2-1]) --s2;
}
stemp1[nsch] = s1;
stemp2[nsch] = s2;
ztemp1[nsch] = mapsz[s1];
ztemp2[nsch] = (s2 == nums ? numz : mapsz[s2]);
++nsch;
}
numsch = nsch;
schsfirst = Memory::alloc<int>(numsch);
schslast = Memory::alloc<int>(numsch);
schzfirst = Memory::alloc<int>(numsch);
schzlast = Memory::alloc<int>(numsch);
copy(stemp1, stemp1 + numsch, schsfirst);
copy(stemp2, stemp2 + numsch, schslast);
copy(ztemp1, ztemp1 + numsch, schzfirst);
copy(ztemp2, ztemp2 + numsch, schzlast);
Memory::free(stemp1);
Memory::free(stemp2);
Memory::free(ztemp1);
Memory::free(ztemp2);
}
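// Worked example (illustrative only; the values below are not taken from any real
// mesh): with nums = 10, numz = 3, chunksize = 4 and
//     mapsz = {0,0,0, 1,1,1, 2,2,2,2}
// the loop above trims each chunk back so that no zone's sides straddle a chunk
// boundary, producing
//     sides [0,3)  -> zones [0,1)
//     sides [3,6)  -> zones [1,2)
//     sides [6,10) -> zones [2,3)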
void hydroInit(
const int numpH,
const int numzH,
const int numsH,
const int numcH,
const int numeH,
const double pgammaH,
const double pssminH,
const double talfaH,
const double tssminH,
const double qgammaH,
const double q1H,
const double q2H,
const double hcflH,
const double hcflvH,
const int numbcxH,
const double* bcxH,
const int numbcyH,
const double* bcyH,
const double2* pxH,
const double2* puH,
const double* zmH,
const double* zrH,
const double* zvolH,
const double* zeH,
const double* zetotH,
const double* zwrateH,
const double* smfH,
const int* mapsp1H,
const int* mapsp2H,
const int* mapszH,
const int* mapss4H,
const int* mapseH,
const int* znumpH) {
printf("Running Hydro on device...\n");
computeChunks(numsH, numzH, mapszH, CHUNK_SIZE, numschH,
schsfirstH, schslastH, schzfirstH, schzlastH);
numpchH = (numpH+CHUNK_SIZE-1) / CHUNK_SIZE;
numzchH = (numzH+CHUNK_SIZE-1) / CHUNK_SIZE;
CHKERR(cudaMemcpyToSymbol(numsch, &numschH, sizeof(int)));
CHKERR(cudaMemcpyToSymbol(nump, &numpH, sizeof(int)));
CHKERR(cudaMemcpyToSymbol(numz, &numzH, sizeof(int)));
CHKERR(cudaMemcpyToSymbol(nums, &numsH, sizeof(int)));
CHKERR(cudaMemcpyToSymbol(numc, &numcH, sizeof(int)));
CHKERR(cudaMemcpyToSymbol(pgamma, &pgammaH, sizeof(double)));
CHKERR(cudaMemcpyToSymbol(pssmin, &pssminH, sizeof(double)));
CHKERR(cudaMemcpyToSymbol(talfa, &talfaH, sizeof(double)));
CHKERR(cudaMemcpyToSymbol(tssmin, &tssminH, sizeof(double)));
CHKERR(cudaMemcpyToSymbol(qgamma, &qgammaH, sizeof(double)));
CHKERR(cudaMemcpyToSymbol(q1, &q1H, sizeof(double)));
CHKERR(cudaMemcpyToSymbol(q2, &q2H, sizeof(double)));
CHKERR(cudaMemcpyToSymbol(hcfl, &hcflH, sizeof(double)));
CHKERR(cudaMemcpyToSymbol(hcflv, &hcflvH, sizeof(double)));
const double2 vfixxH = make_double2(1., 0.);
const double2 vfixyH = make_double2(0., 1.);
CHKERR(cudaMemcpyToSymbol(vfixx, &vfixxH, sizeof(double2)));
CHKERR(cudaMemcpyToSymbol(vfixy, &vfixyH, sizeof(double2)));
CHKERR(cudaMemcpyToSymbol(numbcx, &numbcxH, sizeof(int)));
CHKERR(cudaMemcpyToSymbol(numbcy, &numbcyH, sizeof(int)));
CHKERR(cudaMemcpyToSymbol(bcx, bcxH, numbcxH*sizeof(double)));
CHKERR(cudaMemcpyToSymbol(bcy, bcyH, numbcyH*sizeof(double)));
CHKERR(cudaMalloc(&schsfirstD, numschH*sizeof(int)));
CHKERR(cudaMalloc(&schslastD, numschH*sizeof(int)));
CHKERR(cudaMalloc(&schzfirstD, numschH*sizeof(int)));
CHKERR(cudaMalloc(&schzlastD, numschH*sizeof(int)));
CHKERR(cudaMalloc(&mapsp1D, numsH*sizeof(int)));
CHKERR(cudaMalloc(&mapsp2D, numsH*sizeof(int)));
CHKERR(cudaMalloc(&mapszD, numsH*sizeof(int)));
CHKERR(cudaMalloc(&mapss4D, numsH*sizeof(int)));
CHKERR(cudaMalloc(&znumpD, numzH*sizeof(int)));
CHKERR(cudaMalloc(&pxD, numpH*sizeof(double2)));
CHKERR(cudaMalloc(&pxpD, numpH*sizeof(double2)));
CHKERR(cudaMalloc(&px0D, numpH*sizeof(double2)));
CHKERR(cudaMalloc(&zxD, numzH*sizeof(double2)));
CHKERR(cudaMalloc(&zxpD, numzH*sizeof(double2)));
CHKERR(cudaMalloc(&puD, numpH*sizeof(double2)));
CHKERR(cudaMalloc(&pu0D, numpH*sizeof(double2)));
CHKERR(cudaMalloc(&papD, numpH*sizeof(double2)));
CHKERR(cudaMalloc(&ssurfD, numsH*sizeof(double2)));
CHKERR(cudaMalloc(&zmD, numzH*sizeof(double)));
CHKERR(cudaMalloc(&zrD, numzH*sizeof(double)));
CHKERR(cudaMalloc(&zrpD, numzH*sizeof(double)));
CHKERR(cudaMalloc(&sareaD, numsH*sizeof(double)));
CHKERR(cudaMalloc(&svolD, numsH*sizeof(double)));
CHKERR(cudaMalloc(&zareaD, numzH*sizeof(double)));
CHKERR(cudaMalloc(&zvolD, numzH*sizeof(double)));
CHKERR(cudaMalloc(&zvol0D, numzH*sizeof(double)));
CHKERR(cudaMalloc(&zdlD, numzH*sizeof(double)));
CHKERR(cudaMalloc(&zduD, numzH*sizeof(double)));
CHKERR(cudaMalloc(&zeD, numzH*sizeof(double)));
CHKERR(cudaMalloc(&zetot0D, numzH*sizeof(double)));
CHKERR(cudaMalloc(&zetotD, numzH*sizeof(double)));
CHKERR(cudaMalloc(&zwD, numzH*sizeof(double)));
CHKERR(cudaMalloc(&zwrateD, numzH*sizeof(double)));
CHKERR(cudaMalloc(&zpD, numzH*sizeof(double)));
CHKERR(cudaMalloc(&zssD, numzH*sizeof(double)));
CHKERR(cudaMalloc(&smfD, numsH*sizeof(double)));
CHKERR(cudaMalloc(&careapD, numcH*sizeof(double)));
CHKERR(cudaMalloc(&sareapD, numsH*sizeof(double)));
CHKERR(cudaMalloc(&svolpD, numsH*sizeof(double)));
CHKERR(cudaMalloc(&zareapD, numzH*sizeof(double)));
CHKERR(cudaMalloc(&zvolpD, numzH*sizeof(double)));
CHKERR(cudaMalloc(&cmaswtD, numsH*sizeof(double)));
CHKERR(cudaMalloc(&pmaswtD, numpH*sizeof(double)));
CHKERR(cudaMalloc(&sfpD, numsH*sizeof(double2)));
CHKERR(cudaMalloc(&sftD, numsH*sizeof(double2)));
CHKERR(cudaMalloc(&sfqD, numsH*sizeof(double2)));
CHKERR(cudaMalloc(&cftotD, numcH*sizeof(double2)));
CHKERR(cudaMalloc(&pfD, numpH*sizeof(double2)));
CHKERR(cudaMalloc(&cevolD, numcH*sizeof(double)));
CHKERR(cudaMalloc(&cduD, numcH*sizeof(double)));
CHKERR(cudaMalloc(&cdivD, numcH*sizeof(double)));
CHKERR(cudaMalloc(&zucD, numzH*sizeof(double2)));
CHKERR(cudaMalloc(&crmuD, numcH*sizeof(double)));
CHKERR(cudaMalloc(&cqeD, 2*numcH*sizeof(double2)));
CHKERR(cudaMalloc(&ccosD, numcH*sizeof(double)));
CHKERR(cudaMalloc(&cwD, numcH*sizeof(double)));
CHKERR(cudaMalloc(&mapspkeyD, numsH*sizeof(int)));
CHKERR(cudaMalloc(&mapspvalD, numsH*sizeof(int)));
CHKERR(cudaMalloc(&mappsfirstD, numpH*sizeof(int)));
CHKERR(cudaMalloc(&mapssnextD, numsH*sizeof(int)));
CHKERR(cudaMemcpyToSymbol(schsfirst, &schsfirstD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(schslast, &schslastD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(schzfirst, &schzfirstD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(schzlast, &schzlastD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(mapsp1, &mapsp1D, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(mapsp2, &mapsp2D, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(mapsz, &mapszD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(mapss4, &mapss4D, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(mapspkey, &mapspkeyD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(mapspval, &mapspvalD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(mappsfirst, &mappsfirstD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(mapssnext, &mapssnextD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(znump, &znumpD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(px, &pxD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(pxp, &pxpD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(px0, &px0D, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(zx, &zxD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(zxp, &zxpD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(pu, &puD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(pu0, &pu0D, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(pap, &papD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(ssurf, &ssurfD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(zm, &zmD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(zr, &zrD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(zrp, &zrpD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(sarea, &sareaD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(svol, &svolD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(zarea, &zareaD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(zvol, &zvolD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(zvol0, &zvol0D, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(zdl, &zdlD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(zdu, &zduD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(ze, &zeD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(zetot, &zetotD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(zw, &zwD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(zwrate, &zwrateD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(zp, &zpD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(zss, &zssD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(smf, &smfD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(careap, &careapD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(sareap, &sareapD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(svolp, &svolpD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(zareap, &zareapD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(zvolp, &zvolpD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(cmaswt, &cmaswtD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(pmaswt, &pmaswtD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(sfp, &sfpD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(sft, &sftD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(sfq, &sfqD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(cftot, &cftotD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(pf, &pfD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(cevol, &cevolD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(cdu, &cduD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(cdiv, &cdivD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(zuc, &zucD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(crmu, &crmuD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(cqe, &cqeD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(ccos, &ccosD, sizeof(void*)));
CHKERR(cudaMemcpyToSymbol(cw, &cwD, sizeof(void*)));
CHKERR(cudaMemcpy(schsfirstD, schsfirstH, numschH*sizeof(int), cudaMemcpyHostToDevice));
CHKERR(cudaMemcpy(schslastD, schslastH, numschH*sizeof(int), cudaMemcpyHostToDevice));
CHKERR(cudaMemcpy(schzfirstD, schzfirstH, numschH*sizeof(int), cudaMemcpyHostToDevice));
CHKERR(cudaMemcpy(schzlastD, schzlastH, numschH*sizeof(int), cudaMemcpyHostToDevice));
CHKERR(cudaMemcpy(mapsp1D, mapsp1H, numsH*sizeof(int), cudaMemcpyHostToDevice));
CHKERR(cudaMemcpy(mapsp2D, mapsp2H, numsH*sizeof(int), cudaMemcpyHostToDevice));
CHKERR(cudaMemcpy(mapszD, mapszH, numsH*sizeof(int), cudaMemcpyHostToDevice));
CHKERR(cudaMemcpy(mapss4D, mapss4H, numsH*sizeof(int), cudaMemcpyHostToDevice));
CHKERR(cudaMemcpy(znumpD, znumpH, numzH*sizeof(int), cudaMemcpyHostToDevice));
CHKERR(cudaMemcpy(zmD, zmH, numzH*sizeof(double), cudaMemcpyHostToDevice));
CHKERR(cudaMemcpy(smfD, smfH, numsH*sizeof(double), cudaMemcpyHostToDevice));
CHKERR(cudaMemcpy(pxD, pxH, numpH*sizeof(double2), cudaMemcpyHostToDevice));
CHKERR(cudaMemcpy(puD, puH, numpH*sizeof(double2), cudaMemcpyHostToDevice));
CHKERR(cudaMemcpy(zrD, zrH, numzH*sizeof(double), cudaMemcpyHostToDevice));
CHKERR(cudaMemcpy(zvolD, zvolH, numzH*sizeof(double), cudaMemcpyHostToDevice));
CHKERR(cudaMemcpy(zeD, zeH, numzH*sizeof(double), cudaMemcpyHostToDevice));
CHKERR(cudaMemcpy(zetotD, zetotH, numzH*sizeof(double), cudaMemcpyHostToDevice));
CHKERR(cudaMemcpy(zwrateD, zwrateH, numzH*sizeof(double), cudaMemcpyHostToDevice));
thrust::device_ptr<int> mapsp1T(mapsp1D);
thrust::device_ptr<int> mapspkeyT(mapspkeyD);
thrust::device_ptr<int> mapspvalT(mapspvalD);
thrust::copy(mapsp1T, mapsp1T + numsH, mapspkeyT);
thrust::sequence(mapspvalT, mapspvalT + numsH);
thrust::stable_sort_by_key(mapspkeyT, mapspkeyT + numsH, mapspvalT);
int gridSize = (numsH+CHUNK_SIZE-1) / CHUNK_SIZE;
int chunkSize = CHUNK_SIZE;
gpuInvMap<<<gridSize, chunkSize>>>(mapspkeyD, mapspvalD,
mappsfirstD, mapssnextD);
cudaDeviceSynchronize();
int zero = 0;
CHKERR(cudaMemcpyToSymbol(numsbad, &zero, sizeof(int)));
}
void hydroDoCycle(
const double dtH,
double& dtnextH,
int& idtnextH) {
int gridSizeS, gridSizeP, gridSizeZ, chunkSize;
CHKERR(cudaMemcpyToSymbol(dt, &dtH, sizeof(double)));
gridSizeS = numschH;
gridSizeP = numpchH;
gridSizeZ = numzchH;
chunkSize = CHUNK_SIZE;
gpuMain1<<<gridSizeP, chunkSize>>>();
cudaDeviceSynchronize();
gpuMain2<<<gridSizeS, chunkSize>>>();
cudaDeviceSynchronize();
meshCheckBadSides();
gpuMain3<<<gridSizeP, chunkSize>>>();
cudaDeviceSynchronize();
double bigval = 1.e99;
CHKERR(cudaMemcpyToSymbol(dtnext, &bigval, sizeof(double)));
gpuMain4<<<gridSizeS, chunkSize>>>();
cudaDeviceSynchronize();
gpuMain5<<<gridSizeZ, chunkSize>>>();
cudaDeviceSynchronize();
meshCheckBadSides();
CHKERR(cudaMemcpyFromSymbol(&dtnextH, dtnext, sizeof(double)));
CHKERR(cudaMemcpyFromSymbol(&idtnextH, idtnext, sizeof(int)));
}
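// Minimal sketch of how a host driver might use hydroDoCycle (hypothetical; the
// real driver lives outside this file, and dtInitial/numCycles/dtMax are assumed
// names, not symbols defined here):
//
//     double dtH = dtInitial;
//     double dtnextH;
//     int idtnextH;
//     for (int cycle = 0; cycle < numCycles; ++cycle) {
//         hydroDoCycle(dtH, dtnextH, idtnextH);
//         dtH = std::min(dtnextH, dtMax);   // next cycle uses the dt suggested by gpuMain5
//     }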
void hydroGetData(
const int numpH,
const int numzH,
double2* pxH,
double* zrH,
double* zeH,
double* zpH) {
CHKERR(cudaMemcpy(pxH, pxD, numpH*sizeof(double2), cudaMemcpyDeviceToHost));
CHKERR(cudaMemcpy(zrH, zrD, numzH*sizeof(double), cudaMemcpyDeviceToHost));
CHKERR(cudaMemcpy(zeH, zeD, numzH*sizeof(double), cudaMemcpyDeviceToHost));
CHKERR(cudaMemcpy(zpH, zpD, numzH*sizeof(double), cudaMemcpyDeviceToHost));
}
void hydroInitGPU()
{
int one = 1;
CHKERR(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1));
CHKERR(cudaMemcpyToSymbol(gpuinit, &one, sizeof(int)));
}
void hydroFinalGPU()
{
}
|
23d545c48a3271a30e0102803a2dbb95444610df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// bench includes
#include "benchmarks/interface/TextureBenchmark.hpp"
#include "cudatools/interface/ErrorHandler.hpp"
#include "util/interface/Repeat.hpp"
////////////////////////////////////////////////////////////////////////////////////////////////////
texture<int, 1, hipReadModeElementType> Surface;
__global__
void cudaTextureMemoryStride(long long int* latency) {
int k = 0;
long long int start = clock64();
for (unsigned i = 0; i < LOOP_REPEATS; i++) {
repeat(UNROLL_REPEATS, k = tex1Dfetch(Surface, k); )
}
long long int end = clock64();
if (k < 0) {
*latency = -1;
} else {
*latency = start < end ? end-start : end + (0xffffffffffffffff - start);
}
}
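// Note on the measurement above: each iteration feeds the previously fetched value
// back in as the next index (k = tex1Dfetch(Surface, k)), so the fetches form a
// dependent chain and the clock64() difference approximates the texture access
// latency for the stride pattern stored in deviceStrides. The "k < 0" branch only
// keeps the compiler from optimizing the chain away.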
void cudaTextureMemoryStrideWrapper(int* deviceStrides, unsigned elems, long long int* deviceLatency, const dim3& grid, const dim3& block) {
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<int>();
Surface.addressMode[0] = hipAddressModeWrap;
Surface.addressMode[1] = hipAddressModeWrap;
Surface.filterMode = hipFilterModePoint;
Surface.normalized = false;
check( hipBindTexture(0, Surface, deviceStrides, channelDesc, elems * sizeof(int)) );
hipLaunchKernelGGL(( cudaTextureMemoryStride), dim3(grid), dim3(block), 0, 0, deviceLatency);
} | 23d545c48a3271a30e0102803a2dbb95444610df.cu | // bench includes
#include "benchmarks/interface/TextureBenchmark.hpp"
#include "cudatools/interface/ErrorHandler.hpp"
#include "util/interface/Repeat.hpp"
////////////////////////////////////////////////////////////////////////////////////////////////////
texture<int, 1, cudaReadModeElementType> Surface;
__global__
void cudaTextureMemoryStride(long long int* latency) {
int k = 0;
long long int start = clock64();
for (unsigned i = 0; i < LOOP_REPEATS; i++) {
repeat(UNROLL_REPEATS, k = tex1Dfetch(Surface, k); )
}
long long int end = clock64();
if (k < 0) {
*latency = -1;
} else {
*latency = start < end ? end-start : end + (0xffffffffffffffff - start);
}
}
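// Note on the measurement above: each iteration feeds the previously fetched value
// back in as the next index (k = tex1Dfetch(Surface, k)), so the fetches form a
// dependent chain and the clock64() difference approximates the texture access
// latency for the stride pattern stored in deviceStrides. The "k < 0" branch only
// keeps the compiler from optimizing the chain away.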
void cudaTextureMemoryStrideWrapper(int* deviceStrides, unsigned elems, long long int* deviceLatency, const dim3& grid, const dim3& block) {
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<int>();
Surface.addressMode[0] = cudaAddressModeWrap;
Surface.addressMode[1] = cudaAddressModeWrap;
Surface.filterMode = cudaFilterModePoint;
Surface.normalized = false;
check( cudaBindTexture(0, Surface, deviceStrides, channelDesc, elems * sizeof(int)) );
cudaTextureMemoryStride<<<grid, block>>>(deviceLatency);
} |
a6278621f6b4014c9417c6ab034440f765a756d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kBiggerThan(float* gMat1, float* gMat2, float* gMatTarget, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < numElements)
gMatTarget[idx] = gMat1[idx] > gMat2[idx];
} | a6278621f6b4014c9417c6ab034440f765a756d9.cu | #include "includes.h"
__global__ void kBiggerThan(float* gMat1, float* gMat2, float* gMatTarget, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < numElements)
gMatTarget[idx] = gMat1[idx] > gMat2[idx];
} |
2ea5600a0e5557aaabe9c2a7b2eb3c141470c638.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "iostream"
__global__ void hello_fromGPU(int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
printf("Hello World from thread %d-%d\n", n, tid);
}
void hello_fromCPU()
{
printf("Hello World from CPU\n");
}
int main()
{
hipLaunchKernelGGL(( hello_fromGPU), dim3(2),dim3(3), 0, 0, 0);
//hipLaunchKernelGGL(( hello_fromGPU), dim3(2),dim3(3), 0, 0, 1);
// hipDeviceSynchronize();
hello_fromCPU();
return 0;
}
| 2ea5600a0e5557aaabe9c2a7b2eb3c141470c638.cu | #include <iostream>
#include <cstdio>   // printf is used by both host and device code
__global__ void hello_fromGPU(int n)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
printf("Hello World from thread %d-%d\n", n, tid);
}
void hello_fromCPU()
{
printf("Hello World from CPU\n");
}
int main()
{
hello_fromGPU<<<2,3>>>(0);
// hello_fromGPU<<<2,3>>>(1);
// cudaDeviceSynchronize();
hello_fromCPU();
return 0;
}
|
3efa55e69dec82b57dcd2d3aa9ab1b5d8bf18886.hip | // !!! This is a file automatically generated by hipify!!!
/*
* CSS-535 Program 2: Matrix Multiplication on GPU
* Author: Afrooz Rahmati
* 02-23-2021
*
 * special thanks to Tony Varela; I worked with him on a previous lab for
 * vector-matrix multiplication, so I reused some of our earlier implementations.
 * Our previous work is here (private): https://github.com/valenotary/CSS-535-Lab-03-GEMV
 * Compile command = nvcc -std=c++11 naive1D.cu -lcublas -o naive1D
 * Run command with profiling = ./naive1D matrix_size blocks_per_grid_dim threads_per_block_dim
 * Run example : ./naive1D 1000 32 32
 *   (the block/thread counts are used for both x and y, so threads_per_block_dim
 *    must satisfy threads^2 <= 1024; a 32x32 grid of 32x32 blocks covers N = 1000)
*/
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <rocblas.h> // as a benchmark
#include <random> // for random initialization
#include <chrono> // timing
#include <iostream> // for output
using namespace std;
using namespace std::chrono;
/*
* Functionality: Initializing Matrix with float values other than 0 and 1
* input:
m = the input matrix
M, N = matrix dimensions M X N
output:
None
*/
void initialize_matrix(float *m, const int M, const int N) {
//std::default_random_engine dre;
// dre.seed(3); // seeded for reproducibility
// const std::uniform_real_distribution<float> uniform_dist(-10, 10); // uniform distribution [-10, 10]
    // fill the M x N row-major matrix with values in roughly [1, 10],
    // avoiding 0 (and, in practice, 1) so those values remain useful for error handling
    for (size_t i=0; i < M; i++)
        for (size_t j=0; j < N; j++)
            m[i * N + j] = 1 + static_cast <float> (rand()) / ( static_cast <float> (RAND_MAX/(9)));   // row-major: the row stride is N
}
/*
functionality: naive Matrix Multiplication Implementation a*b=c
input parameters:
a : the input matrix
b : the input matrix
c : the result matrix
N : Matrices dimension( number of elements )
consideration : the matrix size is square */
__global__ void multiplication(float *a, float *b, float *c, const int n){
int col = blockIdx.x * blockDim.x + threadIdx.x; //location in c
int row = blockIdx.y * blockDim.y + threadIdx.y;
float sum = 0.0f;
if( col < n && row < n)
{
for(int i = 0; i < n; i++)
{
sum += a[row * n + i] * b[i * n + col];
}
c[row * n + col] = sum;
}
}
//functionality: computes the maximum element-wise absolute difference (residual) between two matrices
//pre condition : M is the total number of elements; resultVector2 and resultVector are stored as flat 1D arrays
//post condition: returns the maximum residual as a single-precision float
float residual(int M, float* resultVector2, float* resultVector) {
//float* k = new float[M];
float max_res =0.0;
float accum = 0.0;
for (int i = 0; i < M; ++i) {
accum = fabs(resultVector2[i] - resultVector[i]);
max_res=max(max_res,accum) ;
}
return max_res;
}
/*
* Functionality: Printing Matrix values for Debugging purpose only
* input:
a = the input matrix
M, N = matrix dimensions M X N
d = The text before each value
output:
None
*/
void print_matrix(float *a, const int N, const int M, const char *d) {
int i, j;
for(i=0; i<N; i++)
{
printf("\n%s[%d]:", d, i);
for (j=0; j<M; j++)
printf("\t%6.4f", a[i*M+j]);
}
printf("\n");
}
/*
* Functionality: transpose the matrix - for comparing the result with cublas
* input:
src = the input matrix
dst = expected transpose matrix
M, N = matrix dimensions M X N
output:
None
*/
void transpose(float *src, float *dst, const int N, const int M) {
for(int n = 0; n<N*M; n++) {
int i = n/N;
int j = n%N;
dst[n] = src[M*j + i];
}
}
int main(int argc, char **argv) {
// This program should only take in the M and N dimensions; within the program, we figure out the execution configurations ourselves
if( argc != 4 )
{
std::cout << "please enter matrix size , number of blocks ,number of threads per block" << std::endl;
return 0;
}
const size_t N = atoi(argv[1]) ;
// let's create the grid / block configuration, but just really simply.
dim3 blockPerGrid (atoi(argv[2]) , atoi(argv[2]) );
dim3 threadPerBlock (atoi(argv[3]) , atoi(argv[3]) );
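    // Sizing note (illustrative): the kernel assigns one thread per element of c, and
    // the same count is used for both x and y, so the per-block thread total is
    // threads^2. For N = 1000, a 32x32 block (1024 threads, the usual per-block limit)
    // needs ceil(1000/32) = 32 blocks per dimension, i.e. "./naive1D 1000 32 32".
    // No check is made here that blocks*threads actually covers N in each dimension.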
// cublas declarations
hipblasHandle_t cublas_handle;
// allocate host memory
float *a = (float*)malloc( N * N * sizeof(float) );
float *b = (float*)malloc( N * N * sizeof(float) );
float *c_out_naive= (float*)malloc( N * N * sizeof(float) );
float *c_out_cublas= (float*)malloc( N * N * sizeof(float) );
float *c_transposed_cublas= (float*)malloc( N * N * sizeof(float) );
// allocate device memory
float *d_a, *d_b, *d_c_out_naive, *d_c_out_cublas;
hipMalloc((void**)&d_a, N * N * sizeof(float));
hipMalloc((void**)&d_b, N * N * sizeof(float));
hipMalloc((void**)&d_c_out_naive, N * N * sizeof(float));
hipMalloc((void**)&d_c_out_cublas, N * N * sizeof(float));
// initialize host array with random data
initialize_matrix(a, N, N);
initialize_matrix(b, N, N);
/* The elements of the first column */
// a[0] = 1;
// a[1] = 2;
// a[2] = 3;
// a[3] = 4;
// /* The elements of the second column */
// a[N] = 1;
// a[N + 1] = 1;
// a[N + 2] = 2;
// a[N + 3] = 1;
// /* The elements of the third column */
// a[N * 2] = 3;
// a[N * 2 + 1] = 1;
// a[N * 2 + 2] = 2;
// a[N * 2 + 3] = 1;
// /* The elements of the fourth column */
// a[N * 3] = 5;
// a[N * 3 + 1] = 4;
// a[N * 3 + 2] = 7;
// a[N * 3 + 3] = 3;
// ////////////Second input matrix
// /* The elements of the first column */
// b[0] = 1;
// b[1] = 2;
// b[2] = 3;
// b[3] = 4;
// /* The elements of the second column */
// b[N] = 5;
// b[N + 1] = 1;
// b[N + 2] = 8;
// b[N + 3] = 1;
// /* The elements of the third column */
// b[N * 2] = 3;
// b[N * 2 + 1] = 1;
// b[N * 2 + 2] = 2;
// b[N * 2 + 3] = 1;
// /* The elements of the fourth column */
// b[N * 3] = 5;
// b[N * 3 + 1] = 4;
// b[N * 3 + 2] = 7;
// b[N * 3 + 3] = 3;
// print_matrix(a, N, N, "input Matrix a");
// print_matrix(b, N, N, "input Matrix b");
std::cout << '\n';
// copy a and b into device memory (host to device), time it as well
auto h2d_start = std::chrono::high_resolution_clock::now();
hipMemcpy(d_a, a, sizeof(float) * N * N, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(float) * N * N, hipMemcpyHostToDevice);
auto h2d_end = std::chrono::high_resolution_clock::now();
auto h2d_duration = std::chrono::duration_cast<std::chrono::microseconds>(h2d_end - h2d_start).count();
// the naive2D kernel execution .....timing....
std::cout << "STARTING NAIVE" << std::endl;
auto naive_exec_start = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( multiplication), dim3(blockPerGrid), dim3(threadPerBlock), 0, 0, d_a, d_b, d_c_out_naive, N);
hipDeviceSynchronize();
std::cout << "FINISHED NAIVE" << std::endl;
// since the kernels are executed asynchronously, need to sync so that we can get accurate timing
auto naive_exec_end = std::chrono::high_resolution_clock::now();
auto naive_exec_duration = std::chrono::duration_cast<std::chrono::microseconds>(naive_exec_end - naive_exec_start).count();
// copy d_c_out_naive back to the host (device to host), time it as well
auto d2h_start = std::chrono::high_resolution_clock::now();
hipMemcpy(c_out_naive, d_c_out_naive, sizeof(float) * N * N, hipMemcpyDeviceToHost);
auto d2h_end = std::chrono::high_resolution_clock::now();
auto d2h_duration = std::chrono::duration_cast<std::chrono::microseconds>(d2h_end - d2h_start).count();
// total host<->device transfer time
auto gpu_transfer_total_duration = h2d_duration + d2h_duration;
hipblasCreate(&cublas_handle);
// hipblasSetMatrix(M, N, sizeof(float), m, M, )
const float alf = 1;
const float bet = 0;
const float *alpha = &alf;
const float *beta = &bet;
int lda=N,ldb=N,ldc=N;
// time the cuBLAS call (execution only, not the copy back to the host)
auto cublas_exec_start = std::chrono::high_resolution_clock::now();
hipblasSgemm(cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_T, N, N, N, alpha, d_a, lda, d_b, ldb, beta, d_c_out_cublas, ldc);
// the GEMM is launched asynchronously, so synchronize before stopping the clock
hipDeviceSynchronize();
auto cublas_exec_end = std::chrono::high_resolution_clock::now();
auto cublas_exec_duration = std::chrono::duration_cast<std::chrono::microseconds>(cublas_exec_end - cublas_exec_start).count();
// copy the cublas device vector back out to host
hipMemcpy(c_out_cublas, d_c_out_cublas, sizeof(float) *N* N, hipMemcpyDeviceToHost);
std::cout << "Comparing output vectors:\n";
//calculating the residuals
//cuBLAS assumes column-major storage, so its result comes back transposed relative to the row-major naive kernel
//and needs to be transposed on the host before comparison (a GPU transpose would also work, but is kept simple here)
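// A sketch of an alternative that would avoid the host-side transpose below (it reuses the handle,
// scalars and device buffers defined above): since C^T = B^T * A^T, passing the operands swapped and
// untransposed makes the BLAS call return the product directly in row-major layout:
// hipblasSgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, N, N,
// alpha, d_b, ldb, d_a, lda, beta, d_c_out_cublas, ldc);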
float rse{ 0.0f };
transpose( c_out_cublas, c_transposed_cublas , N, N);
rse = residual(N*N,c_out_naive,c_transposed_cublas) ;
std::cout << "ERROR: " << rse << std::endl;
//print_test(c_out_naive,N);
// print_matrix(c_out_naive, N, N, "output naive Matrix c");
//print_matrix(c_transposed_cublas, N, N, "output cublas Matrix c");
std::cout <<
" Naive Total Time (us), Naive Execution Time (us), cuBLAS Execution Time (us)\n";
std::cout << naive_exec_duration +
gpu_transfer_total_duration << ", " << naive_exec_duration
<< ", " << cublas_exec_duration << '\n';
//clean up
hipblasDestroy(cublas_handle);
hipFree(d_c_out_cublas);
hipFree(d_c_out_naive);
hipFree(d_b);
hipFree(d_a);
free(c_transposed_cublas);
free(c_out_cublas);
free(c_out_naive);
free(a);
free(b);
return 0;
}
| 3efa55e69dec82b57dcd2d3aa9ab1b5d8bf18886.cu |
/*
* CSS-535 Program 2: Matrix Multiplication on GPU
* Author: Afrooz Rahmati
* 02-23-2021
*
* special thanks to Tony Varela, I worked with him on previous lab for
* vector-matrix multiplication, so I used some of our previous implementations
* our previous work is here (private): https://github.com/valenotary/CSS-535-Lab-03-GEMV
* Compile command = nvcc -std=c++11 naive1D.cu -lcublas -o naive1D
* Run command with profiling = ./naive1D matrix_size number_of_Blocks number_of_threads
* Run example : ./naive1D 1000 1000 1000
*/
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cublas_v2.h> // as a benchmark
#include <random> // for random initialization
#include <chrono> // timing
#include <iostream> // for output
using namespace std;
using namespace std::chrono;
/*
* Functionality: Initializing Matrix with float values other than 0 and 1
* input:
m = the input matrix
M, N = matrix dimensions M X N
output:
None
*/
void initialize_matrix(float *m, const int M, const int N) {
//std::default_random_engine dre;
// dre.seed(3); // seeded for reproducibility
// const std::uniform_real_distribution<float> uniform_dist(-10, 10); // uniform distribution [-10, 10]
for (size_t i=0; i < M; i++)
//values are kept away from 0 and 1 to simplify error handling
//this generates a float in the range [1, 10]
for (size_t j=0; j < N; j++)
m[i * N + j] = 1 + static_cast <float> (rand()) /( static_cast <float> (RAND_MAX/(9))); // row-major M x N indexing
}
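/*
* A reproducible alternative using <random> (a sketch only, following the commented-out engine
* above; the function name is illustrative and it keeps the [1, 10] value range of the
* rand()-based version):
*
* void initialize_matrix_seeded(float *m, const int M, const int N) {
* std::mt19937 gen(3); // fixed seed, as in the commented-out dre.seed(3)
* std::uniform_real_distribution<float> dist(1.0f, 10.0f);
* for (size_t i = 0; i < M; i++)
* for (size_t j = 0; j < N; j++)
* m[i * N + j] = dist(gen);
* }
*/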
/*
functionality: naive matrix multiplication implementation a*b=c
input parameters:
a : the first input matrix
b : the second input matrix
c : the result matrix
n : matrix dimension (the matrices are square, n x n)
consideration : each thread computes one element of c */
__global__ void multiplication(float *a, float *b, float *c, const int n){
int col = blockIdx.x * blockDim.x + threadIdx.x; //location in c
int row = blockIdx.y * blockDim.y + threadIdx.y;
float sum = 0.0f;
if( col < n && row < n)
{
for(int i = 0; i < n; i++)
{
sum += a[row * n + i] * b[i * n + col];
}
c[row * n + col] = sum;
}
}
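/*
* Launch sketch (illustrative only; the 16x16 tile and the names blocks/threads/d_c are placeholders,
* not part of this file): size the grid by ceiling division so that every element of the n x n result
* is covered; the guard inside the kernel keeps any surplus threads idle.
*
* dim3 threads(16, 16);
* dim3 blocks((n + threads.x - 1) / threads.x, (n + threads.y - 1) / threads.y);
* multiplication<<<blocks, threads>>>(d_a, d_b, d_c, n);
*/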
//functionality: computes the maximum absolute difference (residual) between two matrices
//pre condition : M is the total number of elements; resultVector2 and resultVector are matrices stored as 1D arrays
//post condition: returns the maximum residual as a single-precision float
float residual(int M, float* resultVector2, float* resultVector) {
//float* k = new float[M];
float max_res =0.0;
float accum = 0.0;
for (int i = 0; i < M; ++i) {
accum = fabs(resultVector2[i] - resultVector[i]);
max_res=max(max_res,accum) ;
}
return max_res;
}
/*
* Functionality: Printing Matrix values for Debugging purpose only
* input:
a = the input matrix
M, N = matrix dimensions M X N
d = The text before each value
output:
None
*/
void print_matrix(float *a, const int N, const int M, const char *d) {
int i, j;
for(i=0; i<N; i++)
{
printf("\n%s[%d]:", d, i);
for (j=0; j<M; j++)
printf("\t%6.4f", a[i*M+j]);
}
printf("\n");
}
/*
* Functionality: transpose the matrix - for comparing the result with cublas
* input:
src = the input matrix
dst = expected transpose matrix
M, N = matrix dimensions M X N
output:
None
*/
void transpose(float *src, float *dst, const int N, const int M) {
for(int n = 0; n<N*M; n++) {
int i = n/N;
int j = n%N;
dst[n] = src[M*j + i];
}
}
int main(int argc, char **argv) {
// The program takes the matrix size and the execution configuration (blocks per grid dimension, threads per block dimension) on the command line
if( argc != 4 )
{
std::cout << "usage: matrix size, number of blocks per grid dimension, number of threads per block dimension" << std::endl;
return 1;
}
const size_t N = atoi(argv[1]) ;
// let's create the grid / block configuration, but just really simply.
dim3 blockPerGrid (atoi(argv[2]) , atoi(argv[2]) );
dim3 threadPerBlock (atoi(argv[3]) , atoi(argv[3]) );
// cublas declarations
cublasHandle_t cublas_handle;
// allocate host memory
float *a = (float*)malloc( N * N * sizeof(float) );
float *b = (float*)malloc( N * N * sizeof(float) );
float *c_out_naive= (float*)malloc( N * N * sizeof(float) );
float *c_out_cublas= (float*)malloc( N * N * sizeof(float) );
float *c_transposed_cublas= (float*)malloc( N * N * sizeof(float) );
// allocate device memory
float *d_a, *d_b, *d_c_out_naive, *d_c_out_cublas;
cudaMalloc((void**)&d_a, N * N * sizeof(float));
cudaMalloc((void**)&d_b, N * N * sizeof(float));
cudaMalloc((void**)&d_c_out_naive, N * N * sizeof(float));
cudaMalloc((void**)&d_c_out_cublas, N * N * sizeof(float));
// initialize host array with random data
initialize_matrix(a, N, N);
initialize_matrix(b, N, N);
/* The elements of the first column */
// a[0] = 1;
// a[1] = 2;
// a[2] = 3;
// a[3] = 4;
// /* The elements of the second column */
// a[N] = 1;
// a[N + 1] = 1;
// a[N + 2] = 2;
// a[N + 3] = 1;
// /* The elements of the third column */
// a[N * 2] = 3;
// a[N * 2 + 1] = 1;
// a[N * 2 + 2] = 2;
// a[N * 2 + 3] = 1;
// /* The elements of the fourth column */
// a[N * 3] = 5;
// a[N * 3 + 1] = 4;
// a[N * 3 + 2] = 7;
// a[N * 3 + 3] = 3;
// ////////////Second input matrix
// /* The elements of the first column */
// b[0] = 1;
// b[1] = 2;
// b[2] = 3;
// b[3] = 4;
// /* The elements of the second column */
// b[N] = 5;
// b[N + 1] = 1;
// b[N + 2] = 8;
// b[N + 3] = 1;
// /* The elements of the third column */
// b[N * 2] = 3;
// b[N * 2 + 1] = 1;
// b[N * 2 + 2] = 2;
// b[N * 2 + 3] = 1;
// /* The elements of the fourth column */
// b[N * 3] = 5;
// b[N * 3 + 1] = 4;
// b[N * 3 + 2] = 7;
// b[N * 3 + 3] = 3;
// print_matrix(a, N, N, "input Matrix a");
// print_matrix(b, N, N, "input Matrix b");
std::cout << '\n';
// copy a and b into device memory (host to device), time it as well
auto h2d_start = std::chrono::high_resolution_clock::now();
cudaMemcpy(d_a, a, sizeof(float) * N * N, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(float) * N * N, cudaMemcpyHostToDevice);
auto h2d_end = std::chrono::high_resolution_clock::now();
auto h2d_duration = std::chrono::duration_cast<std::chrono::microseconds>(h2d_end - h2d_start).count();
// the naive2D kernel execution .....timing....
std::cout << "STARTING NAIVE" << std::endl;
auto naive_exec_start = std::chrono::high_resolution_clock::now();
multiplication<<<blockPerGrid, threadPerBlock>>>( d_a, d_b, d_c_out_naive, N);
cudaDeviceSynchronize();
std::cout << "FINISHED NAIVE" << std::endl;
// since the kernels are executed asynchronously, need to sync so that we can get accurate timing
auto naive_exec_end = std::chrono::high_resolution_clock::now();
auto naive_exec_duration = std::chrono::duration_cast<std::chrono::microseconds>(naive_exec_end - naive_exec_start).count();
// copy d_c_out_naive back to the host (device to host), time it as well
auto d2h_start = std::chrono::high_resolution_clock::now();
cudaMemcpy(c_out_naive, d_c_out_naive, sizeof(float) * N * N, cudaMemcpyDeviceToHost);
auto d2h_end = std::chrono::high_resolution_clock::now();
auto d2h_duration = std::chrono::duration_cast<std::chrono::microseconds>(d2h_end - d2h_start).count();
// total host<->device transfer time
auto gpu_transfer_total_duration = h2d_duration + d2h_duration;
cublasCreate(&cublas_handle);
// cublasSetMatrix(M, N, sizeof(float), m, M, )
const float alf = 1;
const float bet = 0;
const float *alpha = &alf;
const float *beta = &bet;
int lda=N,ldb=N,ldc=N;
// time the cuBLAS call (execution only, not the copy back to the host)
auto cublas_exec_start = std::chrono::high_resolution_clock::now();
cublasSgemm(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_T, N, N, N, alpha, d_a, lda, d_b, ldb, beta, d_c_out_cublas, ldc);
// the GEMM is launched asynchronously, so synchronize before stopping the clock
cudaDeviceSynchronize();
auto cublas_exec_end = std::chrono::high_resolution_clock::now();
auto cublas_exec_duration = std::chrono::duration_cast<std::chrono::microseconds>(cublas_exec_end - cublas_exec_start).count();
// copy the cublas device vector back out to host
cudaMemcpy(c_out_cublas, d_c_out_cublas, sizeof(float) *N* N, cudaMemcpyDeviceToHost);
std::cout << "Comparing output vectors:\n";
//calculating the residuals
//cuBLAS assumes column-major storage, so its result comes back transposed relative to the row-major naive kernel
//and needs to be transposed on the host before comparison (a GPU transpose would also work, but is kept simple here)
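// A sketch of an alternative that would avoid the host-side transpose below (it reuses the handle,
// scalars and device buffers defined above): since C^T = B^T * A^T, passing the operands swapped and
// untransposed makes cuBLAS return the product directly in row-major layout:
// cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, N, N, N,
// alpha, d_b, ldb, d_a, lda, beta, d_c_out_cublas, ldc);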
float rse{ 0.0f };
transpose( c_out_cublas, c_transposed_cublas , N, N);
rse = residual(N*N,c_out_naive,c_transposed_cublas) ;
std::cout << "ERROR: " << rse << std::endl;
//print_test(c_out_naive,N);
// print_matrix(c_out_naive, N, N, "output naive Matrix c");
//print_matrix(c_transposed_cublas, N, N, "output cublas Matrix c");
std::cout <<
" Naive Total Time (us), Naive Execution Time (us), cuBLAS Execution Time (us)\n";
std::cout << naive_exec_duration +
gpu_transfer_total_duration << ", " << naive_exec_duration
<< ", " << cublas_exec_duration << '\n';
//clean up
cublasDestroy(cublas_handle);
cudaFree(d_c_out_cublas);
cudaFree(d_c_out_naive);
cudaFree(d_b);
cudaFree(d_a);
free(c_transposed_cublas);
free(c_out_cublas);
free(c_out_naive);
free(a);
free(b);
return 0;
}
|
24267bc6bf4d6b63dc4fdc850a88ac01eac81139.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by zhangjian on 19-6-22.
//
#include "../common/book.h"
#include "hip/hip_runtime.h"
#include <iostream>
#define SIZE (10*1024*1024)
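// Pageable vs. pinned host memory copy benchmark (it appears to follow the copy-timing sample from
// the "CUDA by Example" book, given the common/book.h include): cuda_malloc_test times 100
// host<->device copies of a buffer allocated with malloc, while cuda_malloc_host_test does the same
// with page-locked memory from hipHostMalloc, which typically achieves noticeably higher bandwidth.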
float cuda_malloc_test(int size, bool up){
int *a, *dev_a;
float elapsedTime;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
a = (int*)malloc(size * sizeof(*a));
hipMalloc((void**)&dev_a, size * sizeof(*dev_a));
hipEventRecord(start, 0);
for(int i = 0; i < 100; i++){
if(up)
hipMemcpy(dev_a, a, size * sizeof(*a), hipMemcpyHostToDevice);
else
hipMemcpy(a, dev_a, size * sizeof(*dev_a), hipMemcpyDeviceToHost);
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
free(a);
hipFree(dev_a);
hipEventDestroy(start);
hipEventDestroy(stop);
return elapsedTime;
}
float cuda_malloc_host_test(int size, bool up){
int *a, *dev_a;
float elapsedTime;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipHostMalloc( (void**)&a, size * sizeof(*a), hipHostMallocDefault);
hipMalloc((void**)&dev_a, size * sizeof(*dev_a));
hipEventRecord(start, 0);
for(int i = 0; i < 100; i++){
if(up)
hipMemcpy(dev_a, a, size * sizeof(*a), hipMemcpyHostToDevice);
else
hipMemcpy(a, dev_a, size * sizeof(*dev_a), hipMemcpyDeviceToHost);
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
hipHostFree(a);
hipFree(dev_a);
hipEventDestroy(start);
hipEventDestroy(stop);
return elapsedTime;
}
int main(void){
float elapsedTime;
float MB = (float)100*SIZE* sizeof(int)/1024/1024;
elapsedTime = cuda_malloc_test(SIZE, true);
std::cout << "pageable memory, copy up: " << elapsedTime << " ms, " << MB/(elapsedTime/1000.0f) << " MB/s" << std::endl;
elapsedTime = cuda_malloc_host_test(SIZE, true);
std::cout << "pinned memory, copy up: " << elapsedTime << " ms, " << MB/(elapsedTime/1000.0f) << " MB/s" << std::endl;
} | 24267bc6bf4d6b63dc4fdc850a88ac01eac81139.cu | //
// Created by zhangjian on 19-6-22.
//
#include "../common/book.h"
#include "cuda.h"
#include <iostream>
#define SIZE (10*1024*1024)
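// Pageable vs. pinned host memory copy benchmark (it appears to follow the copy-timing sample from
// the "CUDA by Example" book, given the common/book.h include): cuda_malloc_test times 100
// host<->device copies of a buffer allocated with malloc, while cuda_malloc_host_test does the same
// with page-locked memory from cudaHostAlloc, which typically achieves noticeably higher bandwidth.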
float cuda_malloc_test(int size, bool up){
int *a, *dev_a;
float elapsedTime;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
a = (int*)malloc(size * sizeof(*a));
cudaMalloc((void**)&dev_a, size * sizeof(*dev_a));
cudaEventRecord(start, 0);
for(int i = 0; i < 100; i++){
if(up)
cudaMemcpy(dev_a, a, size * sizeof(*a), cudaMemcpyHostToDevice);
else
cudaMemcpy(a, dev_a, size * sizeof(*dev_a), cudaMemcpyDeviceToHost);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
free(a);
cudaFree(dev_a);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return elapsedTime;
}
float cuda_malloc_host_test(int size, bool up){
int *a, *dev_a;
float elapsedTime;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaHostAlloc( (void**)&a, size * sizeof(*a), cudaHostAllocDefault);
cudaMalloc((void**)&dev_a, size * sizeof(*dev_a));
cudaEventRecord(start, 0);
for(int i = 0; i < 100; i++){
if(up)
cudaMemcpy(dev_a, a, size * sizeof(*a), cudaMemcpyHostToDevice);
else
cudaMemcpy(a, dev_a, size * sizeof(*dev_a), cudaMemcpyDeviceToHost);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaFreeHost(a);
cudaFree(dev_a);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return elapsedTime;
}
int main(void){
float elapsedTime;
float MB = (float)100*SIZE* sizeof(int)/1024/1024;
elapsedTime = cuda_malloc_test(SIZE, true);
std::cout << "pageable memory, copy up: " << elapsedTime << " ms, " << MB/(elapsedTime/1000.0f) << " MB/s" << std::endl;
elapsedTime = cuda_malloc_host_test(SIZE, true);
std::cout << "pinned memory, copy up: " << elapsedTime << " ms, " << MB/(elapsedTime/1000.0f) << " MB/s" << std::endl;
} |
fd56837a86fbf3e4a9de781a78bdd93806373bd2.hip | // !!! This is a file automatically generated by hipify!!!
// Simple vector addition, from the samples provided in the CUDA SDK.
// Author: Allen Porter <[email protected]>
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
__global__ void VecAdd(float* A, float* B, float* C) {
int i = threadIdx.x;
C[i] = A[i] + B[i];
}
int main(int argc, char**argv)
{
int N = 10;
size_t size = N * sizeof(float);
// Input; Host memory
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
for (int i = 0; i < N; i++) {
h_A[i] = i;
h_B[i] = i;
}
// Device memory
float* d_A;
hipMalloc((void**)&d_A, size);
float* d_B;
hipMalloc((void**)&d_B, size);
float* d_C;
hipMalloc((void**)&d_C, size);
// Copy from host to device
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
// Invoke kernel: a single block of N threads, so N must stay within the device limit on threads per block (typically 1024)
hipLaunchKernelGGL(( VecAdd), dim3(1), dim3(N), 0, 0, d_A, d_B, d_C);
float* h_C = (float*)malloc(size);
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
for (int i = 0; i < N; i++) {
printf("%0.f ", h_A[i]);
}
printf("\n");
for (int i = 0; i < N; i++) {
printf("%0.f ", h_B[i]);
}
printf("\n");
for (int i = 0; i < N; i++) {
printf("%0.f ", h_C[i]);
}
printf("\n");
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(h_A);
free(h_B);
free(h_C);
return 0;
}
| fd56837a86fbf3e4a9de781a78bdd93806373bd2.cu | // Simple vector addition, from the samples provided in the CUDA SDK.
// Author: Allen Porter <[email protected]>
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
__global__ void VecAdd(float* A, float* B, float* C) {
int i = threadIdx.x;
C[i] = A[i] + B[i];
}
int main(int argc, char**argv)
{
int N = 10;
size_t size = N * sizeof(float);
// Input; Host memory
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
for (int i = 0; i < N; i++) {
h_A[i] = i;
h_B[i] = i;
}
// Device memory
float* d_A;
cudaMalloc((void**)&d_A, size);
float* d_B;
cudaMalloc((void**)&d_B, size);
float* d_C;
cudaMalloc((void**)&d_C, size);
// Copy from host to device
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Invoke kernel: a single block of N threads, so N must stay within the device limit on threads per block (typically 1024)
VecAdd<<<1, N>>>(d_A, d_B, d_C);
float* h_C = (float*)malloc(size);
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++) {
printf("%0.f ", h_A[i]);
}
printf("\n");
for (int i = 0; i < N; i++) {
printf("%0.f ", h_B[i]);
}
printf("\n");
for (int i = 0; i < N; i++) {
printf("%0.f ", h_C[i]);
}
printf("\n");
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
return 0;
}
|
edb59a4c353826de18d49f34cbbdef0bddc21266.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/****************************************************************************\
* --- Practical Course: GPU Programming in Computer Vision ---
*
* time: winter term 2012/13 / March 11-18, 2013
*
* project: diffusion
* file: diffusion.cu
*
*
\******* PLEASE ENTER YOUR CORRECT STUDENT LOGIN, NAME AND ID BELOW *********/
const char* studentLogin = "p110";
const char* studentName = "Shrikant Vinchurkar";
const int studentID = 03636145;
/****************************************************************************\
*
* In this file the following methods have to be edited or completed:
*
* diffuse_linear_isotrop_shared(const float *d_input, ... )
* diffuse_linear_isotrop_shared(const float3 *d_input, ... )
* diffuse_nonlinear_isotrop_shared(const float *d_input, ... )
* diffuse_nonlinear_isotrop_shared(const float3 *d_input, ... )
* compute_tv_diffusivity_shared
* compute_tv_diffusivity_joined_shared
* compute_tv_diffusivity_separate_shared
* jacobi_shared(float *d_output, ... )
* jacobi_shared(float3 *d_output, ... )
* sor_shared(float *d_output, ... )
* sor_shared(float3 *d_output, ... )
*
\****************************************************************************/
#define DIFF_BW 16
#define DIFF_BH 16
#define TV_EPSILON 0.1f
#include "diffusion.cuh"
const char* getStudentLogin() { return studentLogin; };
const char* getStudentName() { return studentName; };
int getStudentID() { return studentID; };
bool checkStudentData() { return strcmp(studentLogin, "p010") != 0 && strcmp(studentName, "John Doe") != 0 && studentID != 1234567; };
bool checkStudentNameAndID() { return strcmp(studentName, "John Doe") != 0 && studentID != 1234567; };
//----------------------------------------------------------------------------
// Linear Diffusion
//----------------------------------------------------------------------------
// mode 0 gray: linear diffusion
__global__ void diffuse_linear_isotrop_shared(
const float *d_input,
float *d_output,
float timeStep,
int nx, int ny,
size_t pitch)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const int idx = y*pitch + x;
__shared__ float u[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = d_input[idx];
if (x == 0) u[0][ty] = u[tx][ty];
else if (threadIdx.x == 0) u[0][ty] = d_input[idx-1];
if (x == nx-1) u[tx+1][ty] = u[tx][ty];
else if (threadIdx.x == blockDim.x-1) u[tx+1][ty] = d_input[idx+1];
if (y == 0) u[tx][0] = u[tx][ty];
else if (threadIdx.y == 0) u[tx][0] = d_input[idx-pitch];
if (y == ny-1) u[tx][ty+1] = u[tx][ty];
else if (threadIdx.y == blockDim.y-1) u[tx][ty+1] = d_input[idx+pitch];
}
__syncthreads();
// ### implement me ###
// calculating linear isotropic diffusion
if( x < nx && y < ny )//guards
{
float isoD = u[tx+1][ty] + u[tx-1][ty] + u[tx][ty+1] + u[tx][ty-1] - 4*u[tx][ty];
d_output[idx] = u[tx][ty] + timeStep * isoD;
}
}
// mode 0 interleaved: linear diffusion
__global__ void diffuse_linear_isotrop_shared
(
const float3 *d_input,
float3 *d_output,
float timeStep,
int nx, int ny,
size_t pitchBytes
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
const char* imgO = (char*)d_output + y*pitchBytes + x*sizeof(float3);
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
float3 imgValue;
// load data into shared memory
if (x < nx && y < ny) {
imgValue = *( (float3*)imgP );
u[tx][ty] = imgValue;
if (x == 0) u[0][ty] = imgValue;
else if (threadIdx.x == 0) u[0][ty] = *( ((float3*)imgP)-1 );
if (x == nx-1) u[tx+1][ty] = imgValue;
else if (threadIdx.x == blockDim.x-1) u[tx+1][ty] = *( ((float3*)imgP)+1 );
if (y == 0) u[tx][0] = imgValue;
else if (threadIdx.y == 0) u[tx][0] = *( (float3*)(imgP-pitchBytes) );
if (y == ny-1) u[tx][ty+1] = imgValue;
else if (threadIdx.y == blockDim.y-1) u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
}
__syncthreads();
// ### implement me ###
// calculating linear isotropic diffusion
if( x < nx && y < ny )//guards
{
imgValue.x = u[tx][ty].x + timeStep * (u[tx+1][ty].x + u[tx-1][ty].x + u[tx][ty+1].x + u[tx][ty-1].x - 4*u[tx][ty].x);
imgValue.y = u[tx][ty].y + timeStep * (u[tx+1][ty].y + u[tx-1][ty].y + u[tx][ty+1].y + u[tx][ty-1].y - 4*u[tx][ty].y);
imgValue.z = u[tx][ty].z + timeStep * (u[tx+1][ty].z + u[tx-1][ty].z + u[tx][ty+1].z + u[tx][ty-1].z - 4*u[tx][ty].z);
*( (float3 *)imgO ) = imgValue;
}
}
//----------------------------------------------------------------------------
// Non-linear Diffusion - explicit scheme
//----------------------------------------------------------------------------
// mode 1 gray: nonlinear diffusion
__global__ void diffuse_nonlinear_isotrop_shared
(
const float *d_input,
const float *d_diffusivity,
float *d_output,
float timeStep,
int nx,
int ny,
size_t pitch
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const int idx = y*pitch + x;
__shared__ float u[DIFF_BW+2][DIFF_BH+2];
__shared__ float g[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = d_input[idx];
g[tx][ty] = d_diffusivity[idx];
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = d_input[idx-1];
g[0][ty] = d_diffusivity[idx-1];
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = d_input[idx+1];
g[tx+1][ty] = d_diffusivity[idx+1];
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = d_input[idx-pitch];
g[tx][0] = d_diffusivity[idx-pitch];
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = d_input[idx+pitch];
g[tx][ty+1] = d_diffusivity[idx+pitch];
}
}
__syncthreads();
// ### implement me ###
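// explicit update: u_new = u + timeStep * sum over the 4 neighbours of 0.5*(g_n + g_c)*(u_n - u_c),
// i.e. the diffusivity g is averaged onto the half-way points between the centre pixel and each neighbour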
if( x < nx && y < ny )//guards
{
float phiR = 0.5f *( g[tx+1][ty] + g[tx][ty] );
float phiL = 0.5f *( g[tx-1][ty] + g[tx][ty] );
float phiU = 0.5f *( g[tx][ty-1] + g[tx][ty] );
float phiD = 0.5f *( g[tx][ty+1] + g[tx][ty] );
float sum = phiR * u[tx+1][ty] + phiL * u[tx-1][ty] + phiU * u[tx][ty-1] + phiD * u[tx][ty+1] -
( phiR + phiL + phiU + phiD ) * u[tx][ty];
d_output[idx] = u[tx][ty] + timeStep * sum;
}
}
// mode 1 interleaved: nonlinear diffusion
__global__ void diffuse_nonlinear_isotrop_shared
(
const float3 *d_input,
const float3 *d_diffusivity,
float3 *d_output,
float timeStep,
int nx,
int ny,
size_t pitchBytes
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
const char* diffP = (char*)d_diffusivity + y*pitchBytes + x*sizeof(float3);
const char* imgO = (char*)d_output + y*pitchBytes + x*sizeof(float3);
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
__shared__ float3 g[DIFF_BW+2][DIFF_BH+2];
float3 value;
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = *( (float3*)imgP );
g[tx][ty] = *( (float3*)diffP );
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = *( ((float3*)imgP)-1 );
g[0][ty] = *( ((float3*)diffP)-1 );
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = *( ((float3*)imgP)+1 );
g[tx+1][ty] = *( ((float3*)diffP)+1 );
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = *( (float3*)(imgP-pitchBytes) );
g[tx][0] = *( (float3*)(diffP-pitchBytes) );
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
g[tx][ty+1] = *( (float3*)(diffP+pitchBytes) );
}
}
__syncthreads();
// ### implement me ###
if( x < nx && y < ny )//guards
{
// calculate value.x
float phiR_x = 0.5f *( g[tx+1][ty].x + g[tx][ty].x );
float phiL_x = 0.5f *( g[tx-1][ty].x + g[tx][ty].x );
float phiU_x = 0.5f *( g[tx][ty-1].x + g[tx][ty].x );
float phiD_x = 0.5f *( g[tx][ty+1].x + g[tx][ty].x );
float sum_x = phiR_x * u[tx+1][ty].x + phiL_x * u[tx-1][ty].x +
phiU_x * u[tx][ty-1].x + phiD_x * u[tx][ty+1].x -
( phiR_x + phiL_x + phiU_x + phiD_x ) * u[tx][ty].x;
value.x = u[tx][ty].x + timeStep * sum_x;
// calculate value.y
float phiR_y = 0.5f *( g[tx+1][ty].y + g[tx][ty].y );
float phiL_y = 0.5f *( g[tx-1][ty].y + g[tx][ty].y );
float phiU_y = 0.5f *( g[tx][ty-1].y + g[tx][ty].y );
float phiD_y = 0.5f *( g[tx][ty+1].y + g[tx][ty].y );
float sum_y = phiR_y * u[tx+1][ty].y + phiL_y * u[tx-1][ty].y +
phiU_y * u[tx][ty-1].y + phiD_y * u[tx][ty+1].y -
( phiR_y + phiL_y + phiU_y + phiD_y ) * u[tx][ty].y;
value.y = u[tx][ty].y + timeStep * sum_y;
// calculate value.z
float phiR_z = 0.5f *( g[tx+1][ty].z + g[tx][ty].z );
float phiL_z = 0.5f *( g[tx-1][ty].z + g[tx][ty].z );
float phiU_z = 0.5f *( g[tx][ty-1].z + g[tx][ty].z );
float phiD_z = 0.5f *( g[tx][ty+1].z + g[tx][ty].z );
float sum_z = phiR_z * u[tx+1][ty].z + phiL_z * u[tx-1][ty].z +
phiU_z * u[tx][ty-1].z + phiD_z * u[tx][ty+1].z -
( phiR_z + phiL_z + phiU_z + phiD_z ) * u[tx][ty].z;
value.z = u[tx][ty].z + timeStep * sum_z;
*( (float3 *) imgO) = value;
}
}
// diffusivity computation for modes 1-3 gray
__global__ void compute_tv_diffusivity_shared
(
const float *d_input,
float *d_output,
int nx,
int ny,
size_t pitch
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const int idx = y*pitch + x;
__shared__ float u[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = d_input[idx];
if (x == 0) u[0][ty] = u[tx][ty];
else if (threadIdx.x == 0) u[0][ty] = d_input[idx-1];
if (x == nx-1) u[tx+1][ty] = u[tx][ty];
else if (threadIdx.x == blockDim.x-1) u[tx+1][ty] = d_input[idx+1];
if (y == 0) u[tx][0] = u[tx][ty];
else if (threadIdx.y == 0) u[tx][0] = d_input[idx-pitch];
if (y == ny-1) u[tx][ty+1] = u[tx][ty];
else if (threadIdx.y == blockDim.y-1) u[tx][ty+1] = d_input[idx+pitch];
}
__syncthreads();
// make use of the constant TV_EPSILON
// ### implement me ###
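// TV diffusivity: g(|grad u|) = 1 / sqrt(|grad u|^2 + TV_EPSILON),
// with the gradient approximated by central differences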
if( x < nx && y < ny )//guards
{
// calculate gradient magnitude
float derX = 0.5f * (u[tx+1][ty] - u[tx-1][ty]);
float derY = 0.5f * (u[tx][ty+1] - u[tx][ty-1]);
float temp = sqrt( derX * derX + derY * derY + TV_EPSILON );
d_output[idx] = 1 / temp;
}
}
/*! Computes a joined diffusivity for an RGB Image:
* (g_R,g_G,g_B)(R,G,B) :=
* (g((R+G+B)/3),g((R+G+B)/3),g((R+G+B)/3))
* */
__global__ void compute_tv_diffusivity_joined_shared
(
const float3 *d_input,
float3 *d_output,
int nx,
int ny,
size_t pitchBytes
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
const char* imgO = (char*)d_output + y*pitchBytes + x*sizeof(float3);
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
float3 imgValue;
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = *( (float3*)imgP );
if (x == 0) u[0][ty] = u[tx][ty];
else if (threadIdx.x == 0) u[0][ty] = *( ((float3*)imgP)-1 );
if (x == nx-1) u[tx+1][ty] = u[tx][ty];
else if (threadIdx.x == blockDim.x-1) u[tx+1][ty] = *( ((float3*)imgP)+1 );
if (y == 0) u[tx][0] = u[tx][ty];
else if (threadIdx.y == 0) u[tx][0] = *( (float3*)(imgP-pitchBytes) );
if (y == ny-1) u[tx][ty+1] = u[tx][ty];
else if (threadIdx.y == blockDim.y-1) u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
}
__syncthreads();
// make use of the constant TV_EPSILON
// ### implement me ###
/* (g_R,g_G,g_B)(R,G,B) :=
* (g((R+G+B)/3),g((R+G+B)/3),g((R+G+B)/3))*/
if( x < nx && y < ny )//guards
{
// calculate g((R+G+B)/3)
float derX = 0.5f * ((u[tx+1][ty].x + u[tx+1][ty].y + u[tx+1][ty].z)/3 -
(u[tx-1][ty].x + u[tx-1][ty].y + u[tx-1][ty].z)/3 );
float derY = 0.5f * ((u[tx][ty+1].x + u[tx][ty+1].y + u[tx][ty+1].z)/3 -
(u[tx][ty-1].x + u[tx][ty-1].y + u[tx][ty-1].z)/3);
float temp = 1 / (sqrt( derX * derX + derY * derY + TV_EPSILON ));
imgValue.x = temp;
imgValue.y = temp;
imgValue.z = temp;
*( (float3 *)imgO ) = imgValue;
}
}
/*! Computes a separate diffusivity for an RGB Image:
* (g_R,g_G,g_B)(R,G,B) :=
* (g(R),g(G),g(B))
* */
__global__ void compute_tv_diffusivity_separate_shared
(
const float3 *d_input,
float3 *d_output,
int nx,
int ny,
size_t pitchBytes
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
const char* imgO = (char*)d_output + y*pitchBytes + x*sizeof(float3);
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
float3 imgValue;
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = *( (float3*)imgP );
if (x == 0) u[0][ty] = u[tx][ty];
else if (threadIdx.x == 0) u[0][ty] = *( ((float3*)imgP)-1 );
if (x == nx-1) u[tx+1][ty] = u[tx][ty];
else if (threadIdx.x == blockDim.x-1) u[tx+1][ty] = *( ((float3*)imgP)+1 );
if (y == 0) u[tx][0] = u[tx][ty];
else if (threadIdx.y == 0) u[tx][0] = *( (float3*)(imgP-pitchBytes) );
if (y == ny-1) u[tx][ty+1] = u[tx][ty];
else if (threadIdx.y == blockDim.y-1) u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
}
__syncthreads();
// make use of the constant TV_EPSILON
// ### implement me ###
if( x < nx && y < ny )//guards
{
// calculate gradient magnitude
// calculate g(R)
float derX_x = 0.5f * (u[tx+1][ty].x - u[tx-1][ty].x);
float derY_x = 0.5f * (u[tx][ty+1].x - u[tx][ty-1].x);
float temp_x = sqrt( derX_x * derX_x + derY_x * derY_x + TV_EPSILON );
imgValue.x = 1 / temp_x;
// calculate g(G)
float derX_y = 0.5f * (u[tx+1][ty].y - u[tx-1][ty].y);
float derY_y = 0.5f * (u[tx][ty+1].y - u[tx][ty-1].y);
float temp_y = sqrt( derX_y * derX_y + derY_y * derY_y + TV_EPSILON );
imgValue.y = 1 / temp_y;
// calculate g(B)
float derX_z = 0.5f * (u[tx+1][ty].z - u[tx-1][ty].z);
float derY_z = 0.5f * (u[tx][ty+1].z - u[tx][ty-1].z);
float temp_z = sqrt( derX_z * derX_z + derY_z * derY_z + TV_EPSILON );
imgValue.z = 1 / temp_z;
*( (float3 *)imgO ) = imgValue;
}
}
//----------------------------------------------------------------------------
// Non-linear Diffusion - Jacobi scheme
//----------------------------------------------------------------------------
// mode 2 gray: Jacobi solver
__global__ void jacobi_shared
(
float *d_output,
const float *d_input,
const float *d_original,
const float *d_diffusivity,
float weight,
int nx,
int ny,
size_t pitch
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int idx = y*pitch + x;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
__shared__ float u[DIFF_BW+2][DIFF_BH+2];
__shared__ float g[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = d_input[idx];
g[tx][ty] = d_diffusivity[idx];
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = d_input[idx-1];
g[0][ty] = d_diffusivity[idx-1];
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = d_input[idx+1];
g[tx+1][ty] = d_diffusivity[idx+1];
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = d_input[idx-pitch];
g[tx][0] = d_diffusivity[idx-pitch];
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = d_input[idx+pitch];
g[tx][ty+1] = d_diffusivity[idx+pitch];
}
}
__syncthreads();
// ### implement me ###
// the system matrix A is never assembled explicitly; the stencil weights phi are computed on the fly
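// Jacobi update: u_new = ( f + weight * sum_n( phi_n * u_n ) ) / ( 1 + weight * sum_n phi_n ),
// where f is the original (noisy) image and phi_n the averaged diffusivity towards neighbour n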
if (x < nx && y < ny) //guards
{
// setting boundary values to 0 to avoid corruption of Jacobi scheme
float phiR = ( x == nx-1) ? 0.0f : 0.5f *( g[tx+1][ty] + g[tx][ty] );
float phiL = ( x == 0 ) ? 0.0f : 0.5f *( g[tx-1][ty] + g[tx][ty] );
float phiU = ( y == 0 ) ? 0.0f : 0.5f *( g[tx][ty-1] + g[tx][ty] );
float phiD = ( y == ny-1) ? 0.0f : 0.5f *( g[tx][ty+1] + g[tx][ty] );
float Aii = ( 1 + (phiR + phiL + phiU + phiD ) * weight) ;
float sumN = weight* ( phiR * u[tx+1][ty] + phiL * u[tx-1][ty] +
phiU * u[tx][ty-1] + phiD * u[tx][ty+1]);
d_output[idx] = (d_original[idx] + sumN) / Aii;
}
}
// mode 2 interleaved: Jacobi solver
__global__ void jacobi_shared
(
float3 *d_output,
const float3 *d_input,
const float3 *d_original,
const float3 *d_diffusivity,
float weight,
int nx,
int ny,
size_t pitchBytes
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
const char* diffP = (char*)d_diffusivity + y*pitchBytes + x*sizeof(float3);
const char* imgOut = (char*)d_output + y*pitchBytes + x*sizeof(float3);
const char* imgOrig = (char*)d_original + y*pitchBytes + x*sizeof(float3);
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
__shared__ float3 g[DIFF_BW+2][DIFF_BH+2];
float3 imgValue;
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = *( (float3*)imgP );
g[tx][ty] = *( (float3*)diffP );
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = *( ((float3*)imgP)-1 );
g[0][ty] = *( ((float3*)diffP)-1 );
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = *( ((float3*)imgP)+1 );
g[tx+1][ty] = *( ((float3*)diffP)+1 );
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = *( (float3*)(imgP-pitchBytes) );
g[tx][0] = *( (float3*)(diffP-pitchBytes) );
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
g[tx][ty+1] = *( (float3*)(diffP+pitchBytes) );
}
}
__syncthreads();
// ### implement me ###
if (x < nx && y < ny) //guards
{
// setting boundary values to 0 to avoid corruption of Jacobi scheme
float phiR_x =( x == nx-1 ) ? 0.0f : 0.5f *( g[tx+1][ty].x + g[tx][ty].x );
float phiL_x =( x == 0 ) ? 0.0f : 0.5f *( g[tx-1][ty].x + g[tx][ty].x );
float phiU_x =( y == 0 ) ? 0.0f : 0.5f *( g[tx][ty-1].x + g[tx][ty].x );
float phiD_x =( y == ny-1 ) ? 0.0f : 0.5f *( g[tx][ty+1].x + g[tx][ty].x );
float Aii_x = ( 1 + (phiR_x + phiL_x + phiU_x + phiD_x ) * weight) ;
float sumN_x = weight* ( phiR_x * u[tx+1][ty].x + phiL_x * u[tx-1][ty].x +
phiU_x * u[tx][ty-1].x + phiD_x * u[tx][ty+1].x);
imgValue.x = (*((float*)imgOrig) + sumN_x)/Aii_x;
float phiR_y =( x == nx-1 ) ? 0.0f : 0.5f *( g[tx+1][ty].y + g[tx][ty].y );
float phiL_y =( x == 0 ) ? 0.0f : 0.5f *( g[tx-1][ty].y + g[tx][ty].y );
float phiU_y =( y == 0 ) ? 0.0f : 0.5f *( g[tx][ty-1].y + g[tx][ty].y );
float phiD_y =( y == ny-1 ) ? 0.0f : 0.5f *( g[tx][ty+1].y + g[tx][ty].y );
float Aii_y = ( 1 + (phiR_y + phiL_y + phiU_y + phiD_y ) * weight) ;
float sumN_y = weight* ( phiR_y * u[tx+1][ty].y + phiL_y * u[tx-1][ty].y +
phiU_y * u[tx][ty-1].y + phiD_y * u[tx][ty+1].y);
imgValue.y = (*((float*)imgOrig+1) + sumN_y)/Aii_y;
float phiR_z =( x == nx-1 ) ? 0.0f : 0.5f *( g[tx+1][ty].z + g[tx][ty].z );
float phiL_z =( x == 0 ) ? 0.0f : 0.5f *( g[tx-1][ty].z + g[tx][ty].z );
float phiU_z =( y == 0 ) ? 0.0f : 0.5f *( g[tx][ty-1].z + g[tx][ty].z );
float phiD_z =( y == ny-1 ) ? 0.0f : 0.5f *( g[tx][ty+1].z + g[tx][ty].z );
float Aii_z = ( 1 + (phiR_z + phiL_z + phiU_z + phiD_z ) * weight) ;
float sumN_z = weight* ( phiR_z * u[tx+1][ty].z + phiL_z * u[tx-1][ty].z +
phiU_z * u[tx][ty-1].z + phiD_z * u[tx][ty+1].z);
imgValue.z = (*((float*)imgOrig+2) + sumN_z)/Aii_z;
*((float3*)imgOut) =imgValue;
}
}
//----------------------------------------------------------------------------
// Non-linear Diffusion - Successive Over-Relaxation (SOR)
//----------------------------------------------------------------------------
// mode 3 gray: SOR solver
__global__ void sor_shared
(
float *d_output,
const float *d_input,
const float *d_original,
const float *d_diffusivity,
float weight,
float overrelaxation,
int nx,
int ny,
size_t pitch,
int red
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int idx = y*pitch + x;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
__shared__ float u[DIFF_BW+2][DIFF_BH+2];
__shared__ float g[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = d_input[idx];
g[tx][ty] = d_diffusivity[idx];
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = d_input[idx-1];
g[0][ty] = d_diffusivity[idx-1];
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = d_input[idx+1];
g[tx+1][ty] = d_diffusivity[idx+1];
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = d_input[idx-pitch];
g[tx][0] = d_diffusivity[idx-pitch];
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = d_input[idx+pitch];
g[tx][ty+1] = d_diffusivity[idx+pitch];
}
}
__syncthreads();
// ### implement me ###
// the system matrix A is never assembled explicitly; the stencil weights phi are computed on the fly
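// SOR update: u_new = (1 - overrelaxation) * u_old
// + overrelaxation * ( f + weight * sum_n( phi_n * u_n ) ) / ( 1 + weight * sum_n phi_n ),
// applied in two red/black half-sweeps so that each pass reads already-updated values of the other colour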
if (x < nx && y < ny)//guards
{
// red-black (checkerboard) pattern: update only cells whose index parity matches 'red'
if( (tx+ty) % 2 == red) // tx+ty has the same parity as the global x+y here because the block dimensions (16x16) are even
{
float phiR =( x == nx-1 ) ? 0.0f : 0.5f *( g[tx+1][ty] + g[tx][ty] );
float phiL =( x == 0 ) ? 0.0f : 0.5f *( g[tx-1][ty] + g[tx][ty] );
float phiU =( y == 0 ) ? 0.0f : 0.5f *( g[tx][ty-1] + g[tx][ty] );
float phiD =( y == ny-1 ) ? 0.0f : 0.5f *( g[tx][ty+1] + g[tx][ty] );
float Aii = ( 1 + (phiR + phiL + phiU + phiD ) * weight) ;
float sumNU = weight * ( phiR * u[tx+1][ty] + phiU * u[tx][ty-1] );
float sumNL = weight * ( phiL * u[tx-1][ty] + phiD * u[tx][ty+1] );
d_output[idx] = (1-overrelaxation) * u[tx][ty] +
(d_original[idx] + sumNU + sumNL) * overrelaxation / Aii;
}
}
}
// mode 3 interleaved: SOR solver
__global__ void sor_shared
(
float3 *d_output,
const float3 *d_input,
const float3 *d_original,
const float3 *d_diffusivity,
float weight,
float overrelaxation,
int nx,
int ny,
size_t pitchBytes,
int red
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
const char* diffP = (char*)d_diffusivity + y*pitchBytes + x*sizeof(float3);
const char* imgOut = (char*)d_output + y*pitchBytes + x*sizeof(float3);
const char* imgOrig = (char*)d_original + y*pitchBytes + x*sizeof(float3);
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
__shared__ float3 g[DIFF_BW+2][DIFF_BH+2];
float3 imgValue;
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = *( (float3*)imgP );
g[tx][ty] = *( (float3*)diffP );
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = *( ((float3*)imgP)-1 );
g[0][ty] = *( ((float3*)diffP)-1 );
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = *( ((float3*)imgP)+1 );
g[tx+1][ty] = *( ((float3*)diffP)+1 );
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = *( (float3*)(imgP-pitchBytes) );
g[tx][0] = *( (float3*)(diffP-pitchBytes) );
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
g[tx][ty+1] = *( (float3*)(diffP+pitchBytes) );
}
}
__syncthreads();
// ### implement me ###
if (x < nx && y < ny) //guards
{
// red-black (checkerboard) pattern: update only cells whose index parity matches 'red'
if( (tx+ty) % 2 == red) // tx+ty has the same parity as the global x+y here because the block dimensions (16x16) are even
{
// calculate imgValue.x
float phiR_x =( x == nx-1 ) ? 0.0f : 0.5f *( g[tx+1][ty].x + g[tx][ty].x );
float phiL_x =( x == 0 ) ? 0.0f : 0.5f *( g[tx-1][ty].x + g[tx][ty].x );
float phiU_x =( y == 0 ) ? 0.0f : 0.5f *( g[tx][ty-1].x + g[tx][ty].x );
float phiD_x =( y == ny-1 ) ? 0.0f : 0.5f *( g[tx][ty+1].x + g[tx][ty].x );
float Aii_x = ( 1 + (phiR_x + phiL_x + phiU_x + phiD_x ) * weight) ;
float sumNU_x = weight * ( phiR_x * u[tx+1][ty].x + phiU_x * u[tx][ty-1].x );
float sumNL_x = weight * ( phiL_x * u[tx-1][ty].x + phiD_x * u[tx][ty+1].x );
imgValue.x = (1-overrelaxation) * u[tx][ty].x +
( *((float *) imgOrig)+ sumNU_x + sumNL_x) * overrelaxation / Aii_x;
// calculate imgValue.y
float phiR_y =( x == nx-1 ) ? 0.0f : 0.5f *( g[tx+1][ty].y + g[tx][ty].y );
float phiL_y =( x == 0 ) ? 0.0f : 0.5f *( g[tx-1][ty].y + g[tx][ty].y );
float phiU_y =( y == 0 ) ? 0.0f : 0.5f *( g[tx][ty-1].y + g[tx][ty].y );
float phiD_y =( y == ny-1 ) ? 0.0f : 0.5f *( g[tx][ty+1].y + g[tx][ty].y );
float Aii_y = ( 1 + (phiR_y + phiL_y + phiU_y + phiD_y ) * weight) ;
float sumNU_y = weight * ( phiR_y * u[tx+1][ty].y + phiU_y * u[tx][ty-1].y );
float sumNL_y = weight * ( phiL_y * u[tx-1][ty].y + phiD_y * u[tx][ty+1].y );
imgValue.y = (1-overrelaxation) * u[tx][ty].y +
( *((float *) imgOrig + 1)+ sumNU_y + sumNL_y) * overrelaxation / Aii_y;
// calculate imgValue.z
float phiR_z =( x == nx-1 ) ? 0.0f : 0.5f *( g[tx+1][ty].z + g[tx][ty].z );
float phiL_z =( x == 0 ) ? 0.0f : 0.5f *( g[tx-1][ty].z + g[tx][ty].z );
float phiU_z =( y == 0 ) ? 0.0f : 0.5f *( g[tx][ty-1].z + g[tx][ty].z );
float phiD_z =( y == ny-1 ) ? 0.0f : 0.5f *( g[tx][ty+1].z + g[tx][ty].z );
float Aii_z = ( 1 + (phiR_z + phiL_z + phiU_z + phiD_z ) * weight) ;
float sumNU_z = weight * ( phiR_z * u[tx+1][ty].z + phiU_z * u[tx][ty-1].z );
float sumNL_z = weight * ( phiL_z * u[tx-1][ty].z + phiD_z * u[tx][ty+1].z );
imgValue.z = (1-overrelaxation) * u[tx][ty].z +
( *((float *) imgOrig + 2)+ sumNU_z + sumNL_z) * overrelaxation / Aii_z;
*((float3*) imgOut ) = imgValue;
}
}
}
//----------------------------------------------------------------------------
// Host function
//----------------------------------------------------------------------------
void gpu_diffusion
(
const float *input,
float *output,
int nx, int ny, int nc,
float timeStep,
int iterations,
float weight,
int lagged_iterations,
float overrelaxation,
int mode,
bool jointDiffusivity
)
{
int i,j;
size_t pitchF1, pitchBytesF1, pitchBytesF3;
float *d_input = 0;
float *d_output = 0;
float *d_diffusivity = 0;
float *d_original = 0;
float *temp = 0;
dim3 dimGrid((int)ceil((float)nx/DIFF_BW), (int)ceil((float)ny/DIFF_BH));
dim3 dimBlock(DIFF_BW,DIFF_BH);
// Allocation of GPU Memory
if (nc == 1) {
cutilSafeCall( hipMallocPitch( (void**)&(d_input), &pitchBytesF1, nx*sizeof(float), ny ) );
cutilSafeCall( hipMallocPitch( (void**)&(d_output), &pitchBytesF1, nx*sizeof(float), ny ) );
if (mode) cutilSafeCall( hipMallocPitch( (void**)&(d_diffusivity), &pitchBytesF1, nx*sizeof(float), ny ) );
if (mode >= 2) cutilSafeCall( hipMallocPitch( (void**)&(d_original), &pitchBytesF1, nx*sizeof(float), ny ) );
cutilSafeCall( hipMemcpy2D(d_input, pitchBytesF1, input, nx*sizeof(float), nx*sizeof(float), ny, hipMemcpyHostToDevice) );
if (mode >= 2) cutilSafeCall( hipMemcpy2D(d_original, pitchBytesF1, d_input, pitchBytesF1, nx*sizeof(float), ny, hipMemcpyDeviceToDevice) );
pitchF1 = pitchBytesF1/sizeof(float);
} else if (nc == 3) {
cutilSafeCall( hipMallocPitch( (void**)&(d_input), &pitchBytesF3, nx*sizeof(float3), ny ) );
cutilSafeCall( hipMallocPitch( (void**)&(d_output), &pitchBytesF3, nx*sizeof(float3), ny ) );
if (mode) cutilSafeCall( hipMallocPitch( (void**)&(d_diffusivity), &pitchBytesF3, nx*sizeof(float3), ny ) );
if (mode >= 2) cutilSafeCall( hipMallocPitch( (void**)&(d_original), &pitchBytesF3, nx*sizeof(float3), ny ) );
cutilSafeCall( hipMemcpy2D(d_input, pitchBytesF3, input, nx*sizeof(float3), nx*sizeof(float3), ny, hipMemcpyHostToDevice) );
if (mode >= 2) cutilSafeCall( hipMemcpy2D(d_original, pitchBytesF3, d_input, pitchBytesF3, nx*sizeof(float3), ny, hipMemcpyDeviceToDevice) );
}
// Execution of the Diffusion Kernel
if (mode == 0) { // linear isotropic diffision
if (nc == 1) {
for (i=0;i<iterations;i++) {
hipLaunchKernelGGL(( diffuse_linear_isotrop_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input, d_output, timeStep, nx, ny, pitchF1);
cutilSafeCall( hipDeviceSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
else if (nc == 3) {
for (i=0;i<iterations;i++) {
hipLaunchKernelGGL(( diffuse_linear_isotrop_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, (float3*)d_input,(float3*)d_output,timeStep,nx,ny,pitchBytesF3);
cutilSafeCall( hipDeviceSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
}
else if (mode == 1) { // nonlinear isotropic diffusion
if (nc == 1) {
for (i=0;i<iterations;i++) {
hipLaunchKernelGGL(( compute_tv_diffusivity_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input,d_diffusivity,nx,ny,pitchF1);
cutilSafeCall( hipDeviceSynchronize() );
hipLaunchKernelGGL(( diffuse_nonlinear_isotrop_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input,d_diffusivity,d_output,timeStep,nx,ny,pitchF1);
cutilSafeCall( hipDeviceSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
else if (nc == 3) {
for (i=0;i<iterations;i++) {
if (jointDiffusivity)
hipLaunchKernelGGL(( compute_tv_diffusivity_joined_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, (float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
else
hipLaunchKernelGGL(( compute_tv_diffusivity_separate_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, (float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
cutilSafeCall( hipDeviceSynchronize() );
hipLaunchKernelGGL(( diffuse_nonlinear_isotrop_shared), dim3(dimGrid),dim3(dimBlock), 0, 0,
(float3*)d_input,(float3*)d_diffusivity,(float3*)d_output,timeStep,nx,ny,pitchBytesF3);
cutilSafeCall( hipDeviceSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
}
else if (mode == 2) { // Jacobi-method
if (nc == 1) {
for (i=0;i<iterations;i++) {
hipLaunchKernelGGL(( compute_tv_diffusivity_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input,d_diffusivity,nx,ny,pitchF1);
cutilSafeCall( hipDeviceSynchronize() );
for (j=0;j<lagged_iterations;j++) {
hipLaunchKernelGGL(( jacobi_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, d_output,d_input,d_original,
d_diffusivity,weight,nx,ny,pitchF1);
cutilSafeCall( hipDeviceSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
}
else if (nc == 3) {
for (i=0;i<iterations;i++) {
if (jointDiffusivity)
hipLaunchKernelGGL(( compute_tv_diffusivity_joined_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, (float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
else
hipLaunchKernelGGL(( compute_tv_diffusivity_separate_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, (float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
cutilSafeCall( hipDeviceSynchronize() );
for (j=0;j<lagged_iterations;j++) {
hipLaunchKernelGGL(( jacobi_shared), dim3(dimGrid),dim3(dimBlock), 0, 0,
(float3*)d_output,(float3*)d_input,
(float3*)d_original,(float3*)d_diffusivity,
weight,nx,ny,pitchBytesF3);
cutilSafeCall( hipDeviceSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
}
}
else if (mode == 3) { // Successive Over Relaxation (Gauss-Seidel with extrapolation)
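// note: the SOR kernels below deliberately read and write the same buffer (d_input); the red and
// black half-sweeps are separated by a device synchronisation, so each half-sweep only updates
// cells of one colour while reading the other colour's current values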
if (nc == 1) {
for (i=0;i<iterations;i++) {
hipLaunchKernelGGL(( compute_tv_diffusivity_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input,d_diffusivity,nx,ny,pitchF1);
cutilSafeCall( hipDeviceSynchronize() );
for(j=0;j<lagged_iterations;j++) {
hipLaunchKernelGGL(( sor_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input,d_input,d_original,
d_diffusivity,weight,overrelaxation,nx,ny,pitchF1, 0);
cutilSafeCall( hipDeviceSynchronize() );
hipLaunchKernelGGL(( sor_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input,d_input,d_original,
d_diffusivity,weight,overrelaxation,nx,ny,pitchF1, 1);
cutilSafeCall( hipDeviceSynchronize() );
}
}
}
if (nc == 3) {
for (i=0;i<iterations;i++) {
if (jointDiffusivity)
hipLaunchKernelGGL(( compute_tv_diffusivity_joined_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, (float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
else
hipLaunchKernelGGL(( compute_tv_diffusivity_separate_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, (float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
cutilSafeCall( hipDeviceSynchronize() );
for (j=0;j<lagged_iterations;j++) {
hipLaunchKernelGGL(( sor_shared), dim3(dimGrid),dim3(dimBlock), 0, 0,
(float3*)d_input,(float3*)d_input,
(float3*)d_original,(float3*)d_diffusivity,
weight,overrelaxation,nx,ny,pitchBytesF3, 0);
cutilSafeCall( hipDeviceSynchronize() );
hipLaunchKernelGGL(( sor_shared), dim3(dimGrid),dim3(dimBlock), 0, 0,
(float3*)d_input,(float3*)d_input,
(float3*)d_original,(float3*)d_diffusivity,
weight,overrelaxation,nx,ny,pitchBytesF3, 1);
cutilSafeCall( hipDeviceSynchronize() );
}
}
}
}
if (nc == 1) {
if (mode == 3) cutilSafeCall( hipMemcpy2D(output, nx*sizeof(float), d_input, pitchBytesF1, nx*sizeof(float), ny, hipMemcpyDeviceToHost) );
else cutilSafeCall( hipMemcpy2D(output, nx*sizeof(float), d_output, pitchBytesF1, nx*sizeof(float), ny, hipMemcpyDeviceToHost) );
} else if (nc == 3) {
if (mode == 3) cutilSafeCall( hipMemcpy2D(output, nx*sizeof(float3), d_input, pitchBytesF3, nx*sizeof(float3), ny, hipMemcpyDeviceToHost) );
else cutilSafeCall( hipMemcpy2D(output, nx*sizeof(float3), d_output, pitchBytesF3, nx*sizeof(float3), ny, hipMemcpyDeviceToHost) );
}
// clean up
if (d_original) cutilSafeCall( hipFree(d_original) );
if (d_diffusivity) cutilSafeCall( hipFree(d_diffusivity) );
if (d_output) cutilSafeCall( hipFree(d_output) );
if (d_input) cutilSafeCall( hipFree(d_input) );
} | edb59a4c353826de18d49f34cbbdef0bddc21266.cu | /****************************************************************************\
* --- Practical Course: GPU Programming in Computer Vision ---
*
* time: winter term 2012/13 / March 11-18, 2013
*
* project: diffusion
* file: diffusion.cu
*
*
\******* PLEASE ENTER YOUR CORRECT STUDENT LOGIN, NAME AND ID BELOW *********/
const char* studentLogin = "p110";
const char* studentName = "Shrikant Vinchurkar";
const int studentID = 03636145;
/****************************************************************************\
*
* In this file the following methods have to be edited or completed:
*
* diffuse_linear_isotrop_shared(const float *d_input, ... )
* diffuse_linear_isotrop_shared(const float3 *d_input, ... )
* diffuse_nonlinear_isotrop_shared(const float *d_input, ... )
* diffuse_nonlinear_isotrop_shared(const float3 *d_input, ... )
* compute_tv_diffusivity_shared
* compute_tv_diffusivity_joined_shared
* compute_tv_diffusivity_separate_shared
* jacobi_shared(float *d_output, ... )
* jacobi_shared(float3 *d_output, ... )
* sor_shared(float *d_output, ... )
* sor_shared(float3 *d_output, ... )
*
\****************************************************************************/
#define DIFF_BW 16
#define DIFF_BH 16
#define TV_EPSILON 0.1f
#include "diffusion.cuh"
const char* getStudentLogin() { return studentLogin; };
const char* getStudentName() { return studentName; };
int getStudentID() { return studentID; };
bool checkStudentData() { return strcmp(studentLogin, "p010") != 0 && strcmp(studentName, "John Doe") != 0 && studentID != 1234567; };
bool checkStudentNameAndID() { return strcmp(studentName, "John Doe") != 0 && studentID != 1234567; };
//----------------------------------------------------------------------------
// Linear Diffusion
//----------------------------------------------------------------------------
// mode 0 gray: linear diffusion
__global__ void diffuse_linear_isotrop_shared(
const float *d_input,
float *d_output,
float timeStep,
int nx, int ny,
size_t pitch)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const int idx = y*pitch + x;
__shared__ float u[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = d_input[idx];
if (x == 0) u[0][ty] = u[tx][ty];
else if (threadIdx.x == 0) u[0][ty] = d_input[idx-1];
if (x == nx-1) u[tx+1][ty] = u[tx][ty];
else if (threadIdx.x == blockDim.x-1) u[tx+1][ty] = d_input[idx+1];
if (y == 0) u[tx][0] = u[tx][ty];
else if (threadIdx.y == 0) u[tx][0] = d_input[idx-pitch];
if (y == ny-1) u[tx][ty+1] = u[tx][ty];
else if (threadIdx.y == blockDim.y-1) u[tx][ty+1] = d_input[idx+pitch];
}
__syncthreads();
// ### implement me ###
// calculating linear isotropic diffusion
if( x < nx && y < ny )//guards
{
float isoD = u[tx+1][ty] + u[tx-1][ty] + u[tx][ty+1] + u[tx][ty-1] - 4*u[tx][ty];
d_output[idx] = u[tx][ty] + timeStep * isoD;
}
}
// mode 0 interleaved: linear diffusion
__global__ void diffuse_linear_isotrop_shared
(
const float3 *d_input,
float3 *d_output,
float timeStep,
int nx, int ny,
size_t pitchBytes
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
const char* imgO = (char*)d_output + y*pitchBytes + x*sizeof(float3);
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
float3 imgValue;
// load data into shared memory
if (x < nx && y < ny) {
imgValue = *( (float3*)imgP );
u[tx][ty] = imgValue;
if (x == 0) u[0][ty] = imgValue;
else if (threadIdx.x == 0) u[0][ty] = *( ((float3*)imgP)-1 );
if (x == nx-1) u[tx+1][ty] = imgValue;
else if (threadIdx.x == blockDim.x-1) u[tx+1][ty] = *( ((float3*)imgP)+1 );
if (y == 0) u[tx][0] = imgValue;
else if (threadIdx.y == 0) u[tx][0] = *( (float3*)(imgP-pitchBytes) );
if (y == ny-1) u[tx][ty+1] = imgValue;
else if (threadIdx.y == blockDim.y-1) u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
}
__syncthreads();
// ### implement me ###
// calculating linear isotropic diffusion
if( x < nx && y < ny )//guards
{
imgValue.x = u[tx][ty].x + timeStep * (u[tx+1][ty].x + u[tx-1][ty].x + u[tx][ty+1].x + u[tx][ty-1].x - 4*u[tx][ty].x);
imgValue.y = u[tx][ty].y + timeStep * (u[tx+1][ty].y + u[tx-1][ty].y + u[tx][ty+1].y + u[tx][ty-1].y - 4*u[tx][ty].y);
imgValue.z = u[tx][ty].z + timeStep * (u[tx+1][ty].z + u[tx-1][ty].z + u[tx][ty+1].z + u[tx][ty-1].z - 4*u[tx][ty].z);
*( (float3 *)imgO ) = imgValue;
}
}
//----------------------------------------------------------------------------
// Non-linear Diffusion - explicit scheme
//----------------------------------------------------------------------------
// mode 1 gray: nonlinear diffusion
__global__ void diffuse_nonlinear_isotrop_shared
(
const float *d_input,
const float *d_diffusivity,
float *d_output,
float timeStep,
int nx,
int ny,
size_t pitch
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const int idx = y*pitch + x;
__shared__ float u[DIFF_BW+2][DIFF_BH+2];
__shared__ float g[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = d_input[idx];
g[tx][ty] = d_diffusivity[idx];
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = d_input[idx-1];
g[0][ty] = d_diffusivity[idx-1];
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = d_input[idx+1];
g[tx+1][ty] = d_diffusivity[idx+1];
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = d_input[idx-pitch];
g[tx][0] = d_diffusivity[idx-pitch];
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = d_input[idx+pitch];
g[tx][ty+1] = d_diffusivity[idx+pitch];
}
}
__syncthreads();
// ### implement me ###
if( x < nx && y < ny )//guards
{
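    // interface diffusivities: g averaged onto the half-way positions
    // between the centre pixel and each of its four neighbours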
float phiR = 0.5f *( g[tx+1][ty] + g[tx][ty] );
float phiL = 0.5f *( g[tx-1][ty] + g[tx][ty] );
float phiU = 0.5f *( g[tx][ty-1] + g[tx][ty] );
float phiD = 0.5f *( g[tx][ty+1] + g[tx][ty] );
float sum = phiR * u[tx+1][ty] + phiL * u[tx-1][ty] + phiU * u[tx][ty-1] + phiD * u[tx][ty+1] -
( phiR + phiL + phiU + phiD ) * u[tx][ty];
d_output[idx] = u[tx][ty] + timeStep * sum;
}
}
// mode 1 interleaved: nonlinear diffusion
__global__ void diffuse_nonlinear_isotrop_shared
(
const float3 *d_input,
const float3 *d_diffusivity,
float3 *d_output,
float timeStep,
int nx,
int ny,
size_t pitchBytes
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
const char* diffP = (char*)d_diffusivity + y*pitchBytes + x*sizeof(float3);
const char* imgO = (char*)d_output + y*pitchBytes + x*sizeof(float3);
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
__shared__ float3 g[DIFF_BW+2][DIFF_BH+2];
float3 value;
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = *( (float3*)imgP );
g[tx][ty] = *( (float3*)diffP );
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = *( ((float3*)imgP)-1 );
g[0][ty] = *( ((float3*)diffP)-1 );
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = *( ((float3*)imgP)+1 );
g[tx+1][ty] = *( ((float3*)diffP)+1 );
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = *( (float3*)(imgP-pitchBytes) );
g[tx][0] = *( (float3*)(diffP-pitchBytes) );
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
g[tx][ty+1] = *( (float3*)(diffP+pitchBytes) );
}
}
__syncthreads();
// ### implement me ###
if( x < nx && y < ny )//guards
{
// calculate value.x
float phiR_x = 0.5f *( g[tx+1][ty].x + g[tx][ty].x );
float phiL_x = 0.5f *( g[tx-1][ty].x + g[tx][ty].x );
float phiU_x = 0.5f *( g[tx][ty-1].x + g[tx][ty].x );
float phiD_x = 0.5f *( g[tx][ty+1].x + g[tx][ty].x );
float sum_x = phiR_x * u[tx+1][ty].x + phiL_x * u[tx-1][ty].x +
phiU_x * u[tx][ty-1].x + phiD_x * u[tx][ty+1].x -
( phiR_x + phiL_x + phiU_x + phiD_x ) * u[tx][ty].x;
value.x = u[tx][ty].x + timeStep * sum_x;
// calculate value.y
float phiR_y = 0.5f *( g[tx+1][ty].y + g[tx][ty].y );
float phiL_y = 0.5f *( g[tx-1][ty].y + g[tx][ty].y );
float phiU_y = 0.5f *( g[tx][ty-1].y + g[tx][ty].y );
float phiD_y = 0.5f *( g[tx][ty+1].y + g[tx][ty].y );
float sum_y = phiR_y * u[tx+1][ty].y + phiL_y * u[tx-1][ty].y +
phiU_y * u[tx][ty-1].y + phiD_y * u[tx][ty+1].y -
( phiR_y + phiL_y + phiU_y + phiD_y ) * u[tx][ty].y;
value.y = u[tx][ty].y + timeStep * sum_y;
// calculate value.z
float phiR_z = 0.5f *( g[tx+1][ty].z + g[tx][ty].z );
float phiL_z = 0.5f *( g[tx-1][ty].z + g[tx][ty].z );
float phiU_z = 0.5f *( g[tx][ty-1].z + g[tx][ty].z );
float phiD_z = 0.5f *( g[tx][ty+1].z + g[tx][ty].z );
float sum_z = phiR_z * u[tx+1][ty].z + phiL_z * u[tx-1][ty].z +
phiU_z * u[tx][ty-1].z + phiD_z * u[tx][ty+1].z -
( phiR_z + phiL_z + phiU_z + phiD_z ) * u[tx][ty].z;
value.z = u[tx][ty].z + timeStep * sum_z;
*( (float3 *) imgO) = value;
}
}
// diffusivity computation for modes 1-3 gray
__global__ void compute_tv_diffusivity_shared
(
const float *d_input,
float *d_output,
int nx,
int ny,
size_t pitch
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const int idx = y*pitch + x;
__shared__ float u[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = d_input[idx];
if (x == 0) u[0][ty] = u[tx][ty];
else if (threadIdx.x == 0) u[0][ty] = d_input[idx-1];
if (x == nx-1) u[tx+1][ty] = u[tx][ty];
else if (threadIdx.x == blockDim.x-1) u[tx+1][ty] = d_input[idx+1];
if (y == 0) u[tx][0] = u[tx][ty];
else if (threadIdx.y == 0) u[tx][0] = d_input[idx-pitch];
if (y == ny-1) u[tx][ty+1] = u[tx][ty];
else if (threadIdx.y == blockDim.y-1) u[tx][ty+1] = d_input[idx+pitch];
}
__syncthreads();
// make use of the constant TV_EPSILON
// ### implement me ###
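  // TV diffusivity: g(|grad u|) = 1 / sqrt(|grad u|^2 + TV_EPSILON),
  // with the gradient approximated by central differences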
if( x < nx && y < ny )//guards
{
// calculate gradient magnitude
float derX = 0.5f * (u[tx+1][ty] - u[tx-1][ty]);
float derY = 0.5f * (u[tx][ty+1] - u[tx][ty-1]);
float temp = sqrt( derX * derX + derY * derY + TV_EPSILON );
d_output[idx] = 1 / temp;
}
}
/*! Computes a joined diffusivity for an RGB Image:
* (g_R,g_G,g_B)(R,G,B) :=
* (g((R+G+B)/3),g((R+G+B)/3),g((R+G+B)/3))
* */
__global__ void compute_tv_diffusivity_joined_shared
(
const float3 *d_input,
float3 *d_output,
int nx,
int ny,
size_t pitchBytes
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
const char* imgO = (char*)d_output + y*pitchBytes + x*sizeof(float3);
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
float3 imgValue;
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = *( (float3*)imgP );
if (x == 0) u[0][ty] = u[tx][ty];
else if (threadIdx.x == 0) u[0][ty] = *( ((float3*)imgP)-1 );
if (x == nx-1) u[tx+1][ty] = u[tx][ty];
else if (threadIdx.x == blockDim.x-1) u[tx+1][ty] = *( ((float3*)imgP)+1 );
if (y == 0) u[tx][0] = u[tx][ty];
else if (threadIdx.y == 0) u[tx][0] = *( (float3*)(imgP-pitchBytes) );
if (y == ny-1) u[tx][ty+1] = u[tx][ty];
else if (threadIdx.y == blockDim.y-1) u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
}
__syncthreads();
// make use of the constant TV_EPSILON
// ### implement me ###
/* (g_R,g_G,g_B)(R,G,B) :=
* (g((R+G+B)/3),g((R+G+B)/3),g((R+G+B)/3))*/
if( x < nx && y < ny )//guards
{
// calculate g((R+G+B)/3)
float derX = 0.5f * ((u[tx+1][ty].x + u[tx+1][ty].y + u[tx+1][ty].z)/3 -
(u[tx-1][ty].x + u[tx-1][ty].y + u[tx-1][ty].z)/3 );
float derY = 0.5f * ((u[tx][ty+1].x + u[tx][ty+1].y + u[tx][ty+1].z)/3 -
(u[tx][ty-1].x + u[tx][ty-1].y + u[tx][ty-1].z)/3);
float temp = 1 / (sqrt( derX * derX + derY * derY + TV_EPSILON ));
imgValue.x = temp;
imgValue.y = temp;
imgValue.z = temp;
*( (float3 *)imgO ) = imgValue;
}
}
/*! Computes a separate diffusivity for an RGB Image:
* (g_R,g_G,g_B)(R,G,B) :=
* (g(R),g(G),g(B))
* */
__global__ void compute_tv_diffusivity_separate_shared
(
const float3 *d_input,
float3 *d_output,
int nx,
int ny,
size_t pitchBytes
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
const char* imgO = (char*)d_output + y*pitchBytes + x*sizeof(float3);
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
float3 imgValue;
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = *( (float3*)imgP );
    if (x == 0) u[0][ty] = u[tx][ty];
else if (threadIdx.x == 0) u[0][ty] = *( ((float3*)imgP)-1 );
if (x == nx-1) u[tx+1][ty] = u[tx][ty];
else if (threadIdx.x == blockDim.x-1) u[tx+1][ty] = *( ((float3*)imgP)+1 );
if (y == 0) u[tx][0] = u[tx][ty];
else if (threadIdx.y == 0) u[tx][0] = *( (float3*)(imgP-pitchBytes) );
if (y == ny-1) u[tx][ty+1] = u[tx][ty];
else if (threadIdx.y == blockDim.y-1) u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
}
__syncthreads();
// make use of the constant TV_EPSILON
// ### implement me ###
if( x < nx && y < ny )//guards
{
// calculate gradient magnitude
// calculate g(R)
float derX_x = 0.5f * (u[tx+1][ty].x - u[tx-1][ty].x);
float derY_x = 0.5f * (u[tx][ty+1].x - u[tx][ty-1].x);
float temp_x = sqrt( derX_x * derX_x + derY_x * derY_x + TV_EPSILON );
imgValue.x = 1 / temp_x;
// calculate g(G)
float derX_y = 0.5f * (u[tx+1][ty].y - u[tx-1][ty].y);
float derY_y = 0.5f * (u[tx][ty+1].y - u[tx][ty-1].y);
float temp_y = sqrt( derX_y * derX_y + derY_y * derY_y + TV_EPSILON );
imgValue.y = 1 / temp_y;
// calculate g(B)
float derX_z = 0.5f * (u[tx+1][ty].z - u[tx-1][ty].z);
float derY_z = 0.5f * (u[tx][ty+1].z - u[tx][ty-1].z);
float temp_z = sqrt( derX_z * derX_z + derY_z * derY_z + TV_EPSILON );
imgValue.z = 1 / temp_z;
*( (float3 *)imgO ) = imgValue;
}
}
//----------------------------------------------------------------------------
// Non-linear Diffusion - Jacobi scheme
//----------------------------------------------------------------------------
// mode 2 gray: Jacobi solver
__global__ void jacobi_shared
(
float *d_output,
const float *d_input,
const float *d_original,
const float *d_diffusivity,
float weight,
int nx,
int ny,
size_t pitch
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int idx = y*pitch + x;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
__shared__ float u[DIFF_BW+2][DIFF_BH+2];
__shared__ float g[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = d_input[idx];
g[tx][ty] = d_diffusivity[idx];
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = d_input[idx-1];
g[0][ty] = d_diffusivity[idx-1];
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = d_input[idx+1];
g[tx+1][ty] = d_diffusivity[idx+1];
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = d_input[idx-pitch];
g[tx][0] = d_diffusivity[idx-pitch];
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = d_input[idx+pitch];
g[tx][ty+1] = d_diffusivity[idx+pitch];
}
}
__syncthreads();
// ### implement me ###
  // don't assemble the full system matrix A explicitly - waste of time & space
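  // Jacobi update with the interface diffusivities phi_n:
  // u_new = (d_original + weight * sum(phi_n * u_n)) / (1 + weight * sum(phi_n))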
if (x < nx && y < ny) //guards
{
// setting boundary values to 0 to avoid corruption of Jacobi scheme
float phiR = ( x == nx-1) ? 0.0f : 0.5f *( g[tx+1][ty] + g[tx][ty] );
float phiL = ( x == 0 ) ? 0.0f : 0.5f *( g[tx-1][ty] + g[tx][ty] );
float phiU = ( y == 0 ) ? 0.0f : 0.5f *( g[tx][ty-1] + g[tx][ty] );
float phiD = ( y == ny-1) ? 0.0f : 0.5f *( g[tx][ty+1] + g[tx][ty] );
float Aii = ( 1 + (phiR + phiL + phiU + phiD ) * weight) ;
float sumN = weight* ( phiR * u[tx+1][ty] + phiL * u[tx-1][ty] +
phiU * u[tx][ty-1] + phiD * u[tx][ty+1]);
d_output[idx] = (d_original[idx] + sumN) / Aii;
}
}
// mode 2 interleaved: Jacobi solver
__global__ void jacobi_shared
(
float3 *d_output,
const float3 *d_input,
const float3 *d_original,
const float3 *d_diffusivity,
float weight,
int nx,
int ny,
size_t pitchBytes
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
const char* diffP = (char*)d_diffusivity + y*pitchBytes + x*sizeof(float3);
const char* imgOut = (char*)d_output + y*pitchBytes + x*sizeof(float3);
const char* imgOrig = (char*)d_original + y*pitchBytes + x*sizeof(float3);
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
__shared__ float3 g[DIFF_BW+2][DIFF_BH+2];
float3 imgValue;
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = *( (float3*)imgP );
g[tx][ty] = *( (float3*)diffP );
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = *( ((float3*)imgP)-1 );
g[0][ty] = *( ((float3*)diffP)-1 );
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = *( ((float3*)imgP)+1 );
g[tx+1][ty] = *( ((float3*)diffP)+1 );
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = *( (float3*)(imgP-pitchBytes) );
g[tx][0] = *( (float3*)(diffP-pitchBytes) );
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
g[tx][ty+1] = *( (float3*)(diffP+pitchBytes) );
}
}
__syncthreads();
// ### implement me ###
if (x < nx && y < ny) //guards
{
// setting boundary values to 0 to avoid corruption of Jacobi scheme
float phiR_x =( x == nx-1 ) ? 0.0f : 0.5f *( g[tx+1][ty].x + g[tx][ty].x );
float phiL_x =( x == 0 ) ? 0.0f : 0.5f *( g[tx-1][ty].x + g[tx][ty].x );
float phiU_x =( y == 0 ) ? 0.0f : 0.5f *( g[tx][ty-1].x + g[tx][ty].x );
float phiD_x =( y == ny-1 ) ? 0.0f : 0.5f *( g[tx][ty+1].x + g[tx][ty].x );
float Aii_x = ( 1 + (phiR_x + phiL_x + phiU_x + phiD_x ) * weight) ;
float sumN_x = weight* ( phiR_x * u[tx+1][ty].x + phiL_x * u[tx-1][ty].x +
phiU_x * u[tx][ty-1].x + phiD_x * u[tx][ty+1].x);
imgValue.x = (*((float*)imgOrig) + sumN_x)/Aii_x;
float phiR_y =( x == nx-1 ) ? 0.0f : 0.5f *( g[tx+1][ty].y + g[tx][ty].y );
float phiL_y =( x == 0 ) ? 0.0f : 0.5f *( g[tx-1][ty].y + g[tx][ty].y );
float phiU_y =( y == 0 ) ? 0.0f : 0.5f *( g[tx][ty-1].y + g[tx][ty].y );
float phiD_y =( y == ny-1 ) ? 0.0f : 0.5f *( g[tx][ty+1].y + g[tx][ty].y );
float Aii_y = ( 1 + (phiR_y + phiL_y + phiU_y + phiD_y ) * weight) ;
float sumN_y = weight* ( phiR_y * u[tx+1][ty].y + phiL_y * u[tx-1][ty].y +
phiU_y * u[tx][ty-1].y + phiD_y * u[tx][ty+1].y);
imgValue.y = (*((float*)imgOrig+1) + sumN_y)/Aii_y;
float phiR_z =( x == nx-1 ) ? 0.0f : 0.5f *( g[tx+1][ty].z + g[tx][ty].z );
float phiL_z =( x == 0 ) ? 0.0f : 0.5f *( g[tx-1][ty].z + g[tx][ty].z );
float phiU_z =( y == 0 ) ? 0.0f : 0.5f *( g[tx][ty-1].z + g[tx][ty].z );
float phiD_z =( y == ny-1 ) ? 0.0f : 0.5f *( g[tx][ty+1].z + g[tx][ty].z );
float Aii_z = ( 1 + (phiR_z + phiL_z + phiU_z + phiD_z ) * weight) ;
float sumN_z = weight* ( phiR_z * u[tx+1][ty].z + phiL_z * u[tx-1][ty].z +
phiU_z * u[tx][ty-1].z + phiD_z * u[tx][ty+1].z);
imgValue.z = (*((float*)imgOrig+2) + sumN_z)/Aii_z;
*((float3*)imgOut) =imgValue;
}
}
//----------------------------------------------------------------------------
// Non-linear Diffusion - Successive Over-Relaxation (SOR)
//----------------------------------------------------------------------------
// mode 3 gray: SOR solver
__global__ void sor_shared
(
float *d_output,
const float *d_input,
const float *d_original,
const float *d_diffusivity,
float weight,
float overrelaxation,
int nx,
int ny,
size_t pitch,
int red
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int idx = y*pitch + x;
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
__shared__ float u[DIFF_BW+2][DIFF_BH+2];
__shared__ float g[DIFF_BW+2][DIFF_BH+2];
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = d_input[idx];
g[tx][ty] = d_diffusivity[idx];
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = d_input[idx-1];
g[0][ty] = d_diffusivity[idx-1];
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = d_input[idx+1];
g[tx+1][ty] = d_diffusivity[idx+1];
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = d_input[idx-pitch];
g[tx][0] = d_diffusivity[idx-pitch];
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = d_input[idx+pitch];
g[tx][ty+1] = d_diffusivity[idx+pitch];
}
}
__syncthreads();
// ### implement me ###
  // don't assemble the full system matrix A explicitly - waste of time & space
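  // red-black SOR update:
  // u_new = (1 - overrelaxation) * u + overrelaxation * (d_original + weight * sum(phi_n * u_n)) / Aii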
if (x < nx && y < ny)//guards
{
// implementing the checkerboard pattern
if( (tx+ty) % 2 == red) // allow only threads with even sum if red==0 & odd sum if red==1
{
float phiR =( x == nx-1 ) ? 0.0f : 0.5f *( g[tx+1][ty] + g[tx][ty] );
float phiL =( x == 0 ) ? 0.0f : 0.5f *( g[tx-1][ty] + g[tx][ty] );
float phiU =( y == 0 ) ? 0.0f : 0.5f *( g[tx][ty-1] + g[tx][ty] );
float phiD =( y == ny-1 ) ? 0.0f : 0.5f *( g[tx][ty+1] + g[tx][ty] );
float Aii = ( 1 + (phiR + phiL + phiU + phiD ) * weight) ;
float sumNU = weight * ( phiR * u[tx+1][ty] + phiU * u[tx][ty-1] );
float sumNL = weight * ( phiL * u[tx-1][ty] + phiD * u[tx][ty+1] );
d_output[idx] = (1-overrelaxation) * u[tx][ty] +
(d_original[idx] + sumNU + sumNL) * overrelaxation / Aii;
}
}
}
// mode 3 interleaved: SOR solver
__global__ void sor_shared
(
float3 *d_output,
const float3 *d_input,
const float3 *d_original,
const float3 *d_diffusivity,
float weight,
float overrelaxation,
int nx,
int ny,
size_t pitchBytes,
int red
)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const char* imgP = (char*)d_input + y*pitchBytes + x*sizeof(float3);
const char* diffP = (char*)d_diffusivity + y*pitchBytes + x*sizeof(float3);
const char* imgOut = (char*)d_output + y*pitchBytes + x*sizeof(float3);
const char* imgOrig = (char*)d_original + y*pitchBytes + x*sizeof(float3);
const int tx = threadIdx.x+1;
const int ty = threadIdx.y+1;
__shared__ float3 u[DIFF_BW+2][DIFF_BH+2];
__shared__ float3 g[DIFF_BW+2][DIFF_BH+2];
float3 imgValue;
// load data into shared memory
if (x < nx && y < ny) {
u[tx][ty] = *( (float3*)imgP );
g[tx][ty] = *( (float3*)diffP );
if (x == 0) {
u[0][ty] = u[tx][ty];
g[0][ty] = g[tx][ty];
}
else if (threadIdx.x == 0) {
u[0][ty] = *( ((float3*)imgP)-1 );
g[0][ty] = *( ((float3*)diffP)-1 );
}
if (x == nx-1) {
u[tx+1][ty] = u[tx][ty];
g[tx+1][ty] = g[tx][ty];
}
else if (threadIdx.x == blockDim.x-1) {
u[tx+1][ty] = *( ((float3*)imgP)+1 );
g[tx+1][ty] = *( ((float3*)diffP)+1 );
}
if (y == 0) {
u[tx][0] = u[tx][ty];
g[tx][0] = g[tx][ty];
}
else if (threadIdx.y == 0) {
u[tx][0] = *( (float3*)(imgP-pitchBytes) );
g[tx][0] = *( (float3*)(diffP-pitchBytes) );
}
if (y == ny-1) {
u[tx][ty+1] = u[tx][ty];
g[tx][ty+1] = g[tx][ty];
}
else if (threadIdx.y == blockDim.y-1) {
u[tx][ty+1] = *( (float3*)(imgP+pitchBytes) );
g[tx][ty+1] = *( (float3*)(diffP+pitchBytes) );
}
}
__syncthreads();
// ### implement me ###
if (x < nx && y < ny) //guards
{
// implementing the checkerboard pattern
if( (tx+ty) % 2 == red) // allow only threads with even sum if red==0 & odd sum if red==1
{
// calculate imgValue.x
float phiR_x =( x == nx-1 ) ? 0.0f : 0.5f *( g[tx+1][ty].x + g[tx][ty].x );
float phiL_x =( x == 0 ) ? 0.0f : 0.5f *( g[tx-1][ty].x + g[tx][ty].x );
float phiU_x =( y == 0 ) ? 0.0f : 0.5f *( g[tx][ty-1].x + g[tx][ty].x );
float phiD_x =( y == ny-1 ) ? 0.0f : 0.5f *( g[tx][ty+1].x + g[tx][ty].x );
float Aii_x = ( 1 + (phiR_x + phiL_x + phiU_x + phiD_x ) * weight) ;
float sumNU_x = weight * ( phiR_x * u[tx+1][ty].x + phiU_x * u[tx][ty-1].x );
float sumNL_x = weight * ( phiL_x * u[tx-1][ty].x + phiD_x * u[tx][ty+1].x );
imgValue.x = (1-overrelaxation) * u[tx][ty].x +
( *((float *) imgOrig)+ sumNU_x + sumNL_x) * overrelaxation / Aii_x;
// calculate imgValue.y
float phiR_y =( x == nx-1 ) ? 0.0f : 0.5f *( g[tx+1][ty].y + g[tx][ty].y );
float phiL_y =( x == 0 ) ? 0.0f : 0.5f *( g[tx-1][ty].y + g[tx][ty].y );
float phiU_y =( y == 0 ) ? 0.0f : 0.5f *( g[tx][ty-1].y + g[tx][ty].y );
float phiD_y =( y == ny-1 ) ? 0.0f : 0.5f *( g[tx][ty+1].y + g[tx][ty].y );
float Aii_y = ( 1 + (phiR_y + phiL_y + phiU_y + phiD_y ) * weight) ;
float sumNU_y = weight * ( phiR_y * u[tx+1][ty].y + phiU_y * u[tx][ty-1].y );
float sumNL_y = weight * ( phiL_y * u[tx-1][ty].y + phiD_y * u[tx][ty+1].y );
imgValue.y = (1-overrelaxation) * u[tx][ty].y +
( *((float *) imgOrig + 1)+ sumNU_y + sumNL_y) * overrelaxation / Aii_y;
// calculate imgValue.z
float phiR_z =( x == nx-1 ) ? 0.0f : 0.5f *( g[tx+1][ty].z + g[tx][ty].z );
float phiL_z =( x == 0 ) ? 0.0f : 0.5f *( g[tx-1][ty].z + g[tx][ty].z );
float phiU_z =( y == 0 ) ? 0.0f : 0.5f *( g[tx][ty-1].z + g[tx][ty].z );
float phiD_z =( y == ny-1 ) ? 0.0f : 0.5f *( g[tx][ty+1].z + g[tx][ty].z );
float Aii_z = ( 1 + (phiR_z + phiL_z + phiU_z + phiD_z ) * weight) ;
float sumNU_z = weight * ( phiR_z * u[tx+1][ty].z + phiU_z * u[tx][ty-1].z );
float sumNL_z = weight * ( phiL_z * u[tx-1][ty].z + phiD_z * u[tx][ty+1].z );
imgValue.z = (1-overrelaxation) * u[tx][ty].z +
( *((float *) imgOrig + 2)+ sumNU_z + sumNL_z) * overrelaxation / Aii_z;
*((float3*) imgOut ) = imgValue;
}
}
}
//----------------------------------------------------------------------------
// Host function
//----------------------------------------------------------------------------
void gpu_diffusion
(
const float *input,
float *output,
int nx, int ny, int nc,
float timeStep,
int iterations,
float weight,
int lagged_iterations,
float overrelaxation,
int mode,
bool jointDiffusivity
)
{
int i,j;
size_t pitchF1, pitchBytesF1, pitchBytesF3;
float *d_input = 0;
float *d_output = 0;
float *d_diffusivity = 0;
float *d_original = 0;
float *temp = 0;
dim3 dimGrid((int)ceil((float)nx/DIFF_BW), (int)ceil((float)ny/DIFF_BH));
dim3 dimBlock(DIFF_BW,DIFF_BH);
// Allocation of GPU Memory
if (nc == 1) {
cutilSafeCall( cudaMallocPitch( (void**)&(d_input), &pitchBytesF1, nx*sizeof(float), ny ) );
cutilSafeCall( cudaMallocPitch( (void**)&(d_output), &pitchBytesF1, nx*sizeof(float), ny ) );
if (mode) cutilSafeCall( cudaMallocPitch( (void**)&(d_diffusivity), &pitchBytesF1, nx*sizeof(float), ny ) );
if (mode >= 2) cutilSafeCall( cudaMallocPitch( (void**)&(d_original), &pitchBytesF1, nx*sizeof(float), ny ) );
cutilSafeCall( cudaMemcpy2D(d_input, pitchBytesF1, input, nx*sizeof(float), nx*sizeof(float), ny, cudaMemcpyHostToDevice) );
if (mode >= 2) cutilSafeCall( cudaMemcpy2D(d_original, pitchBytesF1, d_input, pitchBytesF1, nx*sizeof(float), ny, cudaMemcpyDeviceToDevice) );
pitchF1 = pitchBytesF1/sizeof(float);
} else if (nc == 3) {
cutilSafeCall( cudaMallocPitch( (void**)&(d_input), &pitchBytesF3, nx*sizeof(float3), ny ) );
cutilSafeCall( cudaMallocPitch( (void**)&(d_output), &pitchBytesF3, nx*sizeof(float3), ny ) );
if (mode) cutilSafeCall( cudaMallocPitch( (void**)&(d_diffusivity), &pitchBytesF3, nx*sizeof(float3), ny ) );
if (mode >= 2) cutilSafeCall( cudaMallocPitch( (void**)&(d_original), &pitchBytesF3, nx*sizeof(float3), ny ) );
cutilSafeCall( cudaMemcpy2D(d_input, pitchBytesF3, input, nx*sizeof(float3), nx*sizeof(float3), ny, cudaMemcpyHostToDevice) );
if (mode >= 2) cutilSafeCall( cudaMemcpy2D(d_original, pitchBytesF3, d_input, pitchBytesF3, nx*sizeof(float3), ny, cudaMemcpyDeviceToDevice) );
}
// Execution of the Diffusion Kernel
  if (mode == 0) { // linear isotropic diffusion
if (nc == 1) {
for (i=0;i<iterations;i++) {
diffuse_linear_isotrop_shared<<<dimGrid,dimBlock>>>(d_input, d_output, timeStep, nx, ny, pitchF1);
cutilSafeCall( cudaThreadSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
else if (nc == 3) {
for (i=0;i<iterations;i++) {
diffuse_linear_isotrop_shared<<<dimGrid,dimBlock>>>((float3*)d_input,(float3*)d_output,timeStep,nx,ny,pitchBytesF3);
cutilSafeCall( cudaThreadSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
}
else if (mode == 1) { // nonlinear isotropic diffusion
if (nc == 1) {
for (i=0;i<iterations;i++) {
compute_tv_diffusivity_shared<<<dimGrid,dimBlock>>>(d_input,d_diffusivity,nx,ny,pitchF1);
cutilSafeCall( cudaThreadSynchronize() );
diffuse_nonlinear_isotrop_shared<<<dimGrid,dimBlock>>>(d_input,d_diffusivity,d_output,timeStep,nx,ny,pitchF1);
cutilSafeCall( cudaThreadSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
else if (nc == 3) {
for (i=0;i<iterations;i++) {
if (jointDiffusivity)
compute_tv_diffusivity_joined_shared<<<dimGrid,dimBlock>>>((float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
else
compute_tv_diffusivity_separate_shared<<<dimGrid,dimBlock>>>((float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
cutilSafeCall( cudaThreadSynchronize() );
diffuse_nonlinear_isotrop_shared<<<dimGrid,dimBlock>>>
((float3*)d_input,(float3*)d_diffusivity,(float3*)d_output,timeStep,nx,ny,pitchBytesF3);
cutilSafeCall( cudaThreadSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
}
else if (mode == 2) { // Jacobi-method
if (nc == 1) {
for (i=0;i<iterations;i++) {
compute_tv_diffusivity_shared<<<dimGrid,dimBlock>>>(d_input,d_diffusivity,nx,ny,pitchF1);
cutilSafeCall( cudaThreadSynchronize() );
for (j=0;j<lagged_iterations;j++) {
jacobi_shared<<<dimGrid,dimBlock>>> (d_output,d_input,d_original,
d_diffusivity,weight,nx,ny,pitchF1);
cutilSafeCall( cudaThreadSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
}
else if (nc == 3) {
for (i=0;i<iterations;i++) {
if (jointDiffusivity)
compute_tv_diffusivity_joined_shared<<<dimGrid,dimBlock>>>((float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
else
compute_tv_diffusivity_separate_shared<<<dimGrid,dimBlock>>>((float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
cutilSafeCall( cudaThreadSynchronize() );
for (j=0;j<lagged_iterations;j++) {
jacobi_shared<<<dimGrid,dimBlock>>>
((float3*)d_output,(float3*)d_input,
(float3*)d_original,(float3*)d_diffusivity,
weight,nx,ny,pitchBytesF3);
cutilSafeCall( cudaThreadSynchronize() );
temp = d_input;
d_input = d_output;
d_output = temp;
}
}
}
}
else if (mode == 3) { // Successive Over Relaxation (Gauss-Seidel with extrapolation)
if (nc == 1) {
for (i=0;i<iterations;i++) {
compute_tv_diffusivity_shared<<<dimGrid,dimBlock>>>(d_input,d_diffusivity,nx,ny,pitchF1);
cutilSafeCall( cudaThreadSynchronize() );
for(j=0;j<lagged_iterations;j++) {
sor_shared<<<dimGrid,dimBlock>>>(d_input,d_input,d_original,
d_diffusivity,weight,overrelaxation,nx,ny,pitchF1, 0);
cutilSafeCall( cudaThreadSynchronize() );
sor_shared<<<dimGrid,dimBlock>>>(d_input,d_input,d_original,
d_diffusivity,weight,overrelaxation,nx,ny,pitchF1, 1);
cutilSafeCall( cudaThreadSynchronize() );
}
}
}
if (nc == 3) {
for (i=0;i<iterations;i++) {
if (jointDiffusivity)
compute_tv_diffusivity_joined_shared<<<dimGrid,dimBlock>>>((float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
else
compute_tv_diffusivity_separate_shared<<<dimGrid,dimBlock>>>((float3*)d_input,(float3*)d_diffusivity,nx,ny,pitchBytesF3);
cutilSafeCall( cudaThreadSynchronize() );
for (j=0;j<lagged_iterations;j++) {
sor_shared<<<dimGrid,dimBlock>>>
((float3*)d_input,(float3*)d_input,
(float3*)d_original,(float3*)d_diffusivity,
weight,overrelaxation,nx,ny,pitchBytesF3, 0);
cutilSafeCall( cudaThreadSynchronize() );
sor_shared<<<dimGrid,dimBlock>>>
((float3*)d_input,(float3*)d_input,
(float3*)d_original,(float3*)d_diffusivity,
weight,overrelaxation,nx,ny,pitchBytesF3, 1);
cutilSafeCall( cudaThreadSynchronize() );
}
}
}
}
if (nc == 1) {
if (mode == 3) cutilSafeCall( cudaMemcpy2D(output, nx*sizeof(float), d_input, pitchBytesF1, nx*sizeof(float), ny, cudaMemcpyDeviceToHost) );
else cutilSafeCall( cudaMemcpy2D(output, nx*sizeof(float), d_output, pitchBytesF1, nx*sizeof(float), ny, cudaMemcpyDeviceToHost) );
} else if (nc == 3) {
if (mode == 3) cutilSafeCall( cudaMemcpy2D(output, nx*sizeof(float3), d_input, pitchBytesF3, nx*sizeof(float3), ny, cudaMemcpyDeviceToHost) );
else cutilSafeCall( cudaMemcpy2D(output, nx*sizeof(float3), d_output, pitchBytesF3, nx*sizeof(float3), ny, cudaMemcpyDeviceToHost) );
}
// clean up
if (d_original) cutilSafeCall( cudaFree(d_original) );
if (d_diffusivity) cutilSafeCall( cudaFree(d_diffusivity) );
if (d_output) cutilSafeCall( cudaFree(d_output) );
if (d_input) cutilSafeCall( cudaFree(d_input) );
} |
vectorSum_kernel.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2015, Julian Straub <[email protected]> Licensed
* under the MIT license. See the license file LICENSE.
*/
#include <jsCore/cuda_global.h>
// executions per thread
#define N_PER_T 32
#define BLOCK_SIZE 256
//#define K 6
#define DIM 3
#define SS_DIM (DIM+1)
template<typename T, uint32_t K, uint32_t BLK_SIZE>
__global__ void vectorSum_kernel(T *d_x,
uint32_t *z, uint32_t N, uint32_t k0, T *SSs)
{
// sufficient statistics for whole blocksize
// 3 (sum) + 1 (count)
__shared__ T xSSs[BLK_SIZE*SS_DIM*K];
//const int tid = threadIdx.x;
const int tid = threadIdx.x;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
  // zero-initialize the per-thread sufficient statistics in shared memory
#pragma unroll
for(int s=0; s< K*SS_DIM; ++s) {
// this is almost certainly bad ordering
xSSs[tid*K*SS_DIM+s] = 0.0f;
}
  __syncthreads(); // make sure the shared sufficient statistics are zero-initialized
for(int id=idx*N_PER_T; id<min(N,(idx+1)*N_PER_T); ++id)
{
int32_t k = z[id]-k0;
if(0 <= k && k < K)
{
// input sufficient statistics
// because Eigen is col major by default
xSSs[tid*SS_DIM*K+k*SS_DIM+0] += d_x[id*DIM+0];
xSSs[tid*SS_DIM*K+k*SS_DIM+1] += d_x[id*DIM+1];
xSSs[tid*SS_DIM*K+k*SS_DIM+2] += d_x[id*DIM+2];
// xSSs[tid*SS_DIM*K+k*SS_DIM+0] += d_x[id];
// xSSs[tid*SS_DIM*K+k*SS_DIM+1] += d_x[N+id];
// xSSs[tid*SS_DIM*K+k*SS_DIM+2] += d_x[2*N+id];
xSSs[tid*SS_DIM*K+k*SS_DIM+3] += 1.0f;
}
}
// old reduction.....
__syncthreads(); //sync the threads
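  // tree reduction in shared memory: halve the number of active partial-SS blocks
  // each step; the loop stops after s == 2, and the last two partial blocks are
  // merged into the global SSs buffer by the atomicAdd below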
#pragma unroll
for(int s=(BLK_SIZE)/2; s>1; s>>=1) {
if(tid < s)
{
const uint32_t si = s*K*SS_DIM;
const uint32_t tidk = tid*K*SS_DIM;
#pragma unroll
for( int k=0; k<K*SS_DIM; ++k) {
xSSs[tidk+k] += xSSs[si+tidk+k];
}
}
__syncthreads();
}
if(tid < K*SS_DIM) {
    // sum the last two remaining matrices directly into global memory
atomicAdd_<T>(&SSs[tid],xSSs[tid]+xSSs[tid+K*SS_DIM]);
}
}
extern void vectorSum_gpu( double *d_x, uint32_t *d_z , uint32_t N,
uint32_t k0, uint32_t K, double *d_SSs)
{
const uint32_t BLK_SIZE = BLOCK_SIZE/2;
assert(BLK_SIZE > DIM*K+DIM*(DIM-1)*K);
dim3 threads(BLK_SIZE,1,1);
dim3 blocks(N/(BLK_SIZE*N_PER_T)+(N%(BLK_SIZE*N_PER_T)>0?1:0),1,1);
if(K == 1){
hipLaunchKernelGGL(( vectorSum_kernel<double,1,BLK_SIZE>), dim3(blocks),dim3(threads), 0, 0,
d_x, d_z,N,k0,d_SSs);
}else if(K==2){
hipLaunchKernelGGL(( vectorSum_kernel<double,2,BLK_SIZE>), dim3(blocks),dim3(threads), 0, 0,
d_x, d_z,N,k0,d_SSs);
}else if(K==3){
hipLaunchKernelGGL(( vectorSum_kernel<double,3,BLK_SIZE>), dim3(blocks),dim3(threads), 0, 0,
d_x, d_z,N,k0,d_SSs);
}else if(K==4){
hipLaunchKernelGGL(( vectorSum_kernel<double,4,BLK_SIZE>), dim3(blocks),dim3(threads), 0, 0,
d_x, d_z,N,k0,d_SSs);
}else if(K==5){
hipLaunchKernelGGL(( vectorSum_kernel<double,5,BLK_SIZE>), dim3(blocks),dim3(threads), 0, 0,
d_x, d_z,N,k0,d_SSs);
}else if(K==6){
hipLaunchKernelGGL(( vectorSum_kernel<double,6,BLK_SIZE>), dim3(blocks),dim3(threads), 0, 0,
d_x, d_z,N,k0,d_SSs);
}else{
assert(false);
}
checkCudaErrors(hipDeviceSynchronize());
};
extern void vectorSum_gpu(float *d_x, uint32_t *d_z,
uint32_t N, uint32_t k0, uint32_t K, float *d_SSs)
{
const uint32_t BLK_SIZE = BLOCK_SIZE;
assert(BLK_SIZE > DIM*K+DIM*(DIM-1)*K);
dim3 threads(BLK_SIZE,1,1);
dim3 blocks(N/(BLK_SIZE*N_PER_T)+(N%(BLK_SIZE*N_PER_T)>0?1:0),1,1);
if(K == 1){
hipLaunchKernelGGL(( vectorSum_kernel<float,1,BLK_SIZE>), dim3(blocks),dim3(threads), 0, 0,
d_x, d_z,N,k0,d_SSs);
}else if(K==2){
hipLaunchKernelGGL(( vectorSum_kernel<float,2,BLK_SIZE>), dim3(blocks),dim3(threads), 0, 0,
d_x, d_z,N,k0,d_SSs);
}else if(K==3){
hipLaunchKernelGGL(( vectorSum_kernel<float,3,BLK_SIZE>), dim3(blocks),dim3(threads), 0, 0,
d_x, d_z,N,k0,d_SSs);
}else if(K==4){
hipLaunchKernelGGL(( vectorSum_kernel<float,4,BLK_SIZE>), dim3(blocks),dim3(threads), 0, 0,
d_x, d_z,N,k0,d_SSs);
}else if(K==5){
hipLaunchKernelGGL(( vectorSum_kernel<float,5,BLK_SIZE>), dim3(blocks),dim3(threads), 0, 0,
d_x, d_z,N,k0,d_SSs);
}else if(K==6){
hipLaunchKernelGGL(( vectorSum_kernel<float,6,BLK_SIZE>), dim3(blocks),dim3(threads), 0, 0,
d_x, d_z,N,k0,d_SSs);
}else{
assert(false);
}
checkCudaErrors(hipDeviceSynchronize());
};
| vectorSum_kernel.cu | /* Copyright (c) 2015, Julian Straub <[email protected]> Licensed
* under the MIT license. See the license file LICENSE.
*/
#include <jsCore/cuda_global.h>
// executions per thread
#define N_PER_T 32
#define BLOCK_SIZE 256
//#define K 6
#define DIM 3
#define SS_DIM (DIM+1)
template<typename T, uint32_t K, uint32_t BLK_SIZE>
__global__ void vectorSum_kernel(T *d_x,
uint32_t *z, uint32_t N, uint32_t k0, T *SSs)
{
// sufficient statistics for whole blocksize
// 3 (sum) + 1 (count)
__shared__ T xSSs[BLK_SIZE*SS_DIM*K];
//const int tid = threadIdx.x;
const int tid = threadIdx.x;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
  // zero-initialize the per-thread sufficient statistics in shared memory
#pragma unroll
for(int s=0; s< K*SS_DIM; ++s) {
// this is almost certainly bad ordering
xSSs[tid*K*SS_DIM+s] = 0.0f;
}
  __syncthreads(); // make sure the shared sufficient statistics are zero-initialized
for(int id=idx*N_PER_T; id<min(N,(idx+1)*N_PER_T); ++id)
{
int32_t k = z[id]-k0;
if(0 <= k && k < K)
{
// input sufficient statistics
// because Eigen is col major by default
xSSs[tid*SS_DIM*K+k*SS_DIM+0] += d_x[id*DIM+0];
xSSs[tid*SS_DIM*K+k*SS_DIM+1] += d_x[id*DIM+1];
xSSs[tid*SS_DIM*K+k*SS_DIM+2] += d_x[id*DIM+2];
// xSSs[tid*SS_DIM*K+k*SS_DIM+0] += d_x[id];
// xSSs[tid*SS_DIM*K+k*SS_DIM+1] += d_x[N+id];
// xSSs[tid*SS_DIM*K+k*SS_DIM+2] += d_x[2*N+id];
xSSs[tid*SS_DIM*K+k*SS_DIM+3] += 1.0f;
}
}
// old reduction.....
__syncthreads(); //sync the threads
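  // tree reduction in shared memory: halve the number of active partial-SS blocks
  // each step; the loop stops after s == 2, and the last two partial blocks are
  // merged into the global SSs buffer by the atomicAdd below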
#pragma unroll
for(int s=(BLK_SIZE)/2; s>1; s>>=1) {
if(tid < s)
{
const uint32_t si = s*K*SS_DIM;
const uint32_t tidk = tid*K*SS_DIM;
#pragma unroll
for( int k=0; k<K*SS_DIM; ++k) {
xSSs[tidk+k] += xSSs[si+tidk+k];
}
}
__syncthreads();
}
if(tid < K*SS_DIM) {
// sum the last two remaining matrixes directly into global memory
atomicAdd_<T>(&SSs[tid],xSSs[tid]+xSSs[tid+K*SS_DIM]);
}
}
extern void vectorSum_gpu( double *d_x, uint32_t *d_z , uint32_t N,
uint32_t k0, uint32_t K, double *d_SSs)
{
const uint32_t BLK_SIZE = BLOCK_SIZE/2;
assert(BLK_SIZE > DIM*K+DIM*(DIM-1)*K);
dim3 threads(BLK_SIZE,1,1);
dim3 blocks(N/(BLK_SIZE*N_PER_T)+(N%(BLK_SIZE*N_PER_T)>0?1:0),1,1);
if(K == 1){
vectorSum_kernel<double,1,BLK_SIZE><<<blocks,threads>>>(
d_x, d_z,N,k0,d_SSs);
}else if(K==2){
vectorSum_kernel<double,2,BLK_SIZE><<<blocks,threads>>>(
d_x, d_z,N,k0,d_SSs);
}else if(K==3){
vectorSum_kernel<double,3,BLK_SIZE><<<blocks,threads>>>(
d_x, d_z,N,k0,d_SSs);
}else if(K==4){
vectorSum_kernel<double,4,BLK_SIZE><<<blocks,threads>>>(
d_x, d_z,N,k0,d_SSs);
}else if(K==5){
vectorSum_kernel<double,5,BLK_SIZE><<<blocks,threads>>>(
d_x, d_z,N,k0,d_SSs);
}else if(K==6){
vectorSum_kernel<double,6,BLK_SIZE><<<blocks,threads>>>(
d_x, d_z,N,k0,d_SSs);
}else{
assert(false);
}
checkCudaErrors(cudaDeviceSynchronize());
};
extern void vectorSum_gpu(float *d_x, uint32_t *d_z,
uint32_t N, uint32_t k0, uint32_t K, float *d_SSs)
{
const uint32_t BLK_SIZE = BLOCK_SIZE;
assert(BLK_SIZE > DIM*K+DIM*(DIM-1)*K);
dim3 threads(BLK_SIZE,1,1);
dim3 blocks(N/(BLK_SIZE*N_PER_T)+(N%(BLK_SIZE*N_PER_T)>0?1:0),1,1);
if(K == 1){
vectorSum_kernel<float,1,BLK_SIZE><<<blocks,threads>>>(
d_x, d_z,N,k0,d_SSs);
}else if(K==2){
vectorSum_kernel<float,2,BLK_SIZE><<<blocks,threads>>>(
d_x, d_z,N,k0,d_SSs);
}else if(K==3){
vectorSum_kernel<float,3,BLK_SIZE><<<blocks,threads>>>(
d_x, d_z,N,k0,d_SSs);
}else if(K==4){
vectorSum_kernel<float,4,BLK_SIZE><<<blocks,threads>>>(
d_x, d_z,N,k0,d_SSs);
}else if(K==5){
vectorSum_kernel<float,5,BLK_SIZE><<<blocks,threads>>>(
d_x, d_z,N,k0,d_SSs);
}else if(K==6){
vectorSum_kernel<float,6,BLK_SIZE><<<blocks,threads>>>(
d_x, d_z,N,k0,d_SSs);
}else{
assert(false);
}
checkCudaErrors(cudaDeviceSynchronize());
};
|
fb8ecb10cac0ca650cc6addc831caa329cb29b67.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* From PyTorch:
*
* Copyright (c) 2016- Facebook, Inc (Adam Paszke)
* Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
* Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
* Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
* Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
* Copyright (c) 2011-2013 NYU (Clement Farabet)
* Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
* Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
* Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
*
* From Caffe2:
*
* Copyright (c) 2016-present, Facebook Inc. All rights reserved.
*
* All contributions by Facebook:
* Copyright (c) 2016 Facebook Inc.
*
* All contributions by Google:
* Copyright (c) 2015 Google Inc.
* All rights reserved.
*
* All contributions by Yangqing Jia:
* Copyright (c) 2015 Yangqing Jia
* All rights reserved.
*
* All contributions from Caffe:
* Copyright(c) 2013, 2014, 2015, the respective contributors
* All rights reserved.
*
* All other contributions:
* Copyright(c) 2015, 2016 the respective contributors
* All rights reserved.
*
* Caffe2 uses a copyright model similar to Caffe: each contributor holds
* copyright over their contributions to Caffe2. The project versioning records
* all such contribution and copyright details. If a contributor wants to further
* mark their specific copyright on a particular contribution, they should
* indicate their copyright solely in the commit message of the change when it is
* committed.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
* and IDIAP Research Institute nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/NumericLimits.cuh>
#include <THH/THH.h>
#include <THH/THHGeneral.h>
#include <THH/THHThrustAllocator.cuh>
#include "type_shim.h"
#include "compat.h"
#define ALIGN_BYTES 16
#ifdef __HIP_PLATFORM_HCC__
#define WARP_SIZE 64
#define SYNCWARP(mask)
#else
#define WARP_SIZE 32
#define SYNCWARP(mask) __syncwarp(mask)
#endif
using Tensor = at::Tensor;
using TensorList = at::TensorList;
using ScalarType = at::ScalarType;
using at::acc_type;
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxForwardEpilogue {
__device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
: logsum(max_input + ::log(sum)) {}
__device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_log_sum_exp)
: logsum(max_log_sum_exp) {}
__device__ __forceinline__ OutT operator()(T input) const {
return static_cast<OutT>(input - logsum);
}
const AccumT logsum;
};
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxBackwardEpilogue {
__device__ __forceinline__ LogSoftMaxBackwardEpilogue(AccumT sum)
: sum(sum) {}
__device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
return static_cast<T>(gradOutput - ::exp(static_cast<AccumT>(output)) * sum);
}
const AccumT sum;
};
const int max_threads = 1024;
inline dim3 SoftMax_getBlockSize(int ILP, uint64_t dim_size) {
uint64_t block_size = 1;
uint64_t max_block_size = ::min(dim_size / ILP, static_cast<uint64_t>(max_threads));
while (block_size < (max_block_size/2)) block_size *= 2;
// Launch at least a single warp - the kernel assumes that.
block_size = ::max(block_size, static_cast<uint64_t>(WARP_SIZE));
return dim3(block_size);
}
template<typename T>
struct Add {
__device__ __forceinline__ T operator()(T a, T b) const {
return a + b;
}
};
template<typename T>
struct Max {
__device__ __forceinline__ T operator()(T a, T b) const {
return a < b ? b : a;
}
};
////////////////////////////////////////////////////////////////////////////////
// Regular kernel (fast when dim_size is large; requires inner_size == 1)
////////////////////////////////////////////////////////////////////////////////
template <typename T, typename AccumT>
struct MaxFloat
{
__device__ __forceinline__ AccumT operator()(AccumT max, T v) const {
return ::max(max, (AccumT)v);
}
};
template<typename T, typename AccumT>
struct AddFloat
{
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + v;
}
};
template<typename T, typename AccumT>
struct SumExpFloat
{
__device__ __forceinline__ SumExpFloat(AccumT v)
: max_k(v) {}
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + ::exp(v - max_k);
}
const AccumT max_k;
};
template <template<typename> class Reduction, typename AccumT>
__device__ __forceinline__ AccumT
blockReduce(AccumT* smem, AccumT val,
const Reduction<AccumT>& r,
AccumT defaultVal)
{
// To avoid RaW races from chaining blockReduce calls together, we need a sync here
__syncthreads();
smem[threadIdx.x] = val;
__syncthreads();
AccumT warpVal = defaultVal;
// First warp will perform per-warp reductions for the remaining warps
uint32_t mask = (((uint64_t)1) << (blockDim.x / WARP_SIZE)) - 1;
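  // mask has one bit per warp in the block: exactly the lanes of the first warp
  // that hold a per-warp partial and take part in SYNCWARP below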
if (threadIdx.x < WARP_SIZE) {
int lane = threadIdx.x % WARP_SIZE;
if (lane < blockDim.x / WARP_SIZE) {
#pragma unroll
for (int i = 0; i < WARP_SIZE; ++i) {
warpVal = r(warpVal, smem[lane * WARP_SIZE + i]);
}
SYNCWARP(mask);
smem[lane] = warpVal;
}
}
__syncthreads();
// First thread will perform a reduction of the above per-warp reductions
AccumT blockVal = defaultVal;
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.x / WARP_SIZE; ++i) {
blockVal = r(blockVal, smem[i]);
}
smem[0] = blockVal;
}
// Sync and broadcast
__syncthreads();
return smem[0];
}
template <template<typename> class Reduction1, template<typename> class Reduction2, typename AccumT>
__device__ __forceinline__ void
blockReduce(AccumT* smem,
AccumT* reducVal1,
AccumT val1,
const Reduction1<AccumT>& r1,
AccumT defaultVal1,
AccumT* reducVal2,
AccumT val2,
const Reduction2<AccumT>& r2,
AccumT defaultVal2)
{
// To avoid RaW races from chaining blockReduce calls together, we need a sync here
__syncthreads();
smem[threadIdx.x] = val1;
smem[blockDim.x + threadIdx.x] = val2;
__syncthreads();
AccumT warpVal1 = defaultVal1;
AccumT warpVal2 = defaultVal2;
// First warp will perform per-warp reductions for the remaining warps
uint32_t mask = (((uint64_t)1) << (blockDim.x / WARP_SIZE)) - 1;
if (threadIdx.x < WARP_SIZE) {
int lane = threadIdx.x % WARP_SIZE;
if (lane < blockDim.x / WARP_SIZE) {
#pragma unroll
for (int i = 0; i < WARP_SIZE; ++i) {
warpVal1 = r1(warpVal1, smem[lane * WARP_SIZE + i]);
warpVal2 = r2(warpVal2, smem[lane * WARP_SIZE + i + blockDim.x]);
}
SYNCWARP(mask);
smem[lane] = warpVal1;
smem[lane + blockDim.x] = warpVal2;
}
}
__syncthreads();
// First thread will perform a reduction of the above per-warp reductions
AccumT blockVal1 = defaultVal1;
AccumT blockVal2 = defaultVal2;
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.x / WARP_SIZE; ++i) {
blockVal1 = r1(blockVal1, smem[i]);
blockVal2 = r2(blockVal2, smem[i + blockDim.x]);
}
smem[0] = blockVal1;
smem[blockDim.x] = blockVal2;
}
// Sync and broadcast
__syncthreads();
*reducVal1 = smem[0];
*reducVal2 = smem[blockDim.x];
__syncthreads();
}
template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT>
__device__ __forceinline__ AccumT
ilpReduce(int shift,
T* data,
int size,
const Reduction<T, AccumT>& r,
AccumT defaultVal)
{
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LoadT;
AccumT threadVal = defaultVal;
int offset = threadIdx.x;
// shift and do 1
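  // if the input pointer is not ALIGN_BYTES-aligned, peel off the leading elements
  // one per thread so the vectorized LoadT reads below start on an aligned boundary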
if(shift > 0){
data -= shift;
size += shift;
if(threadIdx.x >= shift){
threadVal = r(threadVal, data[offset]);
}
size -= blockDim.x;
data += blockDim.x;
}
int last = size % (ILP * blockDim.x);
T v[ILP];
LoadT* value = reinterpret_cast<LoadT*>(&v);
for (; offset * ILP < (size - last); offset += blockDim.x) {
*value = reinterpret_cast<LoadT*>(data)[offset];
for (int j = 0; j < ILP; ++j) {
threadVal = r(threadVal, v[j]);
}
}
offset = size - last + threadIdx.x;
// Epilogue
for (; offset < size; offset += blockDim.x)
threadVal = r(threadVal, data[offset]);
return threadVal;
}
template <template<typename, typename> class Reduction1, template<typename, typename> class Reduction2, int ILP, typename T, typename AccumT>
__device__ __forceinline__ void
ilpReduce(int shift,
T* data,
int size,
AccumT* reducVal1,
const Reduction1<T, AccumT>& r1,
AccumT defaultVal1,
AccumT* reducVal2,
const Reduction2<T, AccumT>& r2,
AccumT defaultVal2)
{
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LoadT;
AccumT threadVal1 = defaultVal1;
AccumT threadVal2 = defaultVal2;
int offset = threadIdx.x;
// shift and do 1
if(shift > 0){
data -= shift;
size += shift;
if(threadIdx.x >= shift){
threadVal1 = r1(threadVal1, data[offset]);
threadVal2 = r2(threadVal2, data[offset]);
}
size -= blockDim.x;
data += blockDim.x;
}
int last = size % (ILP * blockDim.x);
T v[ILP];
LoadT* value = reinterpret_cast<LoadT*>(&v);
for (; offset * ILP < (size - last); offset += blockDim.x) {
*value = reinterpret_cast<LoadT*>(data)[offset];
for (int j = 0; j < ILP; ++j) {
threadVal1 = r1(threadVal1, v[j]);
threadVal2 = r2(threadVal2, v[j]);
}
}
offset = size - last + threadIdx.x;
// Epilogue
for (; offset < size; offset += blockDim.x) {
threadVal1 = r1(threadVal1, data[offset]);
threadVal2 = r2(threadVal2, data[offset]);
}
*reducVal1 = threadVal1;
*reducVal2 = threadVal2;
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template <typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxXEntropyForward(
accscalar_t *losses,
outscalar_t *max_log_sum_exp,
scalar_t *input,
int64_t *labels,
int64_t classes,
const float smoothing)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
// forward pointers to batch[blockIdx.x]
// each block handles a sample in the mini-batch
input += blockIdx.x * classes;
//output += blockIdx.x * classes;
const int shift = ((uint64_t)input) % ALIGN_BYTES / sizeof(scalar_t);
int64_t label = labels[blockIdx.x];
// find the max and sum
accscalar_t threadMax, threadSum, max_k, sum_k;
ilpReduce<MaxFloat, AddFloat, ILP, scalar_t, accscalar_t>(
shift, input, classes,
&threadMax, MaxFloat<scalar_t, accscalar_t>(),
-at::numeric_limits<accscalar_t>::max(),
&threadSum, AddFloat<scalar_t, accscalar_t>(),
static_cast<accscalar_t>(0));
blockReduce<Max, Add, accscalar_t>(
sdata,
&max_k, threadMax, Max<accscalar_t>(),
-at::numeric_limits<accscalar_t>::max(),
&sum_k, threadSum, Add<accscalar_t>(),
static_cast<accscalar_t>(0));
accscalar_t threadExp = ilpReduce<SumExpFloat, ILP, scalar_t, accscalar_t>(shift, input, classes, SumExpFloat<scalar_t, accscalar_t>(max_k), static_cast<accscalar_t>(0));
accscalar_t sumAll = blockReduce<Add, accscalar_t>(
sdata, threadExp, Add<accscalar_t>(), static_cast<accscalar_t>(0));
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_k, sumAll);
// calculate per element loss with label smoothing
// reserve max + log_sum_exp for bprop
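  // loss = smoothing * (logSumExp - mean(x)) + (1 - smoothing) * (logSumExp - x[label]),
  // i.e. cross entropy against a label-smoothed (uniform-mixed) target distribution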
if (threadIdx.x == 0) {
accscalar_t log_prob = epilogue(static_cast<accscalar_t>(input[label]));
losses[blockIdx.x] = (max_k + ::log(sumAll) - sum_k / classes) \
* smoothing - log_prob * (1 - smoothing);
max_log_sum_exp[blockIdx.x] = max_k + ::log(sumAll);
}
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t>
__device__ __forceinline__ void
apply(scalar_t *gradInput,
scalar_t *logits,
outscalar_t *max_log_sum_exp,
outscalar_t *gradOutput,
int64_t *labels,
const float smoothing,
int classes)
{
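  // d loss / d logit_j = gradOutput * ( softmax(x)_j
  //                      - (1 - smoothing) * [j == label] - smoothing / classes )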
accscalar_t smooth_positives = 1.0 - smoothing;
accscalar_t smooth_negatives = smoothing / classes;
accscalar_t tmpGradOutput = gradOutput[blockIdx.x];
int64_t label = labels[blockIdx.x];
accscalar_t coeff = max_log_sum_exp[blockIdx.x];
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
for (; offset < classes - last; offset += blockDim.x * ILP) {
accscalar_t tmpLogits[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
tmpLogits[j] = static_cast<accscalar_t>(logits[offset + j * blockDim.x]);
}
#pragma unroll
for (int j = 0; j < ILP; ++j)
gradInput[offset + j * blockDim.x] = tmpGradOutput * (
::exp(tmpLogits[j] - coeff) - static_cast<accscalar_t>(
(offset + j * blockDim.x == label) ? 1 : 0) *
smooth_positives - smooth_negatives);
}
for (; offset < classes; offset += blockDim.x)
gradInput[offset] = tmpGradOutput * (::exp(
static_cast<accscalar_t>(logits[offset]) - coeff) -
static_cast<accscalar_t>((offset == label) ? 1 : 0) *
smooth_positives - smooth_negatives);
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t>
__device__ __forceinline__ void
aligned_apply(int shift,
scalar_t *gradInput,
scalar_t *logits,
outscalar_t *max_log_sum_exp,
outscalar_t *gradOutput,
int64_t *labels,
const float smoothing,
int classes)
{
accscalar_t smooth_positives = 1.0 - smoothing;
accscalar_t smooth_negatives = smoothing / classes;
accscalar_t tmpGradOutput = gradOutput[blockIdx.x];
int64_t label = labels[blockIdx.x];
accscalar_t coeff = max_log_sum_exp[blockIdx.x];
int offset = threadIdx.x;
// shift and do 1
if(shift > 0){
logits -= shift;
gradInput -= shift;
classes += shift;
if(threadIdx.x >= shift){
gradInput[offset] = tmpGradOutput * (::exp(
static_cast<accscalar_t>(logits[offset]) - coeff) -
static_cast<accscalar_t>(((offset - shift) == label) ? 1 : 0) *
smooth_positives - smooth_negatives);
}
classes -= blockDim.x;
gradInput += blockDim.x;
logits += blockDim.x;
shift -= blockDim.x;
}
int last = classes % (ILP * blockDim.x);
typedef typename std::aligned_storage<ILP*sizeof(scalar_t), ILP*alignof(scalar_t)>::type LoadT;
// input
scalar_t v[ILP];
LoadT* value = reinterpret_cast<LoadT*>(&v);
// output
scalar_t r[ILP];
LoadT* result = reinterpret_cast<LoadT*>(&r);
for (; offset * ILP < (classes - last); offset += blockDim.x) {
*value = reinterpret_cast<LoadT*>(logits)[offset];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
r[j] = tmpGradOutput * (::exp(
static_cast<accscalar_t>(v[j]) - coeff) -
static_cast<accscalar_t>(((ILP * offset + j - shift) == label) ? 1 : 0) *
smooth_positives - smooth_negatives);
}
reinterpret_cast<LoadT*>(gradInput)[offset] = *result;
}
offset = classes - last + threadIdx.x;
for (; offset < classes; offset += blockDim.x)
gradInput[offset] = tmpGradOutput * (::exp(
static_cast<accscalar_t>(logits[offset]) - coeff) -
static_cast<accscalar_t>(((offset - shift) == label) ? 1 : 0) *
smooth_positives - smooth_negatives);
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxXEntropyBackward(
scalar_t *gradInput,
scalar_t *logits,
outscalar_t *max_log_sum_exp,
outscalar_t *gradOutput,
int64_t *labels,
const float smoothing,
int classes)
{
gradInput += blockIdx.x * classes;
logits += blockIdx.x * classes;
// Do vectorized load/store when input/output have same alignment
const int shift = ((uint64_t)logits) % ALIGN_BYTES / sizeof(scalar_t);
const int shift_ = ((uint64_t)gradInput) % ALIGN_BYTES / sizeof(scalar_t);
if (shift == shift_){
aligned_apply<ILP, scalar_t, accscalar_t, outscalar_t>(shift, gradInput, logits, max_log_sum_exp, gradOutput, labels, smoothing, classes);
}
else {
apply<ILP, scalar_t, accscalar_t, outscalar_t>(gradInput, logits, max_log_sum_exp, gradOutput, labels, smoothing, classes);
}
}
template<template<typename, typename, typename> class Epilogue>
std::vector<Tensor> host_softmax_xentropy(
const Tensor & input_,
const Tensor & labels_,
const float smoothing,
const bool half_to_float){
if (half_to_float) AT_ASSERTM(input_.type().scalarType() == ScalarType::Half,"conversion is supported for Half type only");
AT_ASSERTM(labels_.type().scalarType() == ScalarType::Long,"Label type should be CUDA Long");
auto input = input_.contiguous();
Tensor max_log_sum_exp = at::empty_like(labels_, half_to_float ? input.options().dtype(ScalarType::Float) : input.options());
Tensor losses = at::empty_like(labels_, input_.options().dtype(ScalarType::Float));
static_assert(std::is_same<acc_type<at::Half, true>, float>::value ||
std::is_same<acc_type<at::Half, true>, double>::value,
"accscalar_t for half should be float or double");
AT_ASSERTM(input.dim() == 2, "Currently only 2 dim input supported");
AT_ASSERTM(labels_.dim() == 1, "Labels should be 1 dimensional");
AT_ASSERTM(input.size(0) == labels_.size(0), "Input and label should have same number of examples");
AT_ASSERTM(input.numel() > 0, "Number of classes in input should not be 0");
const int64_t dim = 1;
int64_t outer_size = 1;
int64_t dim_size = input.size(dim);
int64_t inner_size = 1;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
for (int64_t i = 0; i < dim; ++i)
outer_size *= input.size(i);
for (int64_t i = dim + 1; i < input.dim(); ++i)
inner_size *= input.size(i);
// This kernel spawns a block per each element in the batch.
// XXX: it assumes that inner_size == 1
TORCH_CHECK(inner_size == 1, "Currently only inner size 1 supported");
dim3 grid(outer_size);
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "host_softmax_xentropy",
using accscalar_t = at::acc_type<scalar_t_0, true>;
const int ILP = sizeof(float4)/sizeof(scalar_t_0);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
if (!half_to_float) {
hipLaunchKernelGGL(( cunn_SoftMaxXEntropyForward<ILP, scalar_t_0, accscalar_t, scalar_t_0, Epilogue>)
, dim3(grid), dim3(block), 2 * block.x * sizeof(accscalar_t), stream,
losses.DATA_PTR<accscalar_t>(), max_log_sum_exp.DATA_PTR<scalar_t_0>(),
input.DATA_PTR<scalar_t_0>(), labels_.DATA_PTR<int64_t>(),
dim_size, smoothing
);
} else {
hipLaunchKernelGGL(( cunn_SoftMaxXEntropyForward<ILP, scalar_t_0, accscalar_t, accscalar_t, Epilogue>)
, dim3(grid), dim3(block), 2 * block.x * sizeof(accscalar_t), stream,
losses.DATA_PTR<accscalar_t>(), max_log_sum_exp.DATA_PTR<accscalar_t>(),
input.DATA_PTR<scalar_t_0>(), labels_.DATA_PTR<int64_t>(),
dim_size, smoothing
);
}
);
THCudaCheck(hipGetLastError());
std::vector<at::Tensor> ret = {losses, max_log_sum_exp};
return ret;
}
template<template<typename, typename, typename> class Epilogue>
Tensor host_softmax_xentropy_backward(
const at::Tensor &grad_loss,
const at::Tensor &logits_,
const at::Tensor &max_log_sum_exp,
const at::Tensor &labels,
const float smoothing,
bool half_to_float) {
const int64_t dim = 1;
Tensor gI = at::empty_like(logits_);
if (grad_loss.numel() == 0) {
return gI;
}
auto grad = grad_loss.contiguous();
auto logits = logits_.contiguous();
static_assert(std::is_same<acc_type<at::Half, true>, float>::value ||
std::is_same<acc_type<at::Half, true>, double>::value,
"accscalar_t for half should be float or double");
if (grad.dim() == 0) grad = grad.view(1);
AT_ASSERTM(logits_.dim() == 2, "Currently only 2 dim input supported");
AT_ASSERTM(labels.dim() == 1, "Labels should be 1 dimensional");
AT_ASSERTM(logits_.numel() > 0, "Number of classes in input should not be 0");
AT_ASSERTM(logits_.size(0) == labels.size(0), "Input and label should have same number of examples");
AT_ASSERTM(labels.size(0) == grad.size(0), "Label and loss should have same number of examples");
int64_t outer_size = 1;
int64_t dim_size = logits.size(dim);
int64_t inner_size = 1;
for (int64_t i = 0; i < dim; ++i)
outer_size *= logits.size(i);
for (int64_t i = dim + 1; i < logits.dim(); ++i)
inner_size *= logits.size(i);
// See descriptions of kernels above.
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
TORCH_CHECK(inner_size == 1, "Currently only inner size 1 supported");
dim3 grid(outer_size);
DISPATCH_FLOAT_AND_HALF(gI.scalar_type(), 0, "host_softmax_xentropy_backward",
using accscalar_t = acc_type<scalar_t_0, true>;
const int ILP = sizeof(float4)/sizeof(scalar_t_0);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
if (!half_to_float) {
hipLaunchKernelGGL(( cunn_SoftMaxXEntropyBackward<ILP, scalar_t_0, accscalar_t, scalar_t_0, Epilogue>)
, dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream,
gI.DATA_PTR<scalar_t_0>(), logits.DATA_PTR<scalar_t_0>(),
max_log_sum_exp.DATA_PTR<scalar_t_0>(),
grad.DATA_PTR<scalar_t_0>(), labels.DATA_PTR<int64_t>(),
smoothing, dim_size
);
} else {
hipLaunchKernelGGL(( cunn_SoftMaxXEntropyBackward<ILP, scalar_t_0, accscalar_t, accscalar_t, Epilogue>)
, dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream,
gI.DATA_PTR<scalar_t_0>(), logits.DATA_PTR<scalar_t_0>(),
max_log_sum_exp.DATA_PTR<accscalar_t>(),
grad.DATA_PTR<accscalar_t>(), labels.DATA_PTR<int64_t>(),
smoothing, dim_size
);
}
);
THCudaCheck(hipGetLastError());
return gI;
}
std::vector<Tensor> softmax_xentropy_cuda(const Tensor &input, const Tensor &labels, const float smoothing, const bool half_to_float){
return host_softmax_xentropy<LogSoftMaxForwardEpilogue>(input, labels, smoothing, half_to_float);
}
at::Tensor softmax_xentropy_backward_cuda(
const at::Tensor &grad_loss,
const at::Tensor &logits,
const at::Tensor &max_log_sum_exp,
const at::Tensor &labels,
const float smoothing) {
bool half_to_float = grad_loss.type().scalarType() != logits.type().scalarType();
if (half_to_float) {
AT_ASSERTM((grad_loss.type().scalarType() == ScalarType::Float && logits.type().scalarType() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float");
}
return host_softmax_xentropy_backward<LogSoftMaxBackwardEpilogue>(grad_loss, logits, max_log_sum_exp, labels, smoothing, half_to_float);
}
| fb8ecb10cac0ca650cc6addc831caa329cb29b67.cu | /**
* From PyTorch:
*
* Copyright (c) 2016- Facebook, Inc (Adam Paszke)
* Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
* Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
* Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
* Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
* Copyright (c) 2011-2013 NYU (Clement Farabet)
* Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
* Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
* Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
*
* From Caffe2:
*
* Copyright (c) 2016-present, Facebook Inc. All rights reserved.
*
* All contributions by Facebook:
* Copyright (c) 2016 Facebook Inc.
*
* All contributions by Google:
* Copyright (c) 2015 Google Inc.
* All rights reserved.
*
* All contributions by Yangqing Jia:
* Copyright (c) 2015 Yangqing Jia
* All rights reserved.
*
* All contributions from Caffe:
* Copyright(c) 2013, 2014, 2015, the respective contributors
* All rights reserved.
*
* All other contributions:
* Copyright(c) 2015, 2016 the respective contributors
* All rights reserved.
*
* Caffe2 uses a copyright model similar to Caffe: each contributor holds
* copyright over their contributions to Caffe2. The project versioning records
* all such contribution and copyright details. If a contributor wants to further
* mark their specific copyright on a particular contribution, they should
* indicate their copyright solely in the commit message of the change when it is
* committed.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
* and IDIAP Research Institute nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/NumericLimits.cuh>
#include <THC/THC.h>
#include <THC/THCGeneral.h>
#include <THC/THCThrustAllocator.cuh>
#include "type_shim.h"
#include "compat.h"
#define ALIGN_BYTES 16
#ifdef __HIP_PLATFORM_HCC__
#define WARP_SIZE 64
#define SYNCWARP(mask)
#else
#define WARP_SIZE 32
#define SYNCWARP(mask) __syncwarp(mask)
#endif
using Tensor = at::Tensor;
using TensorList = at::TensorList;
using ScalarType = at::ScalarType;
using at::acc_type;
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxForwardEpilogue {
__device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
: logsum(max_input + std::log(sum)) {}
__device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_log_sum_exp)
: logsum(max_log_sum_exp) {}
__device__ __forceinline__ OutT operator()(T input) const {
return static_cast<OutT>(input - logsum);
}
const AccumT logsum;
};
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxBackwardEpilogue {
__device__ __forceinline__ LogSoftMaxBackwardEpilogue(AccumT sum)
: sum(sum) {}
__device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
return static_cast<T>(gradOutput - std::exp(static_cast<AccumT>(output)) * sum);
}
const AccumT sum;
};
const int max_threads = 1024;
inline dim3 SoftMax_getBlockSize(int ILP, uint64_t dim_size) {
uint64_t block_size = 1;
uint64_t max_block_size = std::min(dim_size / ILP, static_cast<uint64_t>(max_threads));
while (block_size < (max_block_size/2)) block_size *= 2;
// Launch at least a single warp - the kernel assumes that.
block_size = std::max(block_size, static_cast<uint64_t>(WARP_SIZE));
return dim3(block_size);
}
template<typename T>
struct Add {
__device__ __forceinline__ T operator()(T a, T b) const {
return a + b;
}
};
template<typename T>
struct Max {
__device__ __forceinline__ T operator()(T a, T b) const {
return a < b ? b : a;
}
};
////////////////////////////////////////////////////////////////////////////////
// Regular kernel (fast when dim_size is large; requires inner_size == 1)
////////////////////////////////////////////////////////////////////////////////
template <typename T, typename AccumT>
struct MaxFloat
{
__device__ __forceinline__ AccumT operator()(AccumT max, T v) const {
return ::max(max, (AccumT)v);
}
};
template<typename T, typename AccumT>
struct AddFloat
{
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + v;
}
};
template<typename T, typename AccumT>
struct SumExpFloat
{
__device__ __forceinline__ SumExpFloat(AccumT v)
: max_k(v) {}
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + std::exp(v - max_k);
}
const AccumT max_k;
};
template <template<typename> class Reduction, typename AccumT>
__device__ __forceinline__ AccumT
blockReduce(AccumT* smem, AccumT val,
const Reduction<AccumT>& r,
AccumT defaultVal)
{
// To avoid RaW races from chaining blockReduce calls together, we need a sync here
__syncthreads();
smem[threadIdx.x] = val;
__syncthreads();
AccumT warpVal = defaultVal;
// First warp will perform per-warp reductions for the remaining warps
uint32_t mask = (((uint64_t)1) << (blockDim.x / WARP_SIZE)) - 1;
if (threadIdx.x < WARP_SIZE) {
int lane = threadIdx.x % WARP_SIZE;
if (lane < blockDim.x / WARP_SIZE) {
#pragma unroll
for (int i = 0; i < WARP_SIZE; ++i) {
warpVal = r(warpVal, smem[lane * WARP_SIZE + i]);
}
SYNCWARP(mask);
smem[lane] = warpVal;
}
}
__syncthreads();
// First thread will perform a reduction of the above per-warp reductions
AccumT blockVal = defaultVal;
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.x / WARP_SIZE; ++i) {
blockVal = r(blockVal, smem[i]);
}
smem[0] = blockVal;
}
// Sync and broadcast
__syncthreads();
return smem[0];
}
template <template<typename> class Reduction1, template<typename> class Reduction2, typename AccumT>
__device__ __forceinline__ void
blockReduce(AccumT* smem,
AccumT* reducVal1,
AccumT val1,
const Reduction1<AccumT>& r1,
AccumT defaultVal1,
AccumT* reducVal2,
AccumT val2,
const Reduction2<AccumT>& r2,
AccumT defaultVal2)
{
// To avoid RaW races from chaining blockReduce calls together, we need a sync here
__syncthreads();
smem[threadIdx.x] = val1;
smem[blockDim.x + threadIdx.x] = val2;
__syncthreads();
AccumT warpVal1 = defaultVal1;
AccumT warpVal2 = defaultVal2;
// First warp will perform per-warp reductions for the remaining warps
uint32_t mask = (((uint64_t)1) << (blockDim.x / WARP_SIZE)) - 1;
if (threadIdx.x < WARP_SIZE) {
int lane = threadIdx.x % WARP_SIZE;
if (lane < blockDim.x / WARP_SIZE) {
#pragma unroll
for (int i = 0; i < WARP_SIZE; ++i) {
warpVal1 = r1(warpVal1, smem[lane * WARP_SIZE + i]);
warpVal2 = r2(warpVal2, smem[lane * WARP_SIZE + i + blockDim.x]);
}
SYNCWARP(mask);
smem[lane] = warpVal1;
smem[lane + blockDim.x] = warpVal2;
}
}
__syncthreads();
// First thread will perform a reduction of the above per-warp reductions
AccumT blockVal1 = defaultVal1;
AccumT blockVal2 = defaultVal2;
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.x / WARP_SIZE; ++i) {
blockVal1 = r1(blockVal1, smem[i]);
blockVal2 = r2(blockVal2, smem[i + blockDim.x]);
}
smem[0] = blockVal1;
smem[blockDim.x] = blockVal2;
}
// Sync and broadcast
__syncthreads();
*reducVal1 = smem[0];
*reducVal2 = smem[blockDim.x];
__syncthreads();
}
template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT>
__device__ __forceinline__ AccumT
ilpReduce(int shift,
T* data,
int size,
const Reduction<T, AccumT>& r,
AccumT defaultVal)
{
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LoadT;
AccumT threadVal = defaultVal;
int offset = threadIdx.x;
// shift and do 1
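// (Back the data pointer up to the previous 16-byte boundary; threads with
// threadIdx.x >= shift each consume one scalar of the unaligned head, so the
// main loop below can use aligned vector loads of ILP elements.)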
if(shift > 0){
data -= shift;
size += shift;
if(threadIdx.x >= shift){
threadVal = r(threadVal, data[offset]);
}
size -= blockDim.x;
data += blockDim.x;
}
int last = size % (ILP * blockDim.x);
T v[ILP];
LoadT* value = reinterpret_cast<LoadT*>(&v);
for (; offset * ILP < (size - last); offset += blockDim.x) {
*value = reinterpret_cast<LoadT*>(data)[offset];
for (int j = 0; j < ILP; ++j) {
threadVal = r(threadVal, v[j]);
}
}
offset = size - last + threadIdx.x;
// Epilogue
for (; offset < size; offset += blockDim.x)
threadVal = r(threadVal, data[offset]);
return threadVal;
}
template <template<typename, typename> class Reduction1, template<typename, typename> class Reduction2, int ILP, typename T, typename AccumT>
__device__ __forceinline__ void
ilpReduce(int shift,
T* data,
int size,
AccumT* reducVal1,
const Reduction1<T, AccumT>& r1,
AccumT defaultVal1,
AccumT* reducVal2,
const Reduction2<T, AccumT>& r2,
AccumT defaultVal2)
{
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LoadT;
AccumT threadVal1 = defaultVal1;
AccumT threadVal2 = defaultVal2;
int offset = threadIdx.x;
// shift and do 1
if(shift > 0){
data -= shift;
size += shift;
if(threadIdx.x >= shift){
threadVal1 = r1(threadVal1, data[offset]);
threadVal2 = r2(threadVal2, data[offset]);
}
size -= blockDim.x;
data += blockDim.x;
}
int last = size % (ILP * blockDim.x);
T v[ILP];
LoadT* value = reinterpret_cast<LoadT*>(&v);
for (; offset * ILP < (size - last); offset += blockDim.x) {
*value = reinterpret_cast<LoadT*>(data)[offset];
for (int j = 0; j < ILP; ++j) {
threadVal1 = r1(threadVal1, v[j]);
threadVal2 = r2(threadVal2, v[j]);
}
}
offset = size - last + threadIdx.x;
// Epilogue
for (; offset < size; offset += blockDim.x) {
threadVal1 = r1(threadVal1, data[offset]);
threadVal2 = r2(threadVal2, data[offset]);
}
*reducVal1 = threadVal1;
*reducVal2 = threadVal2;
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template <typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxXEntropyForward(
accscalar_t *losses,
outscalar_t *max_log_sum_exp,
scalar_t *input,
int64_t *labels,
int64_t classes,
const float smoothing)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
// forward pointers to batch[blockIdx.x]
// each block handles a sample in the mini-batch
input += blockIdx.x * classes;
//output += blockIdx.x * classes;
const int shift = ((uint64_t)input) % ALIGN_BYTES / sizeof(scalar_t);
int64_t label = labels[blockIdx.x];
// find the max and sum
accscalar_t threadMax, threadSum, max_k, sum_k;
ilpReduce<MaxFloat, AddFloat, ILP, scalar_t, accscalar_t>(
shift, input, classes,
&threadMax, MaxFloat<scalar_t, accscalar_t>(),
-at::numeric_limits<accscalar_t>::max(),
&threadSum, AddFloat<scalar_t, accscalar_t>(),
static_cast<accscalar_t>(0));
blockReduce<Max, Add, accscalar_t>(
sdata,
&max_k, threadMax, Max<accscalar_t>(),
-at::numeric_limits<accscalar_t>::max(),
&sum_k, threadSum, Add<accscalar_t>(),
static_cast<accscalar_t>(0));
accscalar_t threadExp = ilpReduce<SumExpFloat, ILP, scalar_t, accscalar_t>(shift, input, classes, SumExpFloat<scalar_t, accscalar_t>(max_k), static_cast<accscalar_t>(0));
accscalar_t sumAll = blockReduce<Add, accscalar_t>(
sdata, threadExp, Add<accscalar_t>(), static_cast<accscalar_t>(0));
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_k, sumAll);
// calculate per element loss with label smoothing
// reserve max + log_sum_exp for bprop
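// Equivalently: loss = -(1 - smoothing) * log p[label] - smoothing * mean_j log p[j],
// written below in terms of max_k, log(sumAll) and sum_k (the plain sum of the logits).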
if (threadIdx.x == 0) {
accscalar_t log_prob = epilogue(static_cast<accscalar_t>(input[label]));
losses[blockIdx.x] = (max_k + std::log(sumAll) - sum_k / classes) \
* smoothing - log_prob * (1 - smoothing);
max_log_sum_exp[blockIdx.x] = max_k + std::log(sumAll);
}
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t>
__device__ __forceinline__ void
apply(scalar_t *gradInput,
scalar_t *logits,
outscalar_t *max_log_sum_exp,
outscalar_t *gradOutput,
int64_t *labels,
const float smoothing,
int classes)
{
accscalar_t smooth_positives = 1.0 - smoothing;
accscalar_t smooth_negatives = smoothing / classes;
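// Gradient of the label-smoothed loss w.r.t. logit j:
// gradOutput * (softmax(x)_j - (j == label ? smooth_positives : 0) - smooth_negatives),
// where exp(x_j - coeff) below is exactly softmax(x)_j since coeff = max + log(sum exp).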
accscalar_t tmpGradOutput = gradOutput[blockIdx.x];
int64_t label = labels[blockIdx.x];
accscalar_t coeff = max_log_sum_exp[blockIdx.x];
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
for (; offset < classes - last; offset += blockDim.x * ILP) {
accscalar_t tmpLogits[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
tmpLogits[j] = static_cast<accscalar_t>(logits[offset + j * blockDim.x]);
}
#pragma unroll
for (int j = 0; j < ILP; ++j)
gradInput[offset + j * blockDim.x] = tmpGradOutput * (
std::exp(tmpLogits[j] - coeff) - static_cast<accscalar_t>(
(offset + j * blockDim.x == label) ? 1 : 0) *
smooth_positives - smooth_negatives);
}
for (; offset < classes; offset += blockDim.x)
gradInput[offset] = tmpGradOutput * (std::exp(
static_cast<accscalar_t>(logits[offset]) - coeff) -
static_cast<accscalar_t>((offset == label) ? 1 : 0) *
smooth_positives - smooth_negatives);
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t>
__device__ __forceinline__ void
aligned_apply(int shift,
scalar_t *gradInput,
scalar_t *logits,
outscalar_t *max_log_sum_exp,
outscalar_t *gradOutput,
int64_t *labels,
const float smoothing,
int classes)
{
accscalar_t smooth_positives = 1.0 - smoothing;
accscalar_t smooth_negatives = smoothing / classes;
accscalar_t tmpGradOutput = gradOutput[blockIdx.x];
int64_t label = labels[blockIdx.x];
accscalar_t coeff = max_log_sum_exp[blockIdx.x];
int offset = threadIdx.x;
// shift and do 1
if(shift > 0){
logits -= shift;
gradInput -= shift;
classes += shift;
if(threadIdx.x >= shift){
gradInput[offset] = tmpGradOutput * (std::exp(
static_cast<accscalar_t>(logits[offset]) - coeff) -
static_cast<accscalar_t>(((offset - shift) == label) ? 1 : 0) *
smooth_positives - smooth_negatives);
}
classes -= blockDim.x;
gradInput += blockDim.x;
logits += blockDim.x;
shift -= blockDim.x;
}
int last = classes % (ILP * blockDim.x);
typedef typename std::aligned_storage<ILP*sizeof(scalar_t), ILP*alignof(scalar_t)>::type LoadT;
// input
scalar_t v[ILP];
LoadT* value = reinterpret_cast<LoadT*>(&v);
// output
scalar_t r[ILP];
LoadT* result = reinterpret_cast<LoadT*>(&r);
for (; offset * ILP < (classes - last); offset += blockDim.x) {
*value = reinterpret_cast<LoadT*>(logits)[offset];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
r[j] = tmpGradOutput * (std::exp(
static_cast<accscalar_t>(v[j]) - coeff) -
static_cast<accscalar_t>(((ILP * offset + j - shift) == label) ? 1 : 0) *
smooth_positives - smooth_negatives);
}
reinterpret_cast<LoadT*>(gradInput)[offset] = *result;
}
offset = classes - last + threadIdx.x;
for (; offset < classes; offset += blockDim.x)
gradInput[offset] = tmpGradOutput * (std::exp(
static_cast<accscalar_t>(logits[offset]) - coeff) -
static_cast<accscalar_t>(((offset - shift) == label) ? 1 : 0) *
smooth_positives - smooth_negatives);
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxXEntropyBackward(
scalar_t *gradInput,
scalar_t *logits,
outscalar_t *max_log_sum_exp,
outscalar_t *gradOutput,
int64_t *labels,
const float smoothing,
int classes)
{
gradInput += blockIdx.x * classes;
logits += blockIdx.x * classes;
// Do vectorized load/store when input/output have same alignment
const int shift = ((uint64_t)logits) % ALIGN_BYTES / sizeof(scalar_t);
const int shift_ = ((uint64_t)gradInput) % ALIGN_BYTES / sizeof(scalar_t);
if (shift == shift_){
aligned_apply<ILP, scalar_t, accscalar_t, outscalar_t>(shift, gradInput, logits, max_log_sum_exp, gradOutput, labels, smoothing, classes);
}
else {
apply<ILP, scalar_t, accscalar_t, outscalar_t>(gradInput, logits, max_log_sum_exp, gradOutput, labels, smoothing, classes);
}
}
template<template<typename, typename, typename> class Epilogue>
std::vector<Tensor> host_softmax_xentropy(
const Tensor & input_,
const Tensor & labels_,
const float smoothing,
const bool half_to_float){
if (half_to_float) AT_ASSERTM(input_.type().scalarType() == ScalarType::Half,"conversion is supported for Half type only");
AT_ASSERTM(labels_.type().scalarType() == ScalarType::Long,"Label type should be CUDA Long");
auto input = input_.contiguous();
Tensor max_log_sum_exp = at::empty_like(labels_, half_to_float ? input.options().dtype(ScalarType::Float) : input.options());
Tensor losses = at::empty_like(labels_, input_.options().dtype(ScalarType::Float));
static_assert(std::is_same<acc_type<at::Half, true>, float>::value ||
std::is_same<acc_type<at::Half, true>, double>::value,
"accscalar_t for half should be float or double");
AT_ASSERTM(input.dim() == 2, "Currently only 2 dim input supported");
AT_ASSERTM(labels_.dim() == 1, "Labels should be 1 dimensional");
AT_ASSERTM(input.size(0) == labels_.size(0), "Input and label should have same number of examples");
AT_ASSERTM(input.numel() > 0, "Number of classes in input should not be 0");
const int64_t dim = 1;
int64_t outer_size = 1;
int64_t dim_size = input.size(dim);
int64_t inner_size = 1;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
for (int64_t i = 0; i < dim; ++i)
outer_size *= input.size(i);
for (int64_t i = dim + 1; i < input.dim(); ++i)
inner_size *= input.size(i);
// This kernel spawns a block per each element in the batch.
// XXX: it assumes that inner_size == 1
TORCH_CHECK(inner_size == 1, "Currently only inner size 1 supported");
dim3 grid(outer_size);
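// Launch configuration: one block per batch row. The dynamic shared memory
// passed to the forward kernel holds two accscalar_t values per thread:
// one buffer for the running max and one for the running sum reduction.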
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "host_softmax_xentropy",
using accscalar_t = at::acc_type<scalar_t_0, true>;
const int ILP = sizeof(float4)/sizeof(scalar_t_0);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
if (!half_to_float) {
cunn_SoftMaxXEntropyForward<ILP, scalar_t_0, accscalar_t, scalar_t_0, Epilogue>
<<<grid, block, 2 * block.x * sizeof(accscalar_t), stream>>>(
losses.DATA_PTR<accscalar_t>(), max_log_sum_exp.DATA_PTR<scalar_t_0>(),
input.DATA_PTR<scalar_t_0>(), labels_.DATA_PTR<int64_t>(),
dim_size, smoothing
);
} else {
cunn_SoftMaxXEntropyForward<ILP, scalar_t_0, accscalar_t, accscalar_t, Epilogue>
<<<grid, block, 2 * block.x * sizeof(accscalar_t), stream>>>(
losses.DATA_PTR<accscalar_t>(), max_log_sum_exp.DATA_PTR<accscalar_t>(),
input.DATA_PTR<scalar_t_0>(), labels_.DATA_PTR<int64_t>(),
dim_size, smoothing
);
}
);
THCudaCheck(cudaGetLastError());
std::vector<at::Tensor> ret = {losses, max_log_sum_exp};
return ret;
}
template<template<typename, typename, typename> class Epilogue>
Tensor host_softmax_xentropy_backward(
const at::Tensor &grad_loss,
const at::Tensor &logits_,
const at::Tensor &max_log_sum_exp,
const at::Tensor &labels,
const float smoothing,
bool half_to_float) {
const int64_t dim = 1;
Tensor gI = at::empty_like(logits_);
if (grad_loss.numel() == 0) {
return gI;
}
auto grad = grad_loss.contiguous();
auto logits = logits_.contiguous();
static_assert(std::is_same<acc_type<at::Half, true>, float>::value ||
std::is_same<acc_type<at::Half, true>, double>::value,
"accscalar_t for half should be float or double");
if (grad.dim() == 0) grad = grad.view(1);
AT_ASSERTM(logits_.dim() == 2, "Currently only 2 dim input supported");
AT_ASSERTM(labels.dim() == 1, "Labels should be 1 dimensional");
AT_ASSERTM(logits_.numel() > 0, "Number of classes in input should not be 0");
AT_ASSERTM(logits_.size(0) == labels.size(0), "Input and label should have same number of examples");
AT_ASSERTM(labels.size(0) == grad.size(0), "Label and loss should have same number of examples");
int64_t outer_size = 1;
int64_t dim_size = logits.size(dim);
int64_t inner_size = 1;
for (int64_t i = 0; i < dim; ++i)
outer_size *= logits.size(i);
for (int64_t i = dim + 1; i < logits.dim(); ++i)
inner_size *= logits.size(i);
// See descriptions of kernels above.
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
TORCH_CHECK(inner_size == 1, "Currently only inner size 1 supported");
dim3 grid(outer_size);
DISPATCH_FLOAT_AND_HALF(gI.scalar_type(), 0, "host_softmax_xentropy_backward",
using accscalar_t = acc_type<scalar_t_0, true>;
const int ILP = sizeof(float4)/sizeof(scalar_t_0);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
if (!half_to_float) {
cunn_SoftMaxXEntropyBackward<ILP, scalar_t_0, accscalar_t, scalar_t_0, Epilogue>
<<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
gI.DATA_PTR<scalar_t_0>(), logits.DATA_PTR<scalar_t_0>(),
max_log_sum_exp.DATA_PTR<scalar_t_0>(),
grad.DATA_PTR<scalar_t_0>(), labels.DATA_PTR<int64_t>(),
smoothing, dim_size
);
} else {
cunn_SoftMaxXEntropyBackward<ILP, scalar_t_0, accscalar_t, accscalar_t, Epilogue>
<<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
gI.DATA_PTR<scalar_t_0>(), logits.DATA_PTR<scalar_t_0>(),
max_log_sum_exp.DATA_PTR<accscalar_t>(),
grad.DATA_PTR<accscalar_t>(), labels.DATA_PTR<int64_t>(),
smoothing, dim_size
);
}
);
THCudaCheck(cudaGetLastError());
return gI;
}
std::vector<Tensor> softmax_xentropy_cuda(const Tensor &input, const Tensor &labels, const float smoothing, const bool half_to_float){
return host_softmax_xentropy<LogSoftMaxForwardEpilogue>(input, labels, smoothing, half_to_float);
}
at::Tensor softmax_xentropy_backward_cuda(
const at::Tensor &grad_loss,
const at::Tensor &logits,
const at::Tensor &max_log_sum_exp,
const at::Tensor &labels,
const float smoothing) {
bool half_to_float = grad_loss.type().scalarType() != logits.type().scalarType();
if (half_to_float) {
AT_ASSERTM((grad_loss.type().scalarType() == ScalarType::Float && logits.type().scalarType() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float");
}
return host_softmax_xentropy_backward<LogSoftMaxBackwardEpilogue>(grad_loss, logits, max_log_sum_exp, labels, smoothing, half_to_float);
}
|
01633cbf12d3ff9641355062f4a78a69c98b3287.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/common_layers.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
template<typename Dtype>
void SilenceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// Do nothing.
}
template<typename Dtype>
void SilenceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
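// SilenceLayer produces no top blobs, so there is nothing to backpropagate;
// when propagation is requested for a bottom blob its GPU buffer is simply
// cleared to zero (caffe_gpu_set on the CUDA backend, an OpenCL kernel via
// GreenTea otherwise).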
for (int i = 0; i < bottom.size(); ++i) {
if (propagate_down[i]) {
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
caffe_gpu_set(bottom[i]->count(), Dtype(0),
bottom[i]->mutable_gpu_data());
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_context_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_context_->id());
viennacl::ocl::kernel &oclk_gpu_set = program.get_kernel(
CL_KERNEL_SELECT("gpu_set"));
viennacl::ocl::enqueue(
oclk_gpu_set(
bottom[i]->count(), Dtype(0),
WrapHandle((cl_mem) bottom[i]->mutable_gpu_data(), &ctx)),
ctx.get_queue());
ctx.get_queue().finish();
#endif
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SilenceLayer);
} // namespace caffe
| 01633cbf12d3ff9641355062f4a78a69c98b3287.cu | #include <vector>
#include "caffe/common_layers.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
template<typename Dtype>
void SilenceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// Do nothing.
}
template<typename Dtype>
void SilenceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
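// SilenceLayer produces no top blobs, so there is nothing to backpropagate;
// when propagation is requested for a bottom blob its GPU buffer is simply
// cleared to zero (caffe_gpu_set on the CUDA backend, an OpenCL kernel via
// GreenTea otherwise).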
for (int i = 0; i < bottom.size(); ++i) {
if (propagate_down[i]) {
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
caffe_gpu_set(bottom[i]->count(), Dtype(0),
bottom[i]->mutable_gpu_data());
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_context_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_context_->id());
viennacl::ocl::kernel &oclk_gpu_set = program.get_kernel(
CL_KERNEL_SELECT("gpu_set"));
viennacl::ocl::enqueue(
oclk_gpu_set(
bottom[i]->count(), Dtype(0),
WrapHandle((cl_mem) bottom[i]->mutable_gpu_data(), &ctx)),
ctx.get_queue());
ctx.get_queue().finish();
#endif
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SilenceLayer);
} // namespace caffe
|
2813c82450bca004316df81b9b1593b86cf03d01.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void primal(float* u, float* u_, const float* f, const float* p1, const float* p2, const double tau, const int X, const int Y)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
// center point
int c = y*X + x;
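// Note: the kernel assumes the launch grid exactly covers the X*Y image; there
// is no bounds check on x and y. The branches below form the backward-difference
// divergence of the dual field (p1, p2) with zero boundary conditions, i.e. the
// adjoint (up to sign) of the forward-difference gradient. The final lines are an
// implicit (proximal) update of u for a quadratic data term followed by the
// over-relaxation u_ = 2*u - u_old, as in Chambolle-Pock style primal-dual schemes.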
float div_x = 0.0f;
float div_y = 0.0f;
if (x == 0)
div_x = p1[c];
if (x > 0 && x < X-1)
div_x = p1[c]-p1[c-1];
if (x == X-1)
div_x = -p1[c-1];
if (y == 0)
div_y = p2[c];
if (y > 0 && y < Y-1)
div_y = p2[c]-p2[c-X];
if (y == Y-1)
div_y = -p2[c-X];
float u_old = u[c];
u[c] = (u_old + tau*(+div_x+div_y+f[c]))/(1+tau);
u_[c] = 2*u[c]-u_old;
} | 2813c82450bca004316df81b9b1593b86cf03d01.cu | #include "includes.h"
__global__ void primal(float* u, float* u_, const float* f, const float* p1, const float* p2, const double tau, const int X, const int Y)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
// center point
int c = y*X + x;
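// Note: the kernel assumes the launch grid exactly covers the X*Y image; there
// is no bounds check on x and y. The branches below form the backward-difference
// divergence of the dual field (p1, p2) with zero boundary conditions, i.e. the
// adjoint (up to sign) of the forward-difference gradient. The final lines are an
// implicit (proximal) update of u for a quadratic data term followed by the
// over-relaxation u_ = 2*u - u_old, as in Chambolle-Pock style primal-dual schemes.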
float div_x = 0.0f;
float div_y = 0.0f;
if (x == 0)
div_x = p1[c];
if (x > 0 && x < X-1)
div_x = p1[c]-p1[c-1];
if (x == X-1)
div_x = -p1[c-1];
if (y == 0)
div_y = p2[c];
if (y > 0 && y < Y-1)
div_y = p2[c]-p2[c-X];
if (y == Y-1)
div_y = -p2[c-X];
float u_old = u[c];
u[c] = (u_old + tau*(+div_x+div_y+f[c]))/(1+tau);
u_[c] = 2*u[c]-u_old;
} |
5aba394b63aa002789eefd4a59aca934ea186d2a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Thrust code needs to be compiled with nvcc
#include <memory>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
#include "core/providers/rocm/cu_inc/common.cuh"
#include "miopen_common.h"
namespace onnxruntime {
namespace rocm {
template <typename T, int NumThreadsPerBlock, int NumElementsPerThread>
__global__ void _Fill(
T* output_data,
T val,
HIP_LONG N) {
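// Each thread writes up to NumElementsPerThread values: a block covers a
// contiguous tile of NumElementsPerThread * blockDim.x elements, and each
// thread strides by blockDim.x between its per-thread iterations.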
HIP_LONG id = NumElementsPerThread * blockDim.x * blockIdx.x + threadIdx.x;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
output_data[id] = val;
id += blockDim.x;
}
}
}
template <typename T>
void Fill(hipStream_t stream, T* output, T value, int64_t count) {
int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread));
HIP_LONG N = static_cast<HIP_LONG>(count);
hipLaunchKernelGGL(HIP_KERNEL_NAME(_Fill<T, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, output, value, N);
}
template <typename T>
class ConstantBufferImpl : public IConstantBuffer<T> {
public:
ConstantBufferImpl(T val) : val_(val), buffer_(nullptr), count_(0) {
}
~ConstantBufferImpl() {
if (buffer_)
hipFree(buffer_);
}
virtual const T* GetBuffer(hipStream_t stream, size_t count) {
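// Lazily grow the cached device buffer: reallocate and refill with val_ only
// when a larger count is requested; smaller or equal requests reuse the
// existing allocation.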
if (count > count_) {
if (buffer_) {
hipFree(buffer_);
buffer_ = nullptr;
}
HIP_CALL_THROW(hipMalloc(&buffer_, count * sizeof(T)));
count_ = count;
Fill(stream, buffer_, val_, count);
}
return buffer_;
}
private:
T* buffer_;
size_t count_;
T val_;
};
template <typename T>
std::unique_ptr<IConstantBuffer<T>> CreateConstantOnes() {
return onnxruntime::make_unique<ConstantBufferImpl<T>>(Consts<T>::One);
}
template std::unique_ptr<IConstantBuffer<float>> CreateConstantOnes<float>();
template std::unique_ptr<IConstantBuffer<double>> CreateConstantOnes<double>();
template std::unique_ptr<IConstantBuffer<half>> CreateConstantOnes<half>();
#define SPECIALIZED_FILL(T) \
template void Fill<T>(hipStream_t stream, T * output, T value, int64_t count);
SPECIALIZED_FILL(int8_t)
SPECIALIZED_FILL(int16_t)
SPECIALIZED_FILL(int32_t)
SPECIALIZED_FILL(int64_t)
SPECIALIZED_FILL(float)
SPECIALIZED_FILL(double)
SPECIALIZED_FILL(__half)
} // namespace rocm
} // namespace onnxruntime
| 5aba394b63aa002789eefd4a59aca934ea186d2a.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Thrust code needs to be compiled with nvcc
#include <memory>
#include "core/providers/rocm/shared_inc/rocm_utils.h"
#include "core/providers/rocm/cu_inc/common.cuh"
#include "miopen_common.h"
namespace onnxruntime {
namespace rocm {
template <typename T, int NumThreadsPerBlock, int NumElementsPerThread>
__global__ void _Fill(
T* output_data,
T val,
HIP_LONG N) {
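// Each thread writes up to NumElementsPerThread values: a block covers a
// contiguous tile of NumElementsPerThread * blockDim.x elements, and each
// thread strides by blockDim.x between its per-thread iterations.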
HIP_LONG id = NumElementsPerThread * blockDim.x * blockIdx.x + threadIdx.x;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
output_data[id] = val;
id += blockDim.x;
}
}
}
template <typename T>
void Fill(hipStream_t stream, T* output, T value, int64_t count) {
int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread));
HIP_LONG N = static_cast<HIP_LONG>(count);
hipLaunchKernelGGL(HIP_KERNEL_NAME(_Fill<T, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, output, value, N);
}
template <typename T>
class ConstantBufferImpl : public IConstantBuffer<T> {
public:
ConstantBufferImpl(T val) : val_(val), buffer_(nullptr), count_(0) {
}
~ConstantBufferImpl() {
if (buffer_)
hipFree(buffer_);
}
virtual const T* GetBuffer(hipStream_t stream, size_t count) {
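// Lazily grow the cached device buffer: reallocate and refill with val_ only
// when a larger count is requested; smaller or equal requests reuse the
// existing allocation.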
if (count > count_) {
if (buffer_) {
hipFree(buffer_);
buffer_ = nullptr;
}
HIP_CALL_THROW(hipMalloc(&buffer_, count * sizeof(T)));
count_ = count;
Fill(stream, buffer_, val_, count);
}
return buffer_;
}
private:
T* buffer_;
size_t count_;
T val_;
};
template <typename T>
std::unique_ptr<IConstantBuffer<T>> CreateConstantOnes() {
return onnxruntime::make_unique<ConstantBufferImpl<T>>(Consts<T>::One);
}
template std::unique_ptr<IConstantBuffer<float>> CreateConstantOnes<float>();
template std::unique_ptr<IConstantBuffer<double>> CreateConstantOnes<double>();
template std::unique_ptr<IConstantBuffer<half>> CreateConstantOnes<half>();
#define SPECIALIZED_FILL(T) \
template void Fill<T>(hipStream_t stream, T * output, T value, int64_t count);
SPECIALIZED_FILL(int8_t)
SPECIALIZED_FILL(int16_t)
SPECIALIZED_FILL(int32_t)
SPECIALIZED_FILL(int64_t)
SPECIALIZED_FILL(float)
SPECIALIZED_FILL(double)
SPECIALIZED_FILL(__half)
} // namespace rocm
} // namespace onnxruntime
|
bb555db928a15650d6bd0f1b3ed4fb135754332c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// for printing arrays
#include <iostream>
#include <iomanip>
#include <sstream>
#include <string>
// for min
#include <algorithm>
#include "util/cuda_wrap.h"
#include "fixnum_array.h"
// TODO: The only device function in this file is the dispatch kernel
// mechanism, which could arguably be placed elsewhere, thereby
// allowing this file to be compiled completely for the host.
// Notes: Read programming guide Section K.3
// - Can prefetch unified memory
// - Can advise on location of unified memory
// TODO: Can I use smart pointers? unique_ptr?
// TODO: Clean this up
namespace {
typedef std::uint8_t byte;
template< typename T >
static byte *as_byte_ptr(T *ptr) {
return reinterpret_cast<byte *>(ptr);
}
template< typename T >
static const byte *as_byte_ptr(const T *ptr) {
return reinterpret_cast<const byte *>(ptr);
}
// TODO: refactor from word_fixnum.
template< typename T >
T ceilquo(T n, T d) {
return (n + d - 1) / d;
}
}
template< typename fixnum >
fixnum_array<fixnum> *
fixnum_array<fixnum>::create(size_t nelts) {
fixnum_array *a = new fixnum_array;
a->nelts = nelts;
if (nelts > 0) {
size_t nbytes = nelts * fixnum::BYTES;
cuda_malloc_managed(&a->ptr, nbytes);
}
return a;
}
template< typename fixnum >
template< typename T >
fixnum_array<fixnum> *
fixnum_array<fixnum>::create(size_t nelts, T init) {
fixnum_array *a = create(nelts);
byte *p = as_byte_ptr(a->ptr);
const byte *in = as_byte_ptr(&init);
byte elt[fixnum::BYTES];
memset(elt, 0, fixnum::BYTES);
std::copy(in, in + sizeof(T), elt);
for (uint32_t i = 0; i < nelts; ++i, p += fixnum::BYTES)
fixnum::from_bytes(p, elt, fixnum::BYTES);
return a;
}
template< typename fixnum >
fixnum_array<fixnum> *
fixnum_array<fixnum>::create(const byte *data, size_t total_bytes, size_t bytes_per_elt) {
// FIXME: Should handle this error more appropriately
if (total_bytes == 0 || bytes_per_elt == 0)
return nullptr;
size_t nelts = ceilquo(total_bytes, bytes_per_elt);
fixnum_array *a = create(nelts);
byte *p = as_byte_ptr(a->ptr);
const byte *d = data;
for (size_t i = 0; i < nelts; ++i) {
fixnum::from_bytes(p, d, bytes_per_elt);
p += fixnum::BYTES;
d += bytes_per_elt;
}
return a;
}
// TODO: This doesn't belong here.
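// rotate_array: copy `in` to `out` with every element (words_per_elt digits wide)
// moved i element positions with wrap-around; negative or out-of-range i is first
// reduced into [0, nelts).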
template< typename digit >
void
rotate_array(digit *out, const digit *in, int nelts, int words_per_elt, int i) {
if (i < 0) {
int j = -i;
i += nelts * ceilquo(j, nelts);
assert(i >= 0 && i < nelts);
i = nelts - i;
} else if (i >= nelts)
i %= nelts;
int pivot = i * words_per_elt;
int nwords = nelts * words_per_elt;
std::copy(in, in + nwords - pivot, out + pivot);
std::copy(in + nwords - pivot, in + nwords, out);
}
// TODO: Find a way to return a wrapper that just modifies the requested indices
// on the fly, rather than copying the whole array. Hard part will be making it
// work with map/dispatch.
template< typename fixnum >
fixnum_array<fixnum> *
fixnum_array<fixnum>::rotate(int i) {
fixnum_array *a = create(length());
byte *p = as_byte_ptr(a->ptr);
const byte *q = as_byte_ptr(ptr);
rotate_array(p, q, nelts, fixnum::BYTES, i);
return a;
}
template< typename fixnum >
fixnum_array<fixnum> *
fixnum_array<fixnum>::repeat(int ntimes) {
fixnum_array *a = create(length() * ntimes);
byte *p = as_byte_ptr(a->ptr);
const byte *q = as_byte_ptr(ptr);
int nbytes = nelts * fixnum::BYTES;
for (int i = 0; i < ntimes; ++i, p += nbytes)
std::copy(q, q + nbytes, p);
return a;
}
template< typename fixnum >
fixnum_array<fixnum> *
fixnum_array<fixnum>::rotations(int ntimes) {
fixnum_array *a = create(nelts * ntimes);
byte *p = as_byte_ptr(a->ptr);
const byte *q = as_byte_ptr(ptr);
int nbytes = nelts * fixnum::BYTES;
for (int i = 0; i < ntimes; ++i, p += nbytes)
rotate_array(p, q, nelts, fixnum::BYTES, i);
return a;
}
template< typename fixnum >
int
fixnum_array<fixnum>::set(int idx, const byte *data, size_t nbytes) {
// FIXME: Better error handling
if (idx < 0 || idx >= nelts)
return -1;
int off = idx * fixnum::BYTES;
const byte *q = as_byte_ptr(ptr);
return fixnum::from_bytes(q + off, data, nbytes);
}
template< typename fixnum >
fixnum_array<fixnum>::~fixnum_array() {
if (nelts > 0)
cuda_free(ptr);
}
template< typename fixnum >
int
fixnum_array<fixnum>::length() const {
return nelts;
}
template< typename fixnum >
size_t
fixnum_array<fixnum>::retrieve_into(byte *dest, size_t dest_space, int idx) const {
if (idx < 0 || idx > nelts) {
// FIXME: This is not the right way to handle an "index out of
// bounds" error.
return 0;
}
const byte *q = as_byte_ptr(ptr);
return fixnum::to_bytes(dest, dest_space, q + idx * fixnum::BYTES);
}
// FIXME: Can return fewer than nelts elements.
template< typename fixnum >
void
fixnum_array<fixnum>::retrieve_all(byte *dest, size_t dest_space, int *dest_nelts) const {
const byte *p = as_byte_ptr(ptr);
byte *d = dest;
int max_dest_nelts = dest_space / fixnum::BYTES;
*dest_nelts = std::min(nelts, max_dest_nelts);
for (int i = 0; i < *dest_nelts; ++i) {
fixnum::to_bytes(d, fixnum::BYTES, p);
p += fixnum::BYTES;
d += fixnum::BYTES;
}
}
namespace {
std::string
fixnum_as_str(const uint8_t *fn, int nbytes) {
std::ostringstream ss;
for (int i = nbytes - 1; i >= 0; --i) {
// These IO manipulators are forgotten after each use;
// i.e. they don't apply to the next output operation (whether
// it be in the next loop iteration or in the conditional
// below).
ss << std::setfill('0') << std::setw(2) << std::hex;
ss << static_cast<int>(fn[i]);
if (i && !(i & 3))
ss << ' ';
}
return ss.str();
}
}
template< typename fixnum >
std::ostream &
operator<<(std::ostream &os, const fixnum_array<fixnum> *fn_arr) {
constexpr int fn_bytes = fixnum::BYTES;
constexpr size_t bufsz = 4096;
uint8_t arr[bufsz];
int nelts;
fn_arr->retrieve_all(arr, bufsz, &nelts);
os << "( ";
if (nelts < fn_arr->length()) {
os << "insufficient space to retrieve array";
} else if (nelts > 0) {
os << fixnum_as_str(arr, fn_bytes);
for (int i = 1; i < nelts; ++i)
os << ", " << fixnum_as_str(arr + i*fn_bytes, fn_bytes);
}
os << " )" << std::flush;
return os;
}
template< template <typename> class Func, typename fixnum, typename... Args >
__global__ void
dispatch(int nelts, Args... args) {
// Get the slot index for the current thread.
int blk_tid_offset = blockDim.x * blockIdx.x;
int tid_in_blk = threadIdx.x;
int idx = (blk_tid_offset + tid_in_blk) / fixnum::SLOT_WIDTH;
if (idx < nelts) {
// TODO: Find a way to load each argument into a register before passing
// it to fn, and then unpack the return values where they belong. This
// will guarantee that all operations happen on registers, rather than
// inadvertently operating on memory.
Func<fixnum> fn;
// TODO: This offset calculation is entwined with fixnum layout and so
// belongs somewhere else.
int off = idx * fixnum::layout::WIDTH + fixnum::layout::laneIdx();
// TODO: This is hiding a sin against memory aliasing / management /
// type-safety.
fn(args[off]...);
}
}
template< typename fixnum >
template< template <typename> class Func, typename... Args >
void
fixnum_array<fixnum>::map(Args... args) {
// TODO: Set this to the number of threads on a single SM on the host GPU.
constexpr int BLOCK_SIZE = 192;
// FIXME: WARPSIZE should come from slot_layout
constexpr int WARPSIZE = 32;
// BLOCK_SIZE must be a multiple of warpSize
static_assert(!(BLOCK_SIZE % WARPSIZE),
"block size must be a multiple of warpSize");
int nelts = std::min( { args->length()... } );
constexpr int fixnums_per_block = BLOCK_SIZE / fixnum::SLOT_WIDTH;
// FIXME: nblocks could be too big for a single kernel call to handle
int nblocks = ceilquo(nelts, fixnums_per_block);
// nblocks > 0 iff nelts > 0
if (nblocks > 0) {
hipStream_t stream;
cuda_check(hipStreamCreate(&stream), "create stream");
// cuda_stream_attach_mem(stream, src->ptr);
// cuda_stream_attach_mem(stream, ptr);
cuda_check(hipStreamSynchronize(stream), "stream sync");
hipLaunchKernelGGL(( dispatch<Func, fixnum >), dim3(nblocks), dim3(BLOCK_SIZE), 0, stream , nelts, args->ptr...);
cuda_check(hipPeekAtLastError(), "kernel invocation/run");
cuda_check(hipStreamSynchronize(stream), "stream sync");
cuda_check(hipStreamDestroy(stream), "stream destroy");
// FIXME: Only synchronize when retrieving data from array
cuda_device_synchronize();
}
}
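// Illustrative use of map (sketch only; the functor name and its body are
// hypothetical, but the call shape matches dispatch() above, which hands one
// slot-wide element from each array to Func<fixnum>::operator()):
//
// template< typename fixnum >
// struct add_op {
// __device__ void operator()(fixnum &r, fixnum a, fixnum b) {
// fixnum::add(r, a, b); // assumes the fixnum type exposes a static add()
// }
// };
// ...
// res->map<add_op>(res, xs, ys); // element-wise r = a + b across the arrays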
| bb555db928a15650d6bd0f1b3ed4fb135754332c.cu | // for printing arrays
#include <iostream>
#include <iomanip>
#include <sstream>
#include <string>
// for min
#include <algorithm>
#include "util/cuda_wrap.h"
#include "fixnum_array.h"
// TODO: The only device function in this file is the dispatch kernel
// mechanism, which could arguably be placed elsewhere, thereby
// allowing this file to be compiled completely for the host.
// Notes: Read programming guide Section K.3
// - Can prefetch unified memory
// - Can advise on location of unified memory
// TODO: Can I use smart pointers? unique_ptr?
// TODO: Clean this up
namespace {
typedef std::uint8_t byte;
template< typename T >
static byte *as_byte_ptr(T *ptr) {
return reinterpret_cast<byte *>(ptr);
}
template< typename T >
static const byte *as_byte_ptr(const T *ptr) {
return reinterpret_cast<const byte *>(ptr);
}
// TODO: refactor from word_fixnum.
template< typename T >
T ceilquo(T n, T d) {
return (n + d - 1) / d;
}
}
template< typename fixnum >
fixnum_array<fixnum> *
fixnum_array<fixnum>::create(size_t nelts) {
fixnum_array *a = new fixnum_array;
a->nelts = nelts;
if (nelts > 0) {
size_t nbytes = nelts * fixnum::BYTES;
cuda_malloc_managed(&a->ptr, nbytes);
}
return a;
}
template< typename fixnum >
template< typename T >
fixnum_array<fixnum> *
fixnum_array<fixnum>::create(size_t nelts, T init) {
fixnum_array *a = create(nelts);
byte *p = as_byte_ptr(a->ptr);
const byte *in = as_byte_ptr(&init);
byte elt[fixnum::BYTES];
memset(elt, 0, fixnum::BYTES);
std::copy(in, in + sizeof(T), elt);
for (uint32_t i = 0; i < nelts; ++i, p += fixnum::BYTES)
fixnum::from_bytes(p, elt, fixnum::BYTES);
return a;
}
template< typename fixnum >
fixnum_array<fixnum> *
fixnum_array<fixnum>::create(const byte *data, size_t total_bytes, size_t bytes_per_elt) {
// FIXME: Should handle this error more appropriately
if (total_bytes == 0 || bytes_per_elt == 0)
return nullptr;
size_t nelts = ceilquo(total_bytes, bytes_per_elt);
fixnum_array *a = create(nelts);
byte *p = as_byte_ptr(a->ptr);
const byte *d = data;
for (size_t i = 0; i < nelts; ++i) {
fixnum::from_bytes(p, d, bytes_per_elt);
p += fixnum::BYTES;
d += bytes_per_elt;
}
return a;
}
// TODO: This doesn't belong here.
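// rotate_array: copy `in` to `out` with every element (words_per_elt digits wide)
// moved i element positions with wrap-around; negative or out-of-range i is first
// reduced into [0, nelts).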
template< typename digit >
void
rotate_array(digit *out, const digit *in, int nelts, int words_per_elt, int i) {
if (i < 0) {
int j = -i;
i += nelts * ceilquo(j, nelts);
assert(i >= 0 && i < nelts);
i = nelts - i;
} else if (i >= nelts)
i %= nelts;
int pivot = i * words_per_elt;
int nwords = nelts * words_per_elt;
std::copy(in, in + nwords - pivot, out + pivot);
std::copy(in + nwords - pivot, in + nwords, out);
}
// TODO: Find a way to return a wrapper that just modifies the requested indices
// on the fly, rather than copying the whole array. Hard part will be making it
// work with map/dispatch.
template< typename fixnum >
fixnum_array<fixnum> *
fixnum_array<fixnum>::rotate(int i) {
fixnum_array *a = create(length());
byte *p = as_byte_ptr(a->ptr);
const byte *q = as_byte_ptr(ptr);
rotate_array(p, q, nelts, fixnum::BYTES, i);
return a;
}
template< typename fixnum >
fixnum_array<fixnum> *
fixnum_array<fixnum>::repeat(int ntimes) {
fixnum_array *a = create(length() * ntimes);
byte *p = as_byte_ptr(a->ptr);
const byte *q = as_byte_ptr(ptr);
int nbytes = nelts * fixnum::BYTES;
for (int i = 0; i < ntimes; ++i, p += nbytes)
std::copy(q, q + nbytes, p);
return a;
}
template< typename fixnum >
fixnum_array<fixnum> *
fixnum_array<fixnum>::rotations(int ntimes) {
fixnum_array *a = create(nelts * ntimes);
byte *p = as_byte_ptr(a->ptr);
const byte *q = as_byte_ptr(ptr);
int nbytes = nelts * fixnum::BYTES;
for (int i = 0; i < ntimes; ++i, p += nbytes)
rotate_array(p, q, nelts, fixnum::BYTES, i);
return a;
}
template< typename fixnum >
int
fixnum_array<fixnum>::set(int idx, const byte *data, size_t nbytes) {
// FIXME: Better error handling
if (idx < 0 || idx >= nelts)
return -1;
int off = idx * fixnum::BYTES;
const byte *q = as_byte_ptr(ptr);
return fixnum::from_bytes(q + off, data, nbytes);
}
template< typename fixnum >
fixnum_array<fixnum>::~fixnum_array() {
if (nelts > 0)
cuda_free(ptr);
}
template< typename fixnum >
int
fixnum_array<fixnum>::length() const {
return nelts;
}
template< typename fixnum >
size_t
fixnum_array<fixnum>::retrieve_into(byte *dest, size_t dest_space, int idx) const {
if (idx < 0 || idx > nelts) {
// FIXME: This is not the right way to handle an "index out of
// bounds" error.
return 0;
}
const byte *q = as_byte_ptr(ptr);
return fixnum::to_bytes(dest, dest_space, q + idx * fixnum::BYTES);
}
// FIXME: Can return fewer than nelts elements.
template< typename fixnum >
void
fixnum_array<fixnum>::retrieve_all(byte *dest, size_t dest_space, int *dest_nelts) const {
const byte *p = as_byte_ptr(ptr);
byte *d = dest;
int max_dest_nelts = dest_space / fixnum::BYTES;
*dest_nelts = std::min(nelts, max_dest_nelts);
for (int i = 0; i < *dest_nelts; ++i) {
fixnum::to_bytes(d, fixnum::BYTES, p);
p += fixnum::BYTES;
d += fixnum::BYTES;
}
}
namespace {
std::string
fixnum_as_str(const uint8_t *fn, int nbytes) {
std::ostringstream ss;
for (int i = nbytes - 1; i >= 0; --i) {
// These IO manipulators are forgotten after each use;
// i.e. they don't apply to the next output operation (whether
// it be in the next loop iteration or in the conditional
// below).
ss << std::setfill('0') << std::setw(2) << std::hex;
ss << static_cast<int>(fn[i]);
if (i && !(i & 3))
ss << ' ';
}
return ss.str();
}
}
template< typename fixnum >
std::ostream &
operator<<(std::ostream &os, const fixnum_array<fixnum> *fn_arr) {
constexpr int fn_bytes = fixnum::BYTES;
constexpr size_t bufsz = 4096;
uint8_t arr[bufsz];
int nelts;
fn_arr->retrieve_all(arr, bufsz, &nelts);
os << "( ";
if (nelts < fn_arr->length()) {
os << "insufficient space to retrieve array";
} else if (nelts > 0) {
os << fixnum_as_str(arr, fn_bytes);
for (int i = 1; i < nelts; ++i)
os << ", " << fixnum_as_str(arr + i*fn_bytes, fn_bytes);
}
os << " )" << std::flush;
return os;
}
template< template <typename> class Func, typename fixnum, typename... Args >
__global__ void
dispatch(int nelts, Args... args) {
// Get the slot index for the current thread.
int blk_tid_offset = blockDim.x * blockIdx.x;
int tid_in_blk = threadIdx.x;
int idx = (blk_tid_offset + tid_in_blk) / fixnum::SLOT_WIDTH;
if (idx < nelts) {
// TODO: Find a way to load each argument into a register before passing
// it to fn, and then unpack the return values where they belong. This
// will guarantee that all operations happen on registers, rather than
// inadvertently operating on memory.
Func<fixnum> fn;
// TODO: This offset calculation is entwined with fixnum layout and so
// belongs somewhere else.
int off = idx * fixnum::layout::WIDTH + fixnum::layout::laneIdx();
// TODO: This is hiding a sin against memory aliasing / management /
// type-safety.
fn(args[off]...);
}
}
template< typename fixnum >
template< template <typename> class Func, typename... Args >
void
fixnum_array<fixnum>::map(Args... args) {
// TODO: Set this to the number of threads on a single SM on the host GPU.
constexpr int BLOCK_SIZE = 192;
// FIXME: WARPSIZE should come from slot_layout
constexpr int WARPSIZE = 32;
// BLOCK_SIZE must be a multiple of warpSize
static_assert(!(BLOCK_SIZE % WARPSIZE),
"block size must be a multiple of warpSize");
int nelts = std::min( { args->length()... } );
constexpr int fixnums_per_block = BLOCK_SIZE / fixnum::SLOT_WIDTH;
// FIXME: nblocks could be too big for a single kernel call to handle
int nblocks = ceilquo(nelts, fixnums_per_block);
// nblocks > 0 iff nelts > 0
if (nblocks > 0) {
cudaStream_t stream;
cuda_check(cudaStreamCreate(&stream), "create stream");
// cuda_stream_attach_mem(stream, src->ptr);
// cuda_stream_attach_mem(stream, ptr);
cuda_check(cudaStreamSynchronize(stream), "stream sync");
dispatch<Func, fixnum ><<< nblocks, BLOCK_SIZE, 0, stream >>>(nelts, args->ptr...);
cuda_check(cudaPeekAtLastError(), "kernel invocation/run");
cuda_check(cudaStreamSynchronize(stream), "stream sync");
cuda_check(cudaStreamDestroy(stream), "stream destroy");
// FIXME: Only synchronize when retrieving data from array
cuda_device_synchronize();
}
}
|
bf61c205f0790633fb1106f170b749161b3239e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*! \file ppmc_cuda.cu
* \brief Function definitions for the PPM kernels, using characteristic tracing.
Written following Stone et al. 2008. */
#ifdef CUDA
#ifdef PPMC
#include"gpu.hpp"
#include<math.h>
#include"global.h"
#include"global_cuda.h"
#include"ppmc_cuda.h"
#ifdef DE //PRESSURE_DE
#include"hydro_cuda.h"
#endif
/*! \fn void PPMC_cuda(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R, int nx, int ny, int nz, int n_ghost, Real dx, Real dt, Real gamma, int dir, int n_fields)
* \brief When passed a stencil of conserved variables, returns the left and right
boundary values for the interface calculated using ppm. */
__global__ void PPMC_cuda(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R, int nx, int ny, int nz, int n_ghost, Real dx, Real dt, Real gamma, int dir, int n_fields)
{
int n_cells = nx*ny*nz;
int o1, o2, o3;
if (dir == 0 ) {
o1 = 1; o2 = 2; o3 = 3;
}
if (dir == 1 ) {
o1 = 2; o2 = 3; o3 = 1;
}
if (dir == 2 ) {
o1 = 3; o2 = 1; o3 = 2;
}
// declare primitive variables for each stencil
// these will be placed into registers for each thread
Real d_i, vx_i, vy_i, vz_i, p_i;
Real d_imo, vx_imo, vy_imo, vz_imo, p_imo;
Real d_ipo, vx_ipo, vy_ipo, vz_ipo, p_ipo;
Real d_imt, vx_imt, vy_imt, vz_imt, p_imt;
Real d_ipt, vx_ipt, vy_ipt, vz_ipt, p_ipt;
// declare other variables to be used
Real a;
Real del_d_L, del_vx_L, del_vy_L, del_vz_L, del_p_L;
Real del_d_R, del_vx_R, del_vy_R, del_vz_R, del_p_R;
Real del_d_C, del_vx_C, del_vy_C, del_vz_C, del_p_C;
Real del_d_G, del_vx_G, del_vy_G, del_vz_G, del_p_G;
Real del_a_0_L, del_a_1_L, del_a_2_L, del_a_3_L, del_a_4_L;
Real del_a_0_R, del_a_1_R, del_a_2_R, del_a_3_R, del_a_4_R;
Real del_a_0_C, del_a_1_C, del_a_2_C, del_a_3_C, del_a_4_C;
Real del_a_0_G, del_a_1_G, del_a_2_G, del_a_3_G, del_a_4_G;
Real del_a_0_m, del_a_1_m, del_a_2_m, del_a_3_m, del_a_4_m;
Real lim_slope_a, lim_slope_b;
Real del_d_m_imo, del_vx_m_imo, del_vy_m_imo, del_vz_m_imo, del_p_m_imo;
Real del_d_m_i, del_vx_m_i, del_vy_m_i, del_vz_m_i, del_p_m_i;
Real del_d_m_ipo, del_vx_m_ipo, del_vy_m_ipo, del_vz_m_ipo, del_p_m_ipo;
Real d_L, vx_L, vy_L, vz_L, p_L;
Real d_R, vx_R, vy_R, vz_R, p_R;
// #ifdef CTU
#ifndef VL
Real dtodx = dt/dx;
Real d_6, vx_6, vy_6, vz_6, p_6;
Real lambda_m, lambda_0, lambda_p;
Real lambda_max, lambda_min;
Real A, B, C, D;
Real chi_1, chi_2, chi_3, chi_4, chi_5;
Real sum_1, sum_2, sum_3, sum_4, sum_5;
#endif //CTU
#ifdef DE
Real ge_i, ge_imo, ge_ipo, ge_imt, ge_ipt;
Real del_ge_L, del_ge_R, del_ge_C, del_ge_G;
Real del_ge_m_imo, del_ge_m_i, del_ge_m_ipo;
Real ge_L, ge_R;
Real E_kin, E, dge;
// #ifdef CTU
#ifndef VL
Real chi_ge, sum_ge, ge_6;
#endif
#endif
#ifdef SCALAR
Real scalar_i[NSCALARS], scalar_imo[NSCALARS], scalar_ipo[NSCALARS], scalar_imt[NSCALARS], scalar_ipt[NSCALARS];
Real del_scalar_L[NSCALARS], del_scalar_R[NSCALARS], del_scalar_C[NSCALARS], del_scalar_G[NSCALARS];
Real del_scalar_m_imo[NSCALARS], del_scalar_m_i[NSCALARS], del_scalar_m_ipo[NSCALARS];
Real scalar_L[NSCALARS], scalar_R[NSCALARS];
// #ifdef CTU
#ifndef VL
Real chi_scalar[NSCALARS], sum_scalar[NSCALARS], scalar_6[NSCALARS];
#endif
#endif
// get a thread ID
int blockId = blockIdx.x + blockIdx.y*gridDim.x;
int tid = threadIdx.x + blockId * blockDim.x;
int id;
int zid = tid / (nx*ny);
int yid = (tid - zid*nx*ny) / nx;
int xid = tid - zid*nx*ny - yid*nx;
int xs, xe, ys, ye, zs, ze;
if (dir == 0) {
xs = 2; xe = nx-3;
ys = 0; ye = ny;
zs = 0; ze = nz;
}
if (dir == 1) {
xs = 0; xe = nx;
ys = 2; ye = ny-3;
zs = 0; ze = nz;
}
if (dir == 2) {
xs = 0; xe = nx;
ys = 0; ye = ny;
zs = 2; ze = nz-3;
}
if (xid >= xs && xid < xe && yid >= ys && yid < ye && zid >= zs && zid < ze)
{
// load the 5-cell stencil into registers
// cell i
id = xid + yid*nx + zid*nx*ny;
d_i = dev_conserved[ id];
vx_i = dev_conserved[o1*n_cells + id] / d_i;
vy_i = dev_conserved[o2*n_cells + id] / d_i;
vz_i = dev_conserved[o3*n_cells + id] / d_i;
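// Pressure of cell i. With dual energy (DE / PRESSURE_DE) the pressure comes from
// Get_Pressure_From_DE, which selects between the total-energy estimate (E - E_kin)
// and the separately advected internal energy dge; otherwise it is computed directly
// from the total energy.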
#ifdef DE //PRESSURE_DE
E = dev_conserved[4*n_cells + id];
E_kin = 0.5 * d_i * ( vx_i*vx_i + vy_i*vy_i + vz_i*vz_i );
dge = dev_conserved[(n_fields-1)*n_cells + id];
p_i = Get_Pressure_From_DE( E, E - E_kin, dge, gamma );
#else
p_i = (dev_conserved[4*n_cells + id] - 0.5*d_i*(vx_i*vx_i + vy_i*vy_i + vz_i*vz_i)) * (gamma - 1.0);
#endif //PRESSURE_DE
p_i = fmax(p_i, (Real) TINY_NUMBER);
#ifdef DE
ge_i = dge / d_i;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_i[i] = dev_conserved[(5+i)*n_cells + id] / d_i;
}
#endif
// cell i-1
if (dir == 0) id = xid-1 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid-1)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid-1)*nx*ny;
d_imo = dev_conserved[ id];
vx_imo = dev_conserved[o1*n_cells + id] / d_imo;
vy_imo = dev_conserved[o2*n_cells + id] / d_imo;
vz_imo = dev_conserved[o3*n_cells + id] / d_imo;
#ifdef DE //PRESSURE_DE
E = dev_conserved[4*n_cells + id];
E_kin = 0.5 * d_imo * ( vx_imo*vx_imo + vy_imo*vy_imo + vz_imo*vz_imo );
dge = dev_conserved[(n_fields-1)*n_cells + id];
p_imo = Get_Pressure_From_DE( E, E - E_kin, dge, gamma );
#else
p_imo = (dev_conserved[4*n_cells + id] - 0.5*d_imo*(vx_imo*vx_imo + vy_imo*vy_imo + vz_imo*vz_imo)) * (gamma - 1.0);
#endif //PRESSURE_DE
p_imo = fmax(p_imo, (Real) TINY_NUMBER);
#ifdef DE
ge_imo = dge / d_imo;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_imo[i] = dev_conserved[(5+i)*n_cells + id] / d_imo;
}
#endif
// cell i+1
if (dir == 0) id = xid+1 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid+1)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid+1)*nx*ny;
d_ipo = dev_conserved[ id];
vx_ipo = dev_conserved[o1*n_cells + id] / d_ipo;
vy_ipo = dev_conserved[o2*n_cells + id] / d_ipo;
vz_ipo = dev_conserved[o3*n_cells + id] / d_ipo;
#ifdef DE //PRESSURE_DE
E = dev_conserved[4*n_cells + id];
E_kin = 0.5 * d_ipo * ( vx_ipo*vx_ipo + vy_ipo*vy_ipo + vz_ipo*vz_ipo );
dge = dev_conserved[(n_fields-1)*n_cells + id];
p_ipo = Get_Pressure_From_DE( E, E - E_kin, dge, gamma );
#else
p_ipo = (dev_conserved[4*n_cells + id] - 0.5*d_ipo*(vx_ipo*vx_ipo + vy_ipo*vy_ipo + vz_ipo*vz_ipo)) * (gamma - 1.0);
#endif //PRESSURE_DE
p_ipo = fmax(p_ipo, (Real) TINY_NUMBER);
#ifdef DE
ge_ipo = dge / d_ipo;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_ipo[i] = dev_conserved[(5+i)*n_cells + id] / d_ipo;
}
#endif
// cell i-2
if (dir == 0) id = xid-2 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid-2)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid-2)*nx*ny;
d_imt = dev_conserved[ id];
vx_imt = dev_conserved[o1*n_cells + id] / d_imt;
vy_imt = dev_conserved[o2*n_cells + id] / d_imt;
vz_imt = dev_conserved[o3*n_cells + id] / d_imt;
#ifdef DE //PRESSURE_DE
E = dev_conserved[4*n_cells + id];
E_kin = 0.5 * d_imt * ( vx_imt*vx_imt + vy_imt*vy_imt + vz_imt*vz_imt );
dge = dev_conserved[(n_fields-1)*n_cells + id];
p_imt = Get_Pressure_From_DE( E, E - E_kin, dge, gamma );
#else
p_imt = (dev_conserved[4*n_cells + id] - 0.5*d_imt*(vx_imt*vx_imt + vy_imt*vy_imt + vz_imt*vz_imt)) * (gamma - 1.0);
#endif //PRESSURE_DE
p_imt = fmax(p_imt, (Real) TINY_NUMBER);
#ifdef DE
ge_imt = dge / d_imt;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_imt[i] = dev_conserved[(5+i)*n_cells + id] / d_imt;
}
#endif
// cell i+2
if (dir == 0) id = xid+2 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid+2)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid+2)*nx*ny;
d_ipt = dev_conserved[ id];
vx_ipt = dev_conserved[o1*n_cells + id] / d_ipt;
vy_ipt = dev_conserved[o2*n_cells + id] / d_ipt;
vz_ipt = dev_conserved[o3*n_cells + id] / d_ipt;
#ifdef DE //PRESSURE_DE
E = dev_conserved[4*n_cells + id];
E_kin = 0.5 * d_ipt * ( vx_ipt*vx_ipt + vy_ipt*vy_ipt + vz_ipt*vz_ipt );
dge = dev_conserved[(n_fields-1)*n_cells + id];
p_ipt = Get_Pressure_From_DE( E, E - E_kin, dge, gamma );
#else
p_ipt = (dev_conserved[4*n_cells + id] - 0.5*d_ipt*(vx_ipt*vx_ipt + vy_ipt*vy_ipt + vz_ipt*vz_ipt)) * (gamma - 1.0);
#endif //PRESSURE_DE
p_ipt = fmax(p_ipt, (Real) TINY_NUMBER);
#ifdef DE
ge_ipt = dge / d_ipt;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_ipt[i] = dev_conserved[(5+i)*n_cells + id] / d_ipt;
}
#endif
//printf("%d %d %d %f %f %f %f %f\n", xid, yid, zid, d_i, vx_i, vy_i, vz_i, p_i);
// Steps 2 - 5 are repeated for cell i-1, i, and i+1
// Step 2 - Compute the left, right, centered, and van Leer differences of the primitive variables
// Note that here L and R refer to locations relative to the cell center
// Stone Eqn 36
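// The van Leer slope computed below is the harmonic mean of the one-sided differences,
//   del_q_G = 2*del_q_L*del_q_R / (del_q_L + del_q_R)  when del_q_L*del_q_R > 0, and 0 otherwise,
// so the limited slope vanishes at local extrema.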
// calculate the adiabatic sound speed in cell imo
a = sqrt(gamma*p_imo/d_imo);
// left
del_d_L = d_imo - d_imt;
del_vx_L = vx_imo - vx_imt;
del_vy_L = vy_imo - vy_imt;
del_vz_L = vz_imo - vz_imt;
del_p_L = p_imo - p_imt;
// right
del_d_R = d_i - d_imo;
del_vx_R = vx_i - vx_imo;
del_vy_R = vy_i - vy_imo;
del_vz_R = vz_i - vz_imo;
del_p_R = p_i - p_imo;
// centered
del_d_C = 0.5*(d_i - d_imt);
del_vx_C = 0.5*(vx_i - vx_imt);
del_vy_C = 0.5*(vy_i - vy_imt);
del_vz_C = 0.5*(vz_i - vz_imt);
del_p_C = 0.5*(p_i - p_imt);
// van Leer
if (del_d_L*del_d_R > 0.0) { del_d_G = 2.0*del_d_L*del_d_R / (del_d_L+del_d_R); }
else { del_d_G = 0.0; }
if (del_vx_L*del_vx_R > 0.0) { del_vx_G = 2.0*del_vx_L*del_vx_R / (del_vx_L+del_vx_R); }
else { del_vx_G = 0.0; }
if (del_vy_L*del_vy_R > 0.0) { del_vy_G = 2.0*del_vy_L*del_vy_R / (del_vy_L+del_vy_R); }
else { del_vy_G = 0.0; }
if (del_vz_L*del_vz_R > 0.0) { del_vz_G = 2.0*del_vz_L*del_vz_R / (del_vz_L+del_vz_R); }
else { del_vz_G = 0.0; }
if (del_p_L*del_p_R > 0.0) { del_p_G = 2.0*del_p_L*del_p_R / (del_p_L+del_p_R); }
else { del_p_G = 0.0; }
#ifdef DE
del_ge_L = ge_imo - ge_imt;
del_ge_R = ge_i - ge_imo;
del_ge_C = 0.5*(ge_i - ge_imt);
if (del_ge_L*del_ge_R > 0.0) { del_ge_G = 2.0*del_ge_L*del_ge_R / (del_ge_L+del_ge_R); }
else { del_ge_G = 0.0; }
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
del_scalar_L[i] = scalar_imo[i] - scalar_imt[i];
del_scalar_R[i] = scalar_i[i] - scalar_imo[i];
del_scalar_C[i] = 0.5*(scalar_i[i] - scalar_imt[i]);
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) { del_scalar_G[i] = 2.0*del_scalar_L[i]*del_scalar_R[i] / (del_scalar_L[i]+del_scalar_R[i]); }
else { del_scalar_G[i] = 0.0; }
}
#endif
// Step 3 - Project the left, right, centered and van Leer differences onto the characteristic variables
// Stone Eqn 37 (del_a are differences in characteristic variables, see Stone for notation)
// Use the eigenvectors given in Stone 2008, Appendix A
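// a_0 and a_4 are the left- and right-going acoustic waves (speeds vx-a and vx+a),
// a_1 is the entropy wave, and a_2, a_3 are the shear waves carrying vy and vz.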
del_a_0_L = -0.5*d_imo*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_1_L = del_d_L - del_p_L/(a*a);
del_a_2_L = del_vy_L;
del_a_3_L = del_vz_L;
del_a_4_L = 0.5*d_imo*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_0_R = -0.5*d_imo*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_1_R = del_d_R - del_p_R/(a*a);
del_a_2_R = del_vy_R;
del_a_3_R = del_vz_R;
del_a_4_R = 0.5*d_imo*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_0_C = -0.5*d_imo*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_1_C = del_d_C - del_p_C/(a*a);
del_a_2_C = del_vy_C;
del_a_3_C = del_vz_C;
del_a_4_C = 0.5*d_imo*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_0_G = -0.5*d_imo*del_vx_G/a + 0.5*del_p_G/(a*a);
del_a_1_G = del_d_G - del_p_G/(a*a);
del_a_2_G = del_vy_G;
del_a_3_G = del_vz_G;
del_a_4_G = 0.5*d_imo*del_vx_G/a + 0.5*del_p_G/(a*a);
// Step 4 - Apply monotonicity constraints to the differences in the characteristic variables
// Stone Eqn 38
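// For each characteristic the monotonized difference is
//   del_a_m = sgn(del_a_C) * min( 2*min(|del_a_L|,|del_a_R|), min(|del_a_C|,|del_a_G|) )
// when the one-sided differences share a sign, and 0 otherwise.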
del_a_0_m = del_a_1_m = del_a_2_m = del_a_3_m = del_a_4_m = 0.0;
if (del_a_0_L*del_a_0_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_0_L), fabs(del_a_0_R));
lim_slope_b = fmin(fabs(del_a_0_C), fabs(del_a_0_G));
del_a_0_m = sgn_CUDA(del_a_0_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_1_L*del_a_1_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_1_L), fabs(del_a_1_R));
lim_slope_b = fmin(fabs(del_a_1_C), fabs(del_a_1_G));
del_a_1_m = sgn_CUDA(del_a_1_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_2_L*del_a_2_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_2_L), fabs(del_a_2_R));
lim_slope_b = fmin(fabs(del_a_2_C), fabs(del_a_2_G));
del_a_2_m = sgn_CUDA(del_a_2_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_3_L*del_a_3_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_3_L), fabs(del_a_3_R));
lim_slope_b = fmin(fabs(del_a_3_C), fabs(del_a_3_G));
del_a_3_m = sgn_CUDA(del_a_3_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_4_L*del_a_4_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_4_L), fabs(del_a_4_R));
lim_slope_b = fmin(fabs(del_a_4_C), fabs(del_a_4_G));
del_a_4_m = sgn_CUDA(del_a_4_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
#ifdef DE
if (del_ge_L*del_ge_R > 0.0) {
lim_slope_a = fmin(fabs(del_ge_L), fabs(del_ge_R));
lim_slope_b = fmin(fabs(del_ge_C), fabs(del_ge_G));
del_ge_m_imo = sgn_CUDA(del_ge_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_ge_m_imo = 0.0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) {
lim_slope_a = fmin(fabs(del_scalar_L[i]), fabs(del_scalar_R[i]));
lim_slope_b = fmin(fabs(del_scalar_C[i]), fabs(del_scalar_G[i]));
del_scalar_m_imo[i] = sgn_CUDA(del_scalar_C[i]) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_scalar_m_imo[i] = 0.0;
}
#endif
// Step 5 - Project the monotonized difference in the characteristic variables back onto the
//          primitive variables
// Stone Eqn 39
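// Multiplying by the right eigenvectors gives the limited primitive differences:
//   del_d = a_0 + a_1 + a_4,  del_vx = a*(a_4 - a_0)/d,
//   del_vy = a_2,  del_vz = a_3,  del_p = a*a*(a_0 + a_4)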
del_d_m_imo = del_a_0_m + del_a_1_m + del_a_4_m;
del_vx_m_imo = -a*del_a_0_m/d_imo + a*del_a_4_m/d_imo;
del_vy_m_imo = del_a_2_m;
del_vz_m_imo = del_a_3_m;
del_p_m_imo = a*a*del_a_0_m + a*a*del_a_4_m;
// Step 2 - Compute the left, right, centered, and van Leer differences of the primitive variables
// Note that here L and R refer to locations relative to the cell center
// Stone Eqn 36
// calculate the adiabatic sound speed in cell i
a = sqrt(gamma*p_i/d_i);
// left
del_d_L = d_i - d_imo;
del_vx_L = vx_i - vx_imo;
del_vy_L = vy_i - vy_imo;
del_vz_L = vz_i - vz_imo;
del_p_L = p_i - p_imo;
// right
del_d_R = d_ipo - d_i;
del_vx_R = vx_ipo - vx_i;
del_vy_R = vy_ipo - vy_i;
del_vz_R = vz_ipo - vz_i;
del_p_R = p_ipo - p_i;
// centered
del_d_C = 0.5*(d_ipo - d_imo);
del_vx_C = 0.5*(vx_ipo - vx_imo);
del_vy_C = 0.5*(vy_ipo - vy_imo);
del_vz_C = 0.5*(vz_ipo - vz_imo);
del_p_C = 0.5*(p_ipo - p_imo);
// van Leer
if (del_d_L*del_d_R > 0.0) { del_d_G = 2.0*del_d_L*del_d_R / (del_d_L+del_d_R); }
else { del_d_G = 0.0; }
if (del_vx_L*del_vx_R > 0.0) { del_vx_G = 2.0*del_vx_L*del_vx_R / (del_vx_L+del_vx_R); }
else { del_vx_G = 0.0; }
if (del_vy_L*del_vy_R > 0.0) { del_vy_G = 2.0*del_vy_L*del_vy_R / (del_vy_L+del_vy_R); }
else { del_vy_G = 0.0; }
if (del_vz_L*del_vz_R > 0.0) { del_vz_G = 2.0*del_vz_L*del_vz_R / (del_vz_L+del_vz_R); }
else { del_vz_G = 0.0; }
if (del_p_L*del_p_R > 0.0) { del_p_G = 2.0*del_p_L*del_p_R / (del_p_L+del_p_R); }
else { del_p_G = 0.0; }
#ifdef DE
del_ge_L = ge_i - ge_imo;
del_ge_R = ge_ipo - ge_i;
del_ge_C = 0.5*(ge_ipo - ge_imo);
if (del_ge_L*del_ge_R > 0.0) { del_ge_G = 2.0*del_ge_L*del_ge_R / (del_ge_L+del_ge_R); }
else { del_ge_G = 0.0; }
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
del_scalar_L[i] = scalar_i[i] - scalar_imo[i];
del_scalar_R[i] = scalar_ipo[i] - scalar_i[i];
del_scalar_C[i] = 0.5*(scalar_ipo[i] - scalar_imo[i]);
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) { del_scalar_G[i] = 2.0*del_scalar_L[i]*del_scalar_R[i] / (del_scalar_L[i]+del_scalar_R[i]); }
else { del_scalar_G[i] = 0.0; }
}
#endif
// Step 3 - Project the left, right, centered, and van Leer differences onto the characteristic variables
// Stone Eqn 37 (del_a are differences in characteristic variables, see Stone for notation)
// Use the eigenvectors given in Stone 2008, Appendix A
del_a_0_L = -0.5*d_i*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_1_L = del_d_L - del_p_L/(a*a);
del_a_2_L = del_vy_L;
del_a_3_L = del_vz_L;
del_a_4_L = 0.5*d_i*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_0_R = -0.5*d_i*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_1_R = del_d_R - del_p_R/(a*a);
del_a_2_R = del_vy_R;
del_a_3_R = del_vz_R;
del_a_4_R = 0.5*d_i*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_0_C = -0.5*d_i*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_1_C = del_d_C - del_p_C/(a*a);
del_a_2_C = del_vy_C;
del_a_3_C = del_vz_C;
del_a_4_C = 0.5*d_i*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_0_G = -0.5*d_i*del_vx_G/a + 0.5*del_p_G/(a*a);
del_a_1_G = del_d_G - del_p_G/(a*a);
del_a_2_G = del_vy_G;
del_a_3_G = del_vz_G;
del_a_4_G = 0.5*d_i*del_vx_G/a + 0.5*del_p_G/(a*a);
// Step 4 - Apply monotonicity constraints to the differences in the characteristic variables
// Stone Eqn 38
del_a_0_m = del_a_1_m = del_a_2_m = del_a_3_m = del_a_4_m = 0.0;
if (del_a_0_L*del_a_0_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_0_L), fabs(del_a_0_R));
lim_slope_b = fmin(fabs(del_a_0_C), fabs(del_a_0_G));
del_a_0_m = sgn_CUDA(del_a_0_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_1_L*del_a_1_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_1_L), fabs(del_a_1_R));
lim_slope_b = fmin(fabs(del_a_1_C), fabs(del_a_1_G));
del_a_1_m = sgn_CUDA(del_a_1_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_2_L*del_a_2_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_2_L), fabs(del_a_2_R));
lim_slope_b = fmin(fabs(del_a_2_C), fabs(del_a_2_G));
del_a_2_m = sgn_CUDA(del_a_2_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_3_L*del_a_3_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_3_L), fabs(del_a_3_R));
lim_slope_b = fmin(fabs(del_a_3_C), fabs(del_a_3_G));
del_a_3_m = sgn_CUDA(del_a_3_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_4_L*del_a_4_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_4_L), fabs(del_a_4_R));
lim_slope_b = fmin(fabs(del_a_4_C), fabs(del_a_4_G));
del_a_4_m = sgn_CUDA(del_a_4_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
#ifdef DE
if (del_ge_L*del_ge_R > 0.0) {
lim_slope_a = fmin(fabs(del_ge_L), fabs(del_ge_R));
lim_slope_b = fmin(fabs(del_ge_C), fabs(del_ge_G));
del_ge_m_i = sgn_CUDA(del_ge_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_ge_m_i = 0.0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) {
lim_slope_a = fmin(fabs(del_scalar_L[i]), fabs(del_scalar_R[i]));
lim_slope_b = fmin(fabs(del_scalar_C[i]), fabs(del_scalar_G[i]));
del_scalar_m_i[i] = sgn_CUDA(del_scalar_C[i]) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_scalar_m_i[i] = 0.0;
}
#endif
// Step 5 - Project the monotonized difference in the characteristic variables back onto the
//          primitive variables
// Stone Eqn 39
del_d_m_i = del_a_0_m + del_a_1_m + del_a_4_m;
del_vx_m_i = -a*del_a_0_m/d_i + a*del_a_4_m/d_i;
del_vy_m_i = del_a_2_m;
del_vz_m_i = del_a_3_m;
del_p_m_i = a*a*del_a_0_m + a*a*del_a_4_m;
// Step 2 - Compute the left, right, centered, and van Leer differences of the primitive variables
// Note that here L and R refer to locations relative to the cell center
// Stone Eqn 36
// calculate the adiabatic sound speed in cell ipo
a = sqrt(gamma*p_ipo/d_ipo);
// left
del_d_L = d_ipo - d_i;
del_vx_L = vx_ipo - vx_i;
del_vy_L = vy_ipo - vy_i;
del_vz_L = vz_ipo - vz_i;
del_p_L = p_ipo - p_i;
// right
del_d_R = d_ipt - d_ipo;
del_vx_R = vx_ipt - vx_ipo;
del_vy_R = vy_ipt - vy_ipo;
del_vz_R = vz_ipt - vz_ipo;
del_p_R = p_ipt - p_ipo;
// centered
del_d_C = 0.5*(d_ipt - d_i);
del_vx_C = 0.5*(vx_ipt- vx_i);
del_vy_C = 0.5*(vy_ipt - vy_i);
del_vz_C = 0.5*(vz_ipt - vz_i);
del_p_C = 0.5*(p_ipt - p_i);
// van Leer
if (del_d_L*del_d_R > 0.0) { del_d_G = 2.0*del_d_L*del_d_R / (del_d_L+del_d_R); }
else { del_d_G = 0.0; }
if (del_vx_L*del_vx_R > 0.0) { del_vx_G = 2.0*del_vx_L*del_vx_R / (del_vx_L+del_vx_R); }
else { del_vx_G = 0.0; }
if (del_vy_L*del_vy_R > 0.0) { del_vy_G = 2.0*del_vy_L*del_vy_R / (del_vy_L+del_vy_R); }
else { del_vy_G = 0.0; }
if (del_vz_L*del_vz_R > 0.0) { del_vz_G = 2.0*del_vz_L*del_vz_R / (del_vz_L+del_vz_R); }
else { del_vz_G = 0.0; }
if (del_p_L*del_p_R > 0.0) { del_p_G = 2.0*del_p_L*del_p_R / (del_p_L+del_p_R); }
else { del_p_G = 0.0; }
#ifdef DE
del_ge_L = ge_ipo - ge_i;
del_ge_R = ge_ipt - ge_ipo;
del_ge_C = 0.5*(ge_ipt- ge_i);
if (del_ge_L*del_ge_R > 0.0) { del_ge_G = 2.0*del_ge_L*del_ge_R / (del_ge_L+del_ge_R); }
else { del_ge_G = 0.0; }
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
del_scalar_L[i] = scalar_ipo[i] - scalar_i[i];
del_scalar_R[i] = scalar_ipt[i] - scalar_ipo[i];
del_scalar_C[i] = 0.5*(scalar_ipt[i]- scalar_i[i]);
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) { del_scalar_G[i] = 2.0*del_scalar_L[i]*del_scalar_R[i] / (del_scalar_L[i]+del_scalar_R[i]); }
else { del_scalar_G[i] = 0.0; }
}
#endif
// Step 3 - Project the left, right, centered, and van Leer differences onto the characteristic variables
// Stone Eqn 37 (del_a are differences in characteristic variables, see Stone for notation)
// Use the eigenvectors given in Stone 2008, Appendix A
del_a_0_L = -0.5*d_ipo*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_1_L = del_d_L - del_p_L/(a*a);
del_a_2_L = del_vy_L;
del_a_3_L = del_vz_L;
del_a_4_L = 0.5*d_ipo*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_0_R = -0.5*d_ipo*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_1_R = del_d_R - del_p_R/(a*a);
del_a_2_R = del_vy_R;
del_a_3_R = del_vz_R;
del_a_4_R = 0.5*d_ipo*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_0_C = -0.5*d_ipo*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_1_C = del_d_C - del_p_C/(a*a);
del_a_2_C = del_vy_C;
del_a_3_C = del_vz_C;
del_a_4_C = 0.5*d_ipo*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_0_G = -0.5*d_ipo*del_vx_G/a + 0.5*del_p_G/(a*a);
del_a_1_G = del_d_G - del_p_G/(a*a);
del_a_2_G = del_vy_G;
del_a_3_G = del_vz_G;
del_a_4_G = 0.5*d_ipo*del_vx_G/a + 0.5*del_p_G/(a*a);
// Step 4 - Apply monotonicity constraints to the differences in the characteristic variables
// Stone Eqn 38
del_a_0_m = del_a_1_m = del_a_2_m = del_a_3_m = del_a_4_m = 0.0;
if (del_a_0_L*del_a_0_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_0_L), fabs(del_a_0_R));
lim_slope_b = fmin(fabs(del_a_0_C), fabs(del_a_0_G));
del_a_0_m = sgn_CUDA(del_a_0_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_1_L*del_a_1_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_1_L), fabs(del_a_1_R));
lim_slope_b = fmin(fabs(del_a_1_C), fabs(del_a_1_G));
del_a_1_m = sgn_CUDA(del_a_1_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_2_L*del_a_2_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_2_L), fabs(del_a_2_R));
lim_slope_b = fmin(fabs(del_a_2_C), fabs(del_a_2_G));
del_a_2_m = sgn_CUDA(del_a_2_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_3_L*del_a_3_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_3_L), fabs(del_a_3_R));
lim_slope_b = fmin(fabs(del_a_3_C), fabs(del_a_3_G));
del_a_3_m = sgn_CUDA(del_a_3_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_4_L*del_a_4_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_4_L), fabs(del_a_4_R));
lim_slope_b = fmin(fabs(del_a_4_C), fabs(del_a_4_G));
del_a_4_m = sgn_CUDA(del_a_4_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
#ifdef DE
if (del_ge_L*del_ge_R > 0.0) {
lim_slope_a = fmin(fabs(del_ge_L), fabs(del_ge_R));
lim_slope_b = fmin(fabs(del_ge_C), fabs(del_ge_G));
del_ge_m_ipo = sgn_CUDA(del_ge_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_ge_m_ipo = 0.0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) {
lim_slope_a = fmin(fabs(del_scalar_L[i]), fabs(del_scalar_R[i]));
lim_slope_b = fmin(fabs(del_scalar_C[i]), fabs(del_scalar_G[i]));
del_scalar_m_ipo[i] = sgn_CUDA(del_scalar_C[i]) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_scalar_m_ipo[i] = 0.0;
}
#endif
// Step 5 - Project the monotonized difference in the characteristic variables back onto the
//          primitive variables
// Stone Eqn 39
del_d_m_ipo = del_a_0_m + del_a_1_m + del_a_4_m;
del_vx_m_ipo = -a*del_a_0_m / d_ipo + a* del_a_4_m / d_ipo;
del_vy_m_ipo = del_a_2_m;
del_vz_m_ipo = del_a_3_m;
del_p_m_ipo = a*a*del_a_0_m + a*a*del_a_4_m;
// Step 6 - Use parabolic interpolation to compute values at the left and right of each cell center
// Here, the subscripts L and R refer to the left and right side of the ith cell center
// Stone Eqn 46
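// The edge values of the parabola in cell i are
//   q_L = 0.5*(q_i + q_imo) - (del_q_m_i - del_q_m_imo)/6
//   q_R = 0.5*(q_ipo + q_i) - (del_q_m_ipo - del_q_m_i)/6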
d_L = 0.5*(d_i + d_imo) - (del_d_m_i - del_d_m_imo) / 6.0;
vx_L = 0.5*(vx_i + vx_imo) - (del_vx_m_i - del_vx_m_imo) / 6.0;
vy_L = 0.5*(vy_i + vy_imo) - (del_vy_m_i - del_vy_m_imo) / 6.0;
vz_L = 0.5*(vz_i + vz_imo) - (del_vz_m_i - del_vz_m_imo) / 6.0;
p_L = 0.5*(p_i + p_imo) - (del_p_m_i - del_p_m_imo) / 6.0;
d_R = 0.5*(d_ipo + d_i) - (del_d_m_ipo - del_d_m_i) / 6.0;
vx_R = 0.5*(vx_ipo + vx_i) - (del_vx_m_ipo - del_vx_m_i) / 6.0;
vy_R = 0.5*(vy_ipo + vy_i) - (del_vy_m_ipo - del_vy_m_i) / 6.0;
vz_R = 0.5*(vz_ipo + vz_i) - (del_vz_m_ipo - del_vz_m_i) / 6.0;
p_R = 0.5*(p_ipo + p_i) - (del_p_m_ipo - del_p_m_i) / 6.0;
#ifdef DE
ge_L = 0.5*(ge_i + ge_imo) - (del_ge_m_i - del_ge_m_imo) / 6.0;
ge_R = 0.5*(ge_ipo + ge_i) - (del_ge_m_ipo - del_ge_m_i) / 6.0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_L[i] = 0.5*(scalar_i[i] + scalar_imo[i]) - (del_scalar_m_i[i] - del_scalar_m_imo[i]) / 6.0;
scalar_R[i] = 0.5*(scalar_ipo[i] + scalar_i[i]) - (del_scalar_m_ipo[i] - del_scalar_m_i[i]) / 6.0;
}
#endif
// Step 7 - Apply further monotonicity constraints to ensure the values on the left and right side
// of cell center lie between neighboring cell-centered values
// Stone Eqns 47 - 53
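// If the cell center does not lie between the two edge values the profile is flattened to a
// constant; if the parabola overshoots, the offending edge is reset so the parabola remains
// monotone, and each edge is then clamped between the two neighboring cell-centered values.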
if ((d_R - d_i) *(d_i - d_L) <= 0) d_L = d_R = d_i;
if ((vx_R - vx_i)*(vx_i - vx_L) <= 0) vx_L = vx_R = vx_i;
if ((vy_R - vy_i)*(vy_i - vy_L) <= 0) vy_L = vy_R = vy_i;
if ((vz_R - vz_i)*(vz_i - vz_L) <= 0) vz_L = vz_R = vz_i;
if ((p_R - p_i) *(p_i - p_L) <= 0) p_L = p_R = p_i;
if ( 6.0*(d_R - d_L) *(d_i - 0.5*(d_L + d_R)) > (d_R - d_L) *(d_R - d_L)) d_L = 3.0*d_i - 2.0*d_R;
if ( 6.0*(vx_R - vx_L)*(vx_i - 0.5*(vx_L + vx_R)) > (vx_R - vx_L)*(vx_R - vx_L)) vx_L = 3.0*vx_i - 2.0*vx_R;
if ( 6.0*(vy_R - vy_L)*(vy_i - 0.5*(vy_L + vy_R)) > (vy_R - vy_L)*(vy_R - vy_L)) vy_L = 3.0*vy_i - 2.0*vy_R;
if ( 6.0*(vz_R - vz_L)*(vz_i - 0.5*(vz_L + vz_R)) > (vz_R - vz_L)*(vz_R - vz_L)) vz_L = 3.0*vz_i - 2.0*vz_R;
if ( 6.0*(p_R - p_L) *(p_i - 0.5*(p_L + p_R)) > (p_R - p_L) *(p_R - p_L)) p_L = 3.0*p_i - 2.0*p_R;
if ( 6.0*(d_R - d_L) *(d_i - 0.5*(d_L + d_R)) < -(d_R - d_L) *(d_R - d_L)) d_R = 3.0*d_i - 2.0*d_L;
if ( 6.0*(vx_R - vx_L)*(vx_i - 0.5*(vx_L + vx_R)) < -(vx_R - vx_L)*(vx_R - vx_L)) vx_R = 3.0*vx_i - 2.0*vx_L;
if ( 6.0*(vy_R - vy_L)*(vy_i - 0.5*(vy_L + vy_R)) < -(vy_R - vy_L)*(vy_R - vy_L)) vy_R = 3.0*vy_i - 2.0*vy_L;
if ( 6.0*(vz_R - vz_L)*(vz_i - 0.5*(vz_L + vz_R)) < -(vz_R - vz_L)*(vz_R - vz_L)) vz_R = 3.0*vz_i - 2.0*vz_L;
if ( 6.0*(p_R - p_L) *(p_i - 0.5*(p_L + p_R)) < -(p_R - p_L) *(p_R - p_L)) p_R = 3.0*p_i - 2.0*p_L;
d_L = fmax( fmin(d_i, d_imo), d_L );
d_L = fmin( fmax(d_i, d_imo), d_L );
d_R = fmax( fmin(d_i, d_ipo), d_R );
d_R = fmin( fmax(d_i, d_ipo), d_R );
vx_L = fmax( fmin(vx_i, vx_imo), vx_L );
vx_L = fmin( fmax(vx_i, vx_imo), vx_L );
vx_R = fmax( fmin(vx_i, vx_ipo), vx_R );
vx_R = fmin( fmax(vx_i, vx_ipo), vx_R );
vy_L = fmax( fmin(vy_i, vy_imo), vy_L );
vy_L = fmin( fmax(vy_i, vy_imo), vy_L );
vy_R = fmax( fmin(vy_i, vy_ipo), vy_R );
vy_R = fmin( fmax(vy_i, vy_ipo), vy_R );
vz_L = fmax( fmin(vz_i, vz_imo), vz_L );
vz_L = fmin( fmax(vz_i, vz_imo), vz_L );
vz_R = fmax( fmin(vz_i, vz_ipo), vz_R );
vz_R = fmin( fmax(vz_i, vz_ipo), vz_R );
p_L = fmax( fmin(p_i, p_imo), p_L );
p_L = fmin( fmax(p_i, p_imo), p_L );
p_R = fmax( fmin(p_i, p_ipo), p_R );
p_R = fmin( fmax(p_i, p_ipo), p_R );
#ifdef DE
if ((ge_R - ge_i) *(ge_i - ge_L) <= 0) ge_L = ge_R = ge_i;
if ( 6.0*(ge_R - ge_L) *(ge_i - 0.5*(ge_L + ge_R)) > (ge_R - ge_L) *(ge_R - ge_L)) ge_L = 3.0*ge_i - 2.0*ge_R;
if ( 6.0*(ge_R - ge_L) *(ge_i - 0.5*(ge_L + ge_R)) < -(ge_R - ge_L) *(ge_R - ge_L)) ge_R = 3.0*ge_i - 2.0*ge_L;
ge_L = fmax( fmin(ge_i, ge_imo), ge_L );
ge_L = fmin( fmax(ge_i, ge_imo), ge_L );
ge_R = fmax( fmin(ge_i, ge_ipo), ge_R );
ge_R = fmin( fmax(ge_i, ge_ipo), ge_R );
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
if ((scalar_R[i] - scalar_i[i]) *(scalar_i[i] - scalar_L[i]) <= 0) scalar_L[i] = scalar_R[i] = scalar_i[i];
if ( 6.0*(scalar_R[i] - scalar_L[i]) *(scalar_i[i] - 0.5*(scalar_L[i] + scalar_R[i])) > (scalar_R[i] - scalar_L[i]) *(scalar_R[i] - scalar_L[i])) scalar_L[i] = 3.0*scalar_i[i] - 2.0*scalar_R[i];
if ( 6.0*(scalar_R[i] - scalar_L[i]) *(scalar_i[i] - 0.5*(scalar_L[i] + scalar_R[i])) < -(scalar_R[i] - scalar_L[i]) *(scalar_R[i] - scalar_L[i])) scalar_R[i] = 3.0*scalar_i[i] - 2.0*scalar_L[i];
scalar_L[i] = fmax( fmin(scalar_i[i], scalar_imo[i]), scalar_L[i] );
scalar_L[i] = fmin( fmax(scalar_i[i], scalar_imo[i]), scalar_L[i] );
scalar_R[i] = fmax( fmin(scalar_i[i], scalar_ipo[i]), scalar_R[i] );
scalar_R[i] = fmin( fmax(scalar_i[i], scalar_ipo[i]), scalar_R[i] );
}
#endif
// #ifdef CTU
#ifndef VL
// Step 8 - Compute the coefficients for the monotonized parabolic interpolation function
// Stone Eqn 54
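// The parabola in cell i is characterized by its edge-to-edge difference and curvature,
//   del_q_m = q_R - q_L   and   q_6 = 6*( q_i - 0.5*(q_L + q_R) )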
del_d_m_i = d_R - d_L;
del_vx_m_i = vx_R - vx_L;
del_vy_m_i = vy_R - vy_L;
del_vz_m_i = vz_R - vz_L;
del_p_m_i = p_R - p_L;
d_6 = 6.0*(d_i - 0.5*(d_L + d_R));
vx_6 = 6.0*(vx_i - 0.5*(vx_L + vx_R));
vy_6 = 6.0*(vy_i - 0.5*(vy_L + vy_R));
vz_6 = 6.0*(vz_i - 0.5*(vz_L + vz_R));
p_6 = 6.0*(p_i - 0.5*(p_L + p_R));
#ifdef DE
del_ge_m_i = ge_R - ge_L;
ge_6 = 6.0*(ge_i - 0.5*(ge_L + ge_R));
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
del_scalar_m_i[i] = scalar_R[i] - scalar_L[i];
scalar_6[i] = 6.0*(scalar_i[i] - 0.5*(scalar_L[i] + scalar_R[i]));
}
#endif
// Compute the eigenvalues of the linearized equations in the
// primitive variables using the cell-centered primitive variables
// recalculate the adiabatic sound speed in cell i
a = sqrt(gamma*p_i/d_i);
lambda_m = vx_i-a;
lambda_0 = vx_i;
lambda_p = vx_i+a;
// Step 9 - Compute the left and right interface values using monotonized parabolic interpolation
// Stone Eqns 55 & 56
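// The edge values are replaced by averages of the parabola over the domain of dependence of
// each interface during the time step, using lambda_max = max(vx+a, 0) for the i+1/2 side and
// lambda_min = min(vx-a, 0) for the i-1/2 side.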
// largest eigenvalue
lambda_max = fmax(lambda_p, (Real) 0);
// smallest eigenvalue
lambda_min = fmin(lambda_m, (Real) 0);
// left interface value, i+1/2
d_R = d_R - lambda_max * (0.5*dtodx)*(del_d_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*d_6);
vx_R = vx_R - lambda_max * (0.5*dtodx)*(del_vx_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*vx_6);
vy_R = vy_R - lambda_max * (0.5*dtodx)*(del_vy_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*vy_6);
vz_R = vz_R - lambda_max * (0.5*dtodx)*(del_vz_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*vz_6);
p_R = p_R - lambda_max * (0.5*dtodx)*(del_p_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*p_6);
// right interface value, i-1/2
d_L = d_L - lambda_min * (0.5*dtodx)*(del_d_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*d_6);
vx_L = vx_L - lambda_min * (0.5*dtodx)*(del_vx_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*vx_6);
vy_L = vy_L - lambda_min * (0.5*dtodx)*(del_vy_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*vy_6);
vz_L = vz_L - lambda_min * (0.5*dtodx)*(del_vz_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*vz_6);
p_L = p_L - lambda_min * (0.5*dtodx)*(del_p_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*p_6);
#ifdef DE
ge_R = ge_R - lambda_max * (0.5*dtodx)*(del_ge_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*ge_6);
ge_L = ge_L - lambda_min * (0.5*dtodx)*(del_ge_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*ge_6);
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_R[i] = scalar_R[i] - lambda_max * (0.5*dtodx)*(del_scalar_m_i[i] - (1.0 - (2.0/3.0)*lambda_max*dtodx)*scalar_6[i]);
scalar_L[i] = scalar_L[i] - lambda_min * (0.5*dtodx)*(del_scalar_m_i[i] + (1.0 + (2.0/3.0)*lambda_min*dtodx)*scalar_6[i]);
}
#endif
// Step 10 - Perform the characteristic tracing
// Stone Eqns 57 - 60
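// For each wave that can reach the interface (speed >= 0 for i+1/2, <= 0 for i-1/2), the
// difference between that wave's domain-of-dependence average and the reference state is
// projected onto its characteristic; the corrections are accumulated in sum_1..sum_5 and
// added to the reference edge values below.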
// left-hand interface value, i+1/2
sum_1 = 0;
sum_2 = 0;
sum_3 = 0;
sum_4 = 0;
sum_5 = 0;
#ifdef DE
sum_ge = 0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] = 0;
}
#endif
if (lambda_m >= 0)
{
A = (0.5*dtodx) * (lambda_p - lambda_m);
B = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_p*lambda_p - lambda_m*lambda_m);
chi_1 = A*(del_d_m_i - d_6) + B*d_6;
chi_2 = A*(del_vx_m_i - vx_6) + B*vx_6;
chi_3 = A*(del_vy_m_i - vy_6) + B*vy_6;
chi_4 = A*(del_vz_m_i - vz_6) + B*vz_6;
chi_5 = A*(del_p_m_i - p_6) + B*p_6;
sum_1 += -0.5*(d_i*chi_2/a - chi_5/(a*a));
sum_2 += 0.5*(chi_2 - chi_5/(a*d_i));
sum_5 += -0.5*(d_i*chi_2*a - chi_5);
}
if (lambda_0 >= 0)
{
A = (0.5*dtodx) * (lambda_p - lambda_0);
B = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_p*lambda_p - lambda_0*lambda_0);
chi_1 = A*(del_d_m_i - d_6) + B*d_6;
chi_2 = A*(del_vx_m_i - vx_6) + B*vx_6;
chi_3 = A*(del_vy_m_i - vy_6) + B*vy_6;
chi_4 = A*(del_vz_m_i - vz_6) + B*vz_6;
chi_5 = A*(del_p_m_i - p_6) + B*p_6;
#ifdef DE
chi_ge = A*(del_ge_m_i - ge_6) + B*ge_6;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
chi_scalar[i] = A*(del_scalar_m_i[i] - scalar_6[i]) + B*scalar_6[i];
}
#endif
sum_1 += chi_1 - chi_5/(a*a);
sum_3 += chi_3;
sum_4 += chi_4;
#ifdef DE
sum_ge += chi_ge;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] += chi_scalar[i];
}
#endif
}
if (lambda_p >= 0)
{
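// The reference state for this interface was traced with lambda_max = lambda_p, so A and B
// vanish identically here and this wave contributes no correction; the general form is kept
// for symmetry with the other characteristics.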
A = (0.5*dtodx) * (lambda_p - lambda_p);
B = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_p*lambda_p - lambda_p*lambda_p);
chi_1 = A*(del_d_m_i - d_6) + B*d_6;
chi_2 = A*(del_vx_m_i - vx_6) + B*vx_6;
chi_3 = A*(del_vy_m_i - vy_6) + B*vy_6;
chi_4 = A*(del_vz_m_i - vz_6) + B*vz_6;
chi_5 = A*(del_p_m_i - p_6) + B*p_6;
sum_1 += 0.5*(d_i*chi_2/a + chi_5/(a*a));
sum_2 += 0.5*(chi_2 + chi_5/(a*d_i));
sum_5 += 0.5*(d_i*chi_2*a + chi_5);
}
// add the corrections to the initial guesses for the interface values
d_R += sum_1;
vx_R += sum_2;
vy_R += sum_3;
vz_R += sum_4;
p_R += sum_5;
#ifdef DE
ge_R += sum_ge;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_R[i] += sum_scalar[i];
}
#endif
// right-hand interface value, i-1/2
sum_1 = 0;
sum_2 = 0;
sum_3 = 0;
sum_4 = 0;
sum_5 = 0;
#ifdef DE
sum_ge = 0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] = 0;
}
#endif
if (lambda_m <= 0)
{
C = (0.5*dtodx) * (lambda_m - lambda_m);
D = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_m*lambda_m - lambda_m*lambda_m);
chi_1 = C*(del_d_m_i + d_6) + D*d_6;
chi_2 = C*(del_vx_m_i + vx_6) + D*vx_6;
chi_3 = C*(del_vy_m_i + vy_6) + D*vy_6;
chi_4 = C*(del_vz_m_i + vz_6) + D*vz_6;
chi_5 = C*(del_p_m_i + p_6) + D*p_6;
sum_1 += -0.5*(d_i*chi_2/a - chi_5/(a*a));
sum_2 += 0.5*(chi_2 - chi_5/(a*d_i));
sum_5 += -0.5*(d_i*chi_2*a - chi_5);
}
if (lambda_0 <= 0)
{
C = (0.5*dtodx) * (lambda_m - lambda_0);
D = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_m*lambda_m - lambda_0*lambda_0);
chi_1 = C*(del_d_m_i + d_6) + D*d_6;
chi_2 = C*(del_vx_m_i + vx_6) + D*vx_6;
chi_3 = C*(del_vy_m_i + vy_6) + D*vy_6;
chi_4 = C*(del_vz_m_i + vz_6) + D*vz_6;
chi_5 = C*(del_p_m_i + p_6) + D*p_6;
#ifdef DE
chi_ge = C*(del_ge_m_i + ge_6) + D*ge_6;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
chi_scalar[i] = C*(del_scalar_m_i[i] + scalar_6[i]) + D*scalar_6[i];
}
#endif
sum_1 += chi_1 - chi_5/(a*a);
sum_3 += chi_3;
sum_4 += chi_4;
#ifdef DE
sum_ge += chi_ge;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] += chi_scalar[i];
}
#endif
}
if (lambda_p <= 0)
{
C = (0.5*dtodx) * (lambda_m - lambda_p);
D = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_m*lambda_m - lambda_p*lambda_p);
chi_1 = C*(del_d_m_i + d_6) + D*d_6;
chi_2 = C*(del_vx_m_i + vx_6) + D*vx_6;
chi_3 = C*(del_vy_m_i + vy_6) + D*vy_6;
chi_4 = C*(del_vz_m_i + vz_6) + D*vz_6;
chi_5 = C*(del_p_m_i + p_6) + D*p_6;
sum_1 += 0.5*(d_i*chi_2/a + chi_5/(a*a));
sum_2 += 0.5*(chi_2 + chi_5/(a*d_i));
sum_5 += 0.5*(d_i*chi_2*a + chi_5);
}
// add the corrections
d_L += sum_1;
vx_L += sum_2;
vy_L += sum_3;
vz_L += sum_4;
p_L += sum_5;
#ifdef DE
ge_L += sum_ge;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_L[i] += sum_scalar[i];
}
#endif
#endif //CTU
// enforce minimum values
d_L = fmax(d_L, (Real) TINY_NUMBER);
d_R = fmax(d_R, (Real) TINY_NUMBER);
p_L = fmax(p_L, (Real) TINY_NUMBER);
p_R = fmax(p_R, (Real) TINY_NUMBER);
// Step 11 - Send final values back from kernel
// bounds_R refers to the right side of the i-1/2 interface
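// The i-1/2 reconstruction (d_L, etc.) is written to dev_bounds_R at the index of cell i-1 and
// the i+1/2 reconstruction (d_R, etc.) to dev_bounds_L at cell i, so bounds_L and bounds_R at
// the same index bracket the same interface; values are converted back to conserved form.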
if (dir == 0) id = xid-1 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid-1)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid-1)*nx*ny;
dev_bounds_R[ id] = d_L;
dev_bounds_R[o1*n_cells + id] = d_L*vx_L;
dev_bounds_R[o2*n_cells + id] = d_L*vy_L;
dev_bounds_R[o3*n_cells + id] = d_L*vz_L;
dev_bounds_R[4*n_cells + id] = p_L/(gamma-1.0) + 0.5*d_L*(vx_L*vx_L + vy_L*vy_L + vz_L*vz_L);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_bounds_R[(5+i)*n_cells + id] = d_L*scalar_L[i];
}
#endif
#ifdef DE
dev_bounds_R[(n_fields-1)*n_cells + id] = d_L*ge_L;
#endif
// bounds_L refers to the left side of the i+1/2 interface
id = xid + yid*nx + zid*nx*ny;
dev_bounds_L[ id] = d_R;
dev_bounds_L[o1*n_cells + id] = d_R*vx_R;
dev_bounds_L[o2*n_cells + id] = d_R*vy_R;
dev_bounds_L[o3*n_cells + id] = d_R*vz_R;
dev_bounds_L[4*n_cells + id] = p_R/(gamma-1.0) + 0.5*d_R*(vx_R*vx_R + vy_R*vy_R + vz_R*vz_R);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_bounds_L[(5+i)*n_cells + id] = d_R*scalar_R[i];
}
#endif
#ifdef DE
dev_bounds_L[(n_fields-1)*n_cells + id] = d_R*ge_R;
#endif
}
}
#endif //PPMC
#endif //CUDA
| bf61c205f0790633fb1106f170b749161b3239e9.cu | /*! \file ppmc_cuda.cu
 * \brief Function definitions for the PPM kernels, using characteristic tracing.
Written following Stone et al. 2008. */
#ifdef CUDA
#ifdef PPMC
#include"gpu.hpp"
#include<math.h>
#include"global.h"
#include"global_cuda.h"
#include"ppmc_cuda.h"
#ifdef DE //PRESSURE_DE
#include"hydro_cuda.h"
#endif
/*! \fn void PPMC_cuda(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R, int nx, int ny, int nz, int n_ghost, Real dx, Real dt, Real gamma, int dir, int n_fields)
* \brief When passed a stencil of conserved variables, returns the left and right
boundary values for the interface calculated using ppm. */
__global__ void PPMC_cuda(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R, int nx, int ny, int nz, int n_ghost, Real dx, Real dt, Real gamma, int dir, int n_fields)
{
int n_cells = nx*ny*nz;
int o1, o2, o3;
if (dir == 0 ) {
o1 = 1; o2 = 2; o3 = 3;
}
if (dir == 1 ) {
o1 = 2; o2 = 3; o3 = 1;
}
if (dir == 2 ) {
o1 = 3; o2 = 1; o3 = 2;
}
// declare primitive variables for each stencil
// these will be placed into registers for each thread
Real d_i, vx_i, vy_i, vz_i, p_i;
Real d_imo, vx_imo, vy_imo, vz_imo, p_imo;
Real d_ipo, vx_ipo, vy_ipo, vz_ipo, p_ipo;
Real d_imt, vx_imt, vy_imt, vz_imt, p_imt;
Real d_ipt, vx_ipt, vy_ipt, vz_ipt, p_ipt;
// declare other variables to be used
Real a;
Real del_d_L, del_vx_L, del_vy_L, del_vz_L, del_p_L;
Real del_d_R, del_vx_R, del_vy_R, del_vz_R, del_p_R;
Real del_d_C, del_vx_C, del_vy_C, del_vz_C, del_p_C;
Real del_d_G, del_vx_G, del_vy_G, del_vz_G, del_p_G;
Real del_a_0_L, del_a_1_L, del_a_2_L, del_a_3_L, del_a_4_L;
Real del_a_0_R, del_a_1_R, del_a_2_R, del_a_3_R, del_a_4_R;
Real del_a_0_C, del_a_1_C, del_a_2_C, del_a_3_C, del_a_4_C;
Real del_a_0_G, del_a_1_G, del_a_2_G, del_a_3_G, del_a_4_G;
Real del_a_0_m, del_a_1_m, del_a_2_m, del_a_3_m, del_a_4_m;
Real lim_slope_a, lim_slope_b;
Real del_d_m_imo, del_vx_m_imo, del_vy_m_imo, del_vz_m_imo, del_p_m_imo;
Real del_d_m_i, del_vx_m_i, del_vy_m_i, del_vz_m_i, del_p_m_i;
Real del_d_m_ipo, del_vx_m_ipo, del_vy_m_ipo, del_vz_m_ipo, del_p_m_ipo;
Real d_L, vx_L, vy_L, vz_L, p_L;
Real d_R, vx_R, vy_R, vz_R, p_R;
// #ifdef CTU
#ifndef VL
Real dtodx = dt/dx;
Real d_6, vx_6, vy_6, vz_6, p_6;
Real lambda_m, lambda_0, lambda_p;
Real lambda_max, lambda_min;
Real A, B, C, D;
Real chi_1, chi_2, chi_3, chi_4, chi_5;
Real sum_1, sum_2, sum_3, sum_4, sum_5;
#endif //CTU
#ifdef DE
Real ge_i, ge_imo, ge_ipo, ge_imt, ge_ipt;
Real del_ge_L, del_ge_R, del_ge_C, del_ge_G;
Real del_ge_m_imo, del_ge_m_i, del_ge_m_ipo;
Real ge_L, ge_R;
Real E_kin, E, dge;
// #ifdef CTU
#ifndef VL
Real chi_ge, sum_ge, ge_6;
#endif
#endif
#ifdef SCALAR
Real scalar_i[NSCALARS], scalar_imo[NSCALARS], scalar_ipo[NSCALARS], scalar_imt[NSCALARS], scalar_ipt[NSCALARS];
Real del_scalar_L[NSCALARS], del_scalar_R[NSCALARS], del_scalar_C[NSCALARS], del_scalar_G[NSCALARS];
Real del_scalar_m_imo[NSCALARS], del_scalar_m_i[NSCALARS], del_scalar_m_ipo[NSCALARS];
Real scalar_L[NSCALARS], scalar_R[NSCALARS];
// #ifdef CTU
#ifndef VL
Real chi_scalar[NSCALARS], sum_scalar[NSCALARS], scalar_6[NSCALARS];
#endif
#endif
// get a thread ID
int blockId = blockIdx.x + blockIdx.y*gridDim.x;
int tid = threadIdx.x + blockId * blockDim.x;
int id;
int zid = tid / (nx*ny);
int yid = (tid - zid*nx*ny) / nx;
int xid = tid - zid*nx*ny - yid*nx;
int xs, xe, ys, ye, zs, ze;
if (dir == 0) {
xs = 2; xe = nx-3;
ys = 0; ye = ny;
zs = 0; ze = nz;
}
if (dir == 1) {
xs = 0; xe = nx;
ys = 2; ye = ny-3;
zs = 0; ze = nz;
}
if (dir == 2) {
xs = 0; xe = nx;
ys = 0; ye = ny;
zs = 2; ze = nz-3;
}
if (xid >= xs && xid < xe && yid >= ys && yid < ye && zid >= zs && zid < ze)
{
// load the 5-cell stencil into registers
// cell i
id = xid + yid*nx + zid*nx*ny;
d_i = dev_conserved[ id];
vx_i = dev_conserved[o1*n_cells + id] / d_i;
vy_i = dev_conserved[o2*n_cells + id] / d_i;
vz_i = dev_conserved[o3*n_cells + id] / d_i;
#ifdef DE //PRESSURE_DE
E = dev_conserved[4*n_cells + id];
E_kin = 0.5 * d_i * ( vx_i*vx_i + vy_i*vy_i + vz_i*vz_i );
dge = dev_conserved[(n_fields-1)*n_cells + id];
p_i = Get_Pressure_From_DE( E, E - E_kin, dge, gamma );
#else
p_i = (dev_conserved[4*n_cells + id] - 0.5*d_i*(vx_i*vx_i + vy_i*vy_i + vz_i*vz_i)) * (gamma - 1.0);
#endif //PRESSURE_DE
p_i = fmax(p_i, (Real) TINY_NUMBER);
#ifdef DE
ge_i = dge / d_i;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_i[i] = dev_conserved[(5+i)*n_cells + id] / d_i;
}
#endif
// cell i-1
if (dir == 0) id = xid-1 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid-1)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid-1)*nx*ny;
d_imo = dev_conserved[ id];
vx_imo = dev_conserved[o1*n_cells + id] / d_imo;
vy_imo = dev_conserved[o2*n_cells + id] / d_imo;
vz_imo = dev_conserved[o3*n_cells + id] / d_imo;
#ifdef DE //PRESSURE_DE
E = dev_conserved[4*n_cells + id];
E_kin = 0.5 * d_imo * ( vx_imo*vx_imo + vy_imo*vy_imo + vz_imo*vz_imo );
dge = dev_conserved[(n_fields-1)*n_cells + id];
p_imo = Get_Pressure_From_DE( E, E - E_kin, dge, gamma );
#else
p_imo = (dev_conserved[4*n_cells + id] - 0.5*d_imo*(vx_imo*vx_imo + vy_imo*vy_imo + vz_imo*vz_imo)) * (gamma - 1.0);
#endif //PRESSURE_DE
p_imo = fmax(p_imo, (Real) TINY_NUMBER);
#ifdef DE
ge_imo = dge / d_imo;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_imo[i] = dev_conserved[(5+i)*n_cells + id] / d_imo;
}
#endif
// cell i+1
if (dir == 0) id = xid+1 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid+1)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid+1)*nx*ny;
d_ipo = dev_conserved[ id];
vx_ipo = dev_conserved[o1*n_cells + id] / d_ipo;
vy_ipo = dev_conserved[o2*n_cells + id] / d_ipo;
vz_ipo = dev_conserved[o3*n_cells + id] / d_ipo;
#ifdef DE //PRESSURE_DE
E = dev_conserved[4*n_cells + id];
E_kin = 0.5 * d_ipo * ( vx_ipo*vx_ipo + vy_ipo*vy_ipo + vz_ipo*vz_ipo );
dge = dev_conserved[(n_fields-1)*n_cells + id];
p_ipo = Get_Pressure_From_DE( E, E - E_kin, dge, gamma );
#else
p_ipo = (dev_conserved[4*n_cells + id] - 0.5*d_ipo*(vx_ipo*vx_ipo + vy_ipo*vy_ipo + vz_ipo*vz_ipo)) * (gamma - 1.0);
#endif //PRESSURE_DE
p_ipo = fmax(p_ipo, (Real) TINY_NUMBER);
#ifdef DE
ge_ipo = dge / d_ipo;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_ipo[i] = dev_conserved[(5+i)*n_cells + id] / d_ipo;
}
#endif
// cell i-2
if (dir == 0) id = xid-2 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid-2)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid-2)*nx*ny;
d_imt = dev_conserved[ id];
vx_imt = dev_conserved[o1*n_cells + id] / d_imt;
vy_imt = dev_conserved[o2*n_cells + id] / d_imt;
vz_imt = dev_conserved[o3*n_cells + id] / d_imt;
#ifdef DE //PRESSURE_DE
E = dev_conserved[4*n_cells + id];
E_kin = 0.5 * d_imt * ( vx_imt*vx_imt + vy_imt*vy_imt + vz_imt*vz_imt );
dge = dev_conserved[(n_fields-1)*n_cells + id];
p_imt = Get_Pressure_From_DE( E, E - E_kin, dge, gamma );
#else
p_imt = (dev_conserved[4*n_cells + id] - 0.5*d_imt*(vx_imt*vx_imt + vy_imt*vy_imt + vz_imt*vz_imt)) * (gamma - 1.0);
#endif //PRESSURE_DE
p_imt = fmax(p_imt, (Real) TINY_NUMBER);
#ifdef DE
ge_imt = dge / d_imt;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_imt[i] = dev_conserved[(5+i)*n_cells + id] / d_imt;
}
#endif
// cell i+2
if (dir == 0) id = xid+2 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid+2)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid+2)*nx*ny;
d_ipt = dev_conserved[ id];
vx_ipt = dev_conserved[o1*n_cells + id] / d_ipt;
vy_ipt = dev_conserved[o2*n_cells + id] / d_ipt;
vz_ipt = dev_conserved[o3*n_cells + id] / d_ipt;
#ifdef DE //PRESSURE_DE
E = dev_conserved[4*n_cells + id];
E_kin = 0.5 * d_ipt * ( vx_ipt*vx_ipt + vy_ipt*vy_ipt + vz_ipt*vz_ipt );
dge = dev_conserved[(n_fields-1)*n_cells + id];
p_ipt = Get_Pressure_From_DE( E, E - E_kin, dge, gamma );
#else
p_ipt = (dev_conserved[4*n_cells + id] - 0.5*d_ipt*(vx_ipt*vx_ipt + vy_ipt*vy_ipt + vz_ipt*vz_ipt)) * (gamma - 1.0);
#endif //PRESSURE_DE
p_ipt = fmax(p_ipt, (Real) TINY_NUMBER);
#ifdef DE
ge_ipt = dge / d_ipt;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_ipt[i] = dev_conserved[(5+i)*n_cells + id] / d_ipt;
}
#endif
//printf("%d %d %d %f %f %f %f %f\n", xid, yid, zid, d_i, vx_i, vy_i, vz_i, p_i);
// Steps 2 - 5 are repeated for cell i-1, i, and i+1
// Step 2 - Compute the left, right, centered, and van Leer differences of the primitive variables
// Note that here L and R refer to locations relative to the cell center
// Stone Eqn 36
// calculate the adiabatic sound speed in cell imo
a = sqrt(gamma*p_imo/d_imo);
// left
del_d_L = d_imo - d_imt;
del_vx_L = vx_imo - vx_imt;
del_vy_L = vy_imo - vy_imt;
del_vz_L = vz_imo - vz_imt;
del_p_L = p_imo - p_imt;
// right
del_d_R = d_i - d_imo;
del_vx_R = vx_i - vx_imo;
del_vy_R = vy_i - vy_imo;
del_vz_R = vz_i - vz_imo;
del_p_R = p_i - p_imo;
// centered
del_d_C = 0.5*(d_i - d_imt);
del_vx_C = 0.5*(vx_i - vx_imt);
del_vy_C = 0.5*(vy_i - vy_imt);
del_vz_C = 0.5*(vz_i - vz_imt);
del_p_C = 0.5*(p_i - p_imt);
// van Leer
if (del_d_L*del_d_R > 0.0) { del_d_G = 2.0*del_d_L*del_d_R / (del_d_L+del_d_R); }
else { del_d_G = 0.0; }
if (del_vx_L*del_vx_R > 0.0) { del_vx_G = 2.0*del_vx_L*del_vx_R / (del_vx_L+del_vx_R); }
else { del_vx_G = 0.0; }
if (del_vy_L*del_vy_R > 0.0) { del_vy_G = 2.0*del_vy_L*del_vy_R / (del_vy_L+del_vy_R); }
else { del_vy_G = 0.0; }
if (del_vz_L*del_vz_R > 0.0) { del_vz_G = 2.0*del_vz_L*del_vz_R / (del_vz_L+del_vz_R); }
else { del_vz_G = 0.0; }
if (del_p_L*del_p_R > 0.0) { del_p_G = 2.0*del_p_L*del_p_R / (del_p_L+del_p_R); }
else { del_p_G = 0.0; }
#ifdef DE
del_ge_L = ge_imo - ge_imt;
del_ge_R = ge_i - ge_imo;
del_ge_C = 0.5*(ge_i - ge_imt);
if (del_ge_L*del_ge_R > 0.0) { del_ge_G = 2.0*del_ge_L*del_ge_R / (del_ge_L+del_ge_R); }
else { del_ge_G = 0.0; }
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
del_scalar_L[i] = scalar_imo[i] - scalar_imt[i];
del_scalar_R[i] = scalar_i[i] - scalar_imo[i];
del_scalar_C[i] = 0.5*(scalar_i[i] - scalar_imt[i]);
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) { del_scalar_G[i] = 2.0*del_scalar_L[i]*del_scalar_R[i] / (del_scalar_L[i]+del_scalar_R[i]); }
else { del_scalar_G[i] = 0.0; }
}
#endif
// Step 3 - Project the left, right, centered and van Leer differences onto the characteristic variables
// Stone Eqn 37 (del_a are differences in characteristic variables, see Stone for notation)
// Use the eigenvectors given in Stone 2008, Appendix A
del_a_0_L = -0.5*d_imo*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_1_L = del_d_L - del_p_L/(a*a);
del_a_2_L = del_vy_L;
del_a_3_L = del_vz_L;
del_a_4_L = 0.5*d_imo*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_0_R = -0.5*d_imo*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_1_R = del_d_R - del_p_R/(a*a);
del_a_2_R = del_vy_R;
del_a_3_R = del_vz_R;
del_a_4_R = 0.5*d_imo*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_0_C = -0.5*d_imo*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_1_C = del_d_C - del_p_C/(a*a);
del_a_2_C = del_vy_C;
del_a_3_C = del_vz_C;
del_a_4_C = 0.5*d_imo*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_0_G = -0.5*d_imo*del_vx_G/a + 0.5*del_p_G/(a*a);
del_a_1_G = del_d_G - del_p_G/(a*a);
del_a_2_G = del_vy_G;
del_a_3_G = del_vz_G;
del_a_4_G = 0.5*d_imo*del_vx_G/a + 0.5*del_p_G/(a*a);
// Step 4 - Apply monotonicity constraints to the differences in the characteristic variables
// Stone Eqn 38
del_a_0_m = del_a_1_m = del_a_2_m = del_a_3_m = del_a_4_m = 0.0;
if (del_a_0_L*del_a_0_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_0_L), fabs(del_a_0_R));
lim_slope_b = fmin(fabs(del_a_0_C), fabs(del_a_0_G));
del_a_0_m = sgn_CUDA(del_a_0_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_1_L*del_a_1_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_1_L), fabs(del_a_1_R));
lim_slope_b = fmin(fabs(del_a_1_C), fabs(del_a_1_G));
del_a_1_m = sgn_CUDA(del_a_1_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_2_L*del_a_2_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_2_L), fabs(del_a_2_R));
lim_slope_b = fmin(fabs(del_a_2_C), fabs(del_a_2_G));
del_a_2_m = sgn_CUDA(del_a_2_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_3_L*del_a_3_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_3_L), fabs(del_a_3_R));
lim_slope_b = fmin(fabs(del_a_3_C), fabs(del_a_3_G));
del_a_3_m = sgn_CUDA(del_a_3_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_4_L*del_a_4_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_4_L), fabs(del_a_4_R));
lim_slope_b = fmin(fabs(del_a_4_C), fabs(del_a_4_G));
del_a_4_m = sgn_CUDA(del_a_4_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
#ifdef DE
if (del_ge_L*del_ge_R > 0.0) {
lim_slope_a = fmin(fabs(del_ge_L), fabs(del_ge_R));
lim_slope_b = fmin(fabs(del_ge_C), fabs(del_ge_G));
del_ge_m_imo = sgn_CUDA(del_ge_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_ge_m_imo = 0.0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) {
lim_slope_a = fmin(fabs(del_scalar_L[i]), fabs(del_scalar_R[i]));
lim_slope_b = fmin(fabs(del_scalar_C[i]), fabs(del_scalar_G[i]));
del_scalar_m_imo[i] = sgn_CUDA(del_scalar_C[i]) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_scalar_m_imo[i] = 0.0;
}
#endif
// Step 5 - Project the monotonized difference in the characteristic variables back onto the
//          primitive variables
// Stone Eqn 39
del_d_m_imo = del_a_0_m + del_a_1_m + del_a_4_m;
del_vx_m_imo = -a*del_a_0_m/d_imo + a*del_a_4_m/d_imo;
del_vy_m_imo = del_a_2_m;
del_vz_m_imo = del_a_3_m;
del_p_m_imo = a*a*del_a_0_m + a*a*del_a_4_m;
// Step 2 - Compute the left, right, centered, and van Leer differences of the primitive variables
// Note that here L and R refer to locations relative to the cell center
// Stone Eqn 36
// calculate the adiabatic sound speed in cell i
a = sqrt(gamma*p_i/d_i);
// left
del_d_L = d_i - d_imo;
del_vx_L = vx_i - vx_imo;
del_vy_L = vy_i - vy_imo;
del_vz_L = vz_i - vz_imo;
del_p_L = p_i - p_imo;
// right
del_d_R = d_ipo - d_i;
del_vx_R = vx_ipo - vx_i;
del_vy_R = vy_ipo - vy_i;
del_vz_R = vz_ipo - vz_i;
del_p_R = p_ipo - p_i;
// centered
del_d_C = 0.5*(d_ipo - d_imo);
del_vx_C = 0.5*(vx_ipo - vx_imo);
del_vy_C = 0.5*(vy_ipo - vy_imo);
del_vz_C = 0.5*(vz_ipo - vz_imo);
del_p_C = 0.5*(p_ipo - p_imo);
// van Leer
if (del_d_L*del_d_R > 0.0) { del_d_G = 2.0*del_d_L*del_d_R / (del_d_L+del_d_R); }
else { del_d_G = 0.0; }
if (del_vx_L*del_vx_R > 0.0) { del_vx_G = 2.0*del_vx_L*del_vx_R / (del_vx_L+del_vx_R); }
else { del_vx_G = 0.0; }
if (del_vy_L*del_vy_R > 0.0) { del_vy_G = 2.0*del_vy_L*del_vy_R / (del_vy_L+del_vy_R); }
else { del_vy_G = 0.0; }
if (del_vz_L*del_vz_R > 0.0) { del_vz_G = 2.0*del_vz_L*del_vz_R / (del_vz_L+del_vz_R); }
else { del_vz_G = 0.0; }
if (del_p_L*del_p_R > 0.0) { del_p_G = 2.0*del_p_L*del_p_R / (del_p_L+del_p_R); }
else { del_p_G = 0.0; }
#ifdef DE
del_ge_L = ge_i - ge_imo;
del_ge_R = ge_ipo - ge_i;
del_ge_C = 0.5*(ge_ipo - ge_imo);
if (del_ge_L*del_ge_R > 0.0) { del_ge_G = 2.0*del_ge_L*del_ge_R / (del_ge_L+del_ge_R); }
else { del_ge_G = 0.0; }
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
del_scalar_L[i] = scalar_i[i] - scalar_imo[i];
del_scalar_R[i] = scalar_ipo[i] - scalar_i[i];
del_scalar_C[i] = 0.5*(scalar_ipo[i] - scalar_imo[i]);
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) { del_scalar_G[i] = 2.0*del_scalar_L[i]*del_scalar_R[i] / (del_scalar_L[i]+del_scalar_R[i]); }
else { del_scalar_G[i] = 0.0; }
}
#endif
// Step 3 - Project the left, right, centered, and van Leer differences onto the characteristic variables
// Stone Eqn 37 (del_a are differences in characteristic variables, see Stone for notation)
// Use the eigenvectors given in Stone 2008, Appendix A
del_a_0_L = -0.5*d_i*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_1_L = del_d_L - del_p_L/(a*a);
del_a_2_L = del_vy_L;
del_a_3_L = del_vz_L;
del_a_4_L = 0.5*d_i*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_0_R = -0.5*d_i*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_1_R = del_d_R - del_p_R/(a*a);
del_a_2_R = del_vy_R;
del_a_3_R = del_vz_R;
del_a_4_R = 0.5*d_i*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_0_C = -0.5*d_i*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_1_C = del_d_C - del_p_C/(a*a);
del_a_2_C = del_vy_C;
del_a_3_C = del_vz_C;
del_a_4_C = 0.5*d_i*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_0_G = -0.5*d_i*del_vx_G/a + 0.5*del_p_G/(a*a);
del_a_1_G = del_d_G - del_p_G/(a*a);
del_a_2_G = del_vy_G;
del_a_3_G = del_vz_G;
del_a_4_G = 0.5*d_i*del_vx_G/a + 0.5*del_p_G/(a*a);
// Step 4 - Apply monotonicity constraints to the differences in the characteristic variables
// Stone Eqn 38
del_a_0_m = del_a_1_m = del_a_2_m = del_a_3_m = del_a_4_m = 0.0;
if (del_a_0_L*del_a_0_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_0_L), fabs(del_a_0_R));
lim_slope_b = fmin(fabs(del_a_0_C), fabs(del_a_0_G));
del_a_0_m = sgn_CUDA(del_a_0_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_1_L*del_a_1_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_1_L), fabs(del_a_1_R));
lim_slope_b = fmin(fabs(del_a_1_C), fabs(del_a_1_G));
del_a_1_m = sgn_CUDA(del_a_1_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_2_L*del_a_2_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_2_L), fabs(del_a_2_R));
lim_slope_b = fmin(fabs(del_a_2_C), fabs(del_a_2_G));
del_a_2_m = sgn_CUDA(del_a_2_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_3_L*del_a_3_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_3_L), fabs(del_a_3_R));
lim_slope_b = fmin(fabs(del_a_3_C), fabs(del_a_3_G));
del_a_3_m = sgn_CUDA(del_a_3_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_4_L*del_a_4_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_4_L), fabs(del_a_4_R));
lim_slope_b = fmin(fabs(del_a_4_C), fabs(del_a_4_G));
del_a_4_m = sgn_CUDA(del_a_4_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
#ifdef DE
if (del_ge_L*del_ge_R > 0.0) {
lim_slope_a = fmin(fabs(del_ge_L), fabs(del_ge_R));
lim_slope_b = fmin(fabs(del_ge_C), fabs(del_ge_G));
del_ge_m_i = sgn_CUDA(del_ge_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_ge_m_i = 0.0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) {
lim_slope_a = fmin(fabs(del_scalar_L[i]), fabs(del_scalar_R[i]));
lim_slope_b = fmin(fabs(del_scalar_C[i]), fabs(del_scalar_G[i]));
del_scalar_m_i[i] = sgn_CUDA(del_scalar_C[i]) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_scalar_m_i[i] = 0.0;
}
#endif
// Step 5 - Project the monotonized difference in the characteristic variables back onto the
//          primitive variables
// Stone Eqn 39
del_d_m_i = del_a_0_m + del_a_1_m + del_a_4_m;
del_vx_m_i = -a*del_a_0_m/d_i + a*del_a_4_m/d_i;
del_vy_m_i = del_a_2_m;
del_vz_m_i = del_a_3_m;
del_p_m_i = a*a*del_a_0_m + a*a*del_a_4_m;
// Step 2 - Compute the left, right, centered, and van Leer differences of the primitive variables
// Note that here L and R refer to locations relative to the cell center
// Stone Eqn 36
// calculate the adiabatic sound speed in cell ipo
a = sqrt(gamma*p_ipo/d_ipo);
// left
del_d_L = d_ipo - d_i;
del_vx_L = vx_ipo - vx_i;
del_vy_L = vy_ipo - vy_i;
del_vz_L = vz_ipo - vz_i;
del_p_L = p_ipo - p_i;
// right
del_d_R = d_ipt - d_ipo;
del_vx_R = vx_ipt - vx_ipo;
del_vy_R = vy_ipt - vy_ipo;
del_vz_R = vz_ipt - vz_ipo;
del_p_R = p_ipt - p_ipo;
// centered
del_d_C = 0.5*(d_ipt - d_i);
del_vx_C = 0.5*(vx_ipt- vx_i);
del_vy_C = 0.5*(vy_ipt - vy_i);
del_vz_C = 0.5*(vz_ipt - vz_i);
del_p_C = 0.5*(p_ipt - p_i);
// van Leer
if (del_d_L*del_d_R > 0.0) { del_d_G = 2.0*del_d_L*del_d_R / (del_d_L+del_d_R); }
else { del_d_G = 0.0; }
if (del_vx_L*del_vx_R > 0.0) { del_vx_G = 2.0*del_vx_L*del_vx_R / (del_vx_L+del_vx_R); }
else { del_vx_G = 0.0; }
if (del_vy_L*del_vy_R > 0.0) { del_vy_G = 2.0*del_vy_L*del_vy_R / (del_vy_L+del_vy_R); }
else { del_vy_G = 0.0; }
if (del_vz_L*del_vz_R > 0.0) { del_vz_G = 2.0*del_vz_L*del_vz_R / (del_vz_L+del_vz_R); }
else { del_vz_G = 0.0; }
if (del_p_L*del_p_R > 0.0) { del_p_G = 2.0*del_p_L*del_p_R / (del_p_L+del_p_R); }
else { del_p_G = 0.0; }
#ifdef DE
del_ge_L = ge_ipo - ge_i;
del_ge_R = ge_ipt - ge_ipo;
del_ge_C = 0.5*(ge_ipt- ge_i);
if (del_ge_L*del_ge_R > 0.0) { del_ge_G = 2.0*del_ge_L*del_ge_R / (del_ge_L+del_ge_R); }
else { del_ge_G = 0.0; }
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
del_scalar_L[i] = scalar_ipo[i] - scalar_i[i];
del_scalar_R[i] = scalar_ipt[i] - scalar_ipo[i];
del_scalar_C[i] = 0.5*(scalar_ipt[i]- scalar_i[i]);
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) { del_scalar_G[i] = 2.0*del_scalar_L[i]*del_scalar_R[i] / (del_scalar_L[i]+del_scalar_R[i]); }
else { del_scalar_G[i] = 0.0; }
}
#endif
// Step 3 - Project the left, right, centered, and van Leer differences onto the characteristic variables
// Stone Eqn 37 (del_a are differences in characteristic variables, see Stone for notation)
// Use the eigenvectors given in Stone 2008, Appendix A
del_a_0_L = -0.5*d_ipo*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_1_L = del_d_L - del_p_L/(a*a);
del_a_2_L = del_vy_L;
del_a_3_L = del_vz_L;
del_a_4_L = 0.5*d_ipo*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_0_R = -0.5*d_ipo*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_1_R = del_d_R - del_p_R/(a*a);
del_a_2_R = del_vy_R;
del_a_3_R = del_vz_R;
del_a_4_R = 0.5*d_ipo*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_0_C = -0.5*d_ipo*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_1_C = del_d_C - del_p_C/(a*a);
del_a_2_C = del_vy_C;
del_a_3_C = del_vz_C;
del_a_4_C = 0.5*d_ipo*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_0_G = -0.5*d_ipo*del_vx_G/a + 0.5*del_p_G/(a*a);
del_a_1_G = del_d_G - del_p_G/(a*a);
del_a_2_G = del_vy_G;
del_a_3_G = del_vz_G;
del_a_4_G = 0.5*d_ipo*del_vx_G/a + 0.5*del_p_G/(a*a);
// Step 4 - Apply monotonicity constraints to the differences in the characteristic variables
// Stone Eqn 38
del_a_0_m = del_a_1_m = del_a_2_m = del_a_3_m = del_a_4_m = 0.0;
if (del_a_0_L*del_a_0_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_0_L), fabs(del_a_0_R));
lim_slope_b = fmin(fabs(del_a_0_C), fabs(del_a_0_G));
del_a_0_m = sgn_CUDA(del_a_0_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_1_L*del_a_1_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_1_L), fabs(del_a_1_R));
lim_slope_b = fmin(fabs(del_a_1_C), fabs(del_a_1_G));
del_a_1_m = sgn_CUDA(del_a_1_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_2_L*del_a_2_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_2_L), fabs(del_a_2_R));
lim_slope_b = fmin(fabs(del_a_2_C), fabs(del_a_2_G));
del_a_2_m = sgn_CUDA(del_a_2_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_3_L*del_a_3_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_3_L), fabs(del_a_3_R));
lim_slope_b = fmin(fabs(del_a_3_C), fabs(del_a_3_G));
del_a_3_m = sgn_CUDA(del_a_3_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_4_L*del_a_4_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_4_L), fabs(del_a_4_R));
lim_slope_b = fmin(fabs(del_a_4_C), fabs(del_a_4_G));
del_a_4_m = sgn_CUDA(del_a_4_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
#ifdef DE
if (del_ge_L*del_ge_R > 0.0) {
lim_slope_a = fmin(fabs(del_ge_L), fabs(del_ge_R));
lim_slope_b = fmin(fabs(del_ge_C), fabs(del_ge_G));
del_ge_m_ipo = sgn_CUDA(del_ge_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_ge_m_ipo = 0.0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) {
lim_slope_a = fmin(fabs(del_scalar_L[i]), fabs(del_scalar_R[i]));
lim_slope_b = fmin(fabs(del_scalar_C[i]), fabs(del_scalar_G[i]));
del_scalar_m_ipo[i] = sgn_CUDA(del_scalar_C[i]) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_scalar_m_ipo[i] = 0.0;
}
#endif
// Step 5 - Project the monotonized difference in the characteristic variables back onto the
//          primitive variables
// Stone Eqn 39
del_d_m_ipo = del_a_0_m + del_a_1_m + del_a_4_m;
del_vx_m_ipo = -a*del_a_0_m / d_ipo + a* del_a_4_m / d_ipo;
del_vy_m_ipo = del_a_2_m;
del_vz_m_ipo = del_a_3_m;
del_p_m_ipo = a*a*del_a_0_m + a*a*del_a_4_m;
// Step 6 - Use parabolic interpolation to compute values at the left and right of each cell center
// Here, the subscripts L and R refer to the left and right side of the ith cell center
// Stone Eqn 46
d_L = 0.5*(d_i + d_imo) - (del_d_m_i - del_d_m_imo) / 6.0;
vx_L = 0.5*(vx_i + vx_imo) - (del_vx_m_i - del_vx_m_imo) / 6.0;
vy_L = 0.5*(vy_i + vy_imo) - (del_vy_m_i - del_vy_m_imo) / 6.0;
vz_L = 0.5*(vz_i + vz_imo) - (del_vz_m_i - del_vz_m_imo) / 6.0;
p_L = 0.5*(p_i + p_imo) - (del_p_m_i - del_p_m_imo) / 6.0;
d_R = 0.5*(d_ipo + d_i) - (del_d_m_ipo - del_d_m_i) / 6.0;
vx_R = 0.5*(vx_ipo + vx_i) - (del_vx_m_ipo - del_vx_m_i) / 6.0;
vy_R = 0.5*(vy_ipo + vy_i) - (del_vy_m_ipo - del_vy_m_i) / 6.0;
vz_R = 0.5*(vz_ipo + vz_i) - (del_vz_m_ipo - del_vz_m_i) / 6.0;
p_R = 0.5*(p_ipo + p_i) - (del_p_m_ipo - del_p_m_i) / 6.0;
#ifdef DE
ge_L = 0.5*(ge_i + ge_imo) - (del_ge_m_i - del_ge_m_imo) / 6.0;
ge_R = 0.5*(ge_ipo + ge_i) - (del_ge_m_ipo - del_ge_m_i) / 6.0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_L[i] = 0.5*(scalar_i[i] + scalar_imo[i]) - (del_scalar_m_i[i] - del_scalar_m_imo[i]) / 6.0;
scalar_R[i] = 0.5*(scalar_ipo[i] + scalar_i[i]) - (del_scalar_m_ipo[i] - del_scalar_m_i[i]) / 6.0;
}
#endif
// Step 7 - Apply further monotonicity constraints to ensure the values on the left and right side
// of cell center lie between neighboring cell-centered values
// Stone Eqns 47 - 53
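  // Three constraints are applied in turn: (1) if the cell-center value does not lie between the
  // two edge values, the profile is flattened to a constant; (2) if the interpolation parabola
  // would have an extremum inside the cell, one edge value is reset to 3*q_i - 2*q_(other edge)
  // to restore monotonicity; (3) each edge value is clamped between its own cell average and the
  // neighboring cell average.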
if ((d_R - d_i) *(d_i - d_L) <= 0) d_L = d_R = d_i;
if ((vx_R - vx_i)*(vx_i - vx_L) <= 0) vx_L = vx_R = vx_i;
if ((vy_R - vy_i)*(vy_i - vy_L) <= 0) vy_L = vy_R = vy_i;
if ((vz_R - vz_i)*(vz_i - vz_L) <= 0) vz_L = vz_R = vz_i;
if ((p_R - p_i) *(p_i - p_L) <= 0) p_L = p_R = p_i;
if ( 6.0*(d_R - d_L) *(d_i - 0.5*(d_L + d_R)) > (d_R - d_L) *(d_R - d_L)) d_L = 3.0*d_i - 2.0*d_R;
if ( 6.0*(vx_R - vx_L)*(vx_i - 0.5*(vx_L + vx_R)) > (vx_R - vx_L)*(vx_R - vx_L)) vx_L = 3.0*vx_i - 2.0*vx_R;
if ( 6.0*(vy_R - vy_L)*(vy_i - 0.5*(vy_L + vy_R)) > (vy_R - vy_L)*(vy_R - vy_L)) vy_L = 3.0*vy_i - 2.0*vy_R;
if ( 6.0*(vz_R - vz_L)*(vz_i - 0.5*(vz_L + vz_R)) > (vz_R - vz_L)*(vz_R - vz_L)) vz_L = 3.0*vz_i - 2.0*vz_R;
if ( 6.0*(p_R - p_L) *(p_i - 0.5*(p_L + p_R)) > (p_R - p_L) *(p_R - p_L)) p_L = 3.0*p_i - 2.0*p_R;
if ( 6.0*(d_R - d_L) *(d_i - 0.5*(d_L + d_R)) < -(d_R - d_L) *(d_R - d_L)) d_R = 3.0*d_i - 2.0*d_L;
if ( 6.0*(vx_R - vx_L)*(vx_i - 0.5*(vx_L + vx_R)) < -(vx_R - vx_L)*(vx_R - vx_L)) vx_R = 3.0*vx_i - 2.0*vx_L;
if ( 6.0*(vy_R - vy_L)*(vy_i - 0.5*(vy_L + vy_R)) < -(vy_R - vy_L)*(vy_R - vy_L)) vy_R = 3.0*vy_i - 2.0*vy_L;
if ( 6.0*(vz_R - vz_L)*(vz_i - 0.5*(vz_L + vz_R)) < -(vz_R - vz_L)*(vz_R - vz_L)) vz_R = 3.0*vz_i - 2.0*vz_L;
if ( 6.0*(p_R - p_L) *(p_i - 0.5*(p_L + p_R)) < -(p_R - p_L) *(p_R - p_L)) p_R = 3.0*p_i - 2.0*p_L;
d_L = fmax( fmin(d_i, d_imo), d_L );
d_L = fmin( fmax(d_i, d_imo), d_L );
d_R = fmax( fmin(d_i, d_ipo), d_R );
d_R = fmin( fmax(d_i, d_ipo), d_R );
vx_L = fmax( fmin(vx_i, vx_imo), vx_L );
vx_L = fmin( fmax(vx_i, vx_imo), vx_L );
vx_R = fmax( fmin(vx_i, vx_ipo), vx_R );
vx_R = fmin( fmax(vx_i, vx_ipo), vx_R );
vy_L = fmax( fmin(vy_i, vy_imo), vy_L );
vy_L = fmin( fmax(vy_i, vy_imo), vy_L );
vy_R = fmax( fmin(vy_i, vy_ipo), vy_R );
vy_R = fmin( fmax(vy_i, vy_ipo), vy_R );
vz_L = fmax( fmin(vz_i, vz_imo), vz_L );
vz_L = fmin( fmax(vz_i, vz_imo), vz_L );
vz_R = fmax( fmin(vz_i, vz_ipo), vz_R );
vz_R = fmin( fmax(vz_i, vz_ipo), vz_R );
p_L = fmax( fmin(p_i, p_imo), p_L );
p_L = fmin( fmax(p_i, p_imo), p_L );
p_R = fmax( fmin(p_i, p_ipo), p_R );
p_R = fmin( fmax(p_i, p_ipo), p_R );
#ifdef DE
if ((ge_R - ge_i) *(ge_i - ge_L) <= 0) ge_L = ge_R = ge_i;
if ( 6.0*(ge_R - ge_L) *(ge_i - 0.5*(ge_L + ge_R)) > (ge_R - ge_L) *(ge_R - ge_L)) ge_L = 3.0*ge_i - 2.0*ge_R;
if ( 6.0*(ge_R - ge_L) *(ge_i - 0.5*(ge_L + ge_R)) < -(ge_R - ge_L) *(ge_R - ge_L)) ge_R = 3.0*ge_i - 2.0*ge_L;
ge_L = fmax( fmin(ge_i, ge_imo), ge_L );
ge_L = fmin( fmax(ge_i, ge_imo), ge_L );
ge_R = fmax( fmin(ge_i, ge_ipo), ge_R );
ge_R = fmin( fmax(ge_i, ge_ipo), ge_R );
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
if ((scalar_R[i] - scalar_i[i]) *(scalar_i[i] - scalar_L[i]) <= 0) scalar_L[i] = scalar_R[i] = scalar_i[i];
if ( 6.0*(scalar_R[i] - scalar_L[i]) *(scalar_i[i] - 0.5*(scalar_L[i] + scalar_R[i])) > (scalar_R[i] - scalar_L[i]) *(scalar_R[i] - scalar_L[i])) scalar_L[i] = 3.0*scalar_i[i] - 2.0*scalar_R[i];
if ( 6.0*(scalar_R[i] - scalar_L[i]) *(scalar_i[i] - 0.5*(scalar_L[i] + scalar_R[i])) < -(scalar_R[i] - scalar_L[i]) *(scalar_R[i] - scalar_L[i])) scalar_R[i] = 3.0*scalar_i[i] - 2.0*scalar_L[i];
scalar_L[i] = fmax( fmin(scalar_i[i], scalar_imo[i]), scalar_L[i] );
scalar_L[i] = fmin( fmax(scalar_i[i], scalar_imo[i]), scalar_L[i] );
scalar_R[i] = fmax( fmin(scalar_i[i], scalar_ipo[i]), scalar_R[i] );
scalar_R[i] = fmin( fmax(scalar_i[i], scalar_ipo[i]), scalar_R[i] );
}
#endif
  // Steps 8-10 below perform the characteristic tracing used by the CTU integrator;
  // they are skipped when the VL integrator is used
  #ifndef VL
// Step 8 - Compute the coefficients for the monotonized parabolic interpolation function
// Stone Eqn 54
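  // With these coefficients the interpolation parabola inside cell i is
  //   q(s) = q_L + s*(del_q_m + q_6*(1 - s)),   s = (x - x_{i-1/2})/dx in [0,1],
  // where del_q_m = q_R - q_L and q_6 = 6*(q_i - 0.5*(q_L + q_R)), so its cell average is q_i.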
del_d_m_i = d_R - d_L;
del_vx_m_i = vx_R - vx_L;
del_vy_m_i = vy_R - vy_L;
del_vz_m_i = vz_R - vz_L;
del_p_m_i = p_R - p_L;
d_6 = 6.0*(d_i - 0.5*(d_L + d_R));
vx_6 = 6.0*(vx_i - 0.5*(vx_L + vx_R));
vy_6 = 6.0*(vy_i - 0.5*(vy_L + vy_R));
vz_6 = 6.0*(vz_i - 0.5*(vz_L + vz_R));
p_6 = 6.0*(p_i - 0.5*(p_L + p_R));
#ifdef DE
del_ge_m_i = ge_R - ge_L;
ge_6 = 6.0*(ge_i - 0.5*(ge_L + ge_R));
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
del_scalar_m_i[i] = scalar_R[i] - scalar_L[i];
scalar_6[i] = 6.0*(scalar_i[i] - 0.5*(scalar_L[i] + scalar_R[i]));
}
#endif
// Compute the eigenvalues of the linearized equations in the
  // primitive variables using the cell-centered primitive variables
// recalculate the adiabatic sound speed in cell i
a = sqrt(gamma*p_i/d_i);
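  // lambda_m, lambda_0, lambda_p are the u-a, u, and u+a eigenvalues of the 1D adiabatic
  // Euler system (the entropy and shear waves all travel at u)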
lambda_m = vx_i-a;
lambda_0 = vx_i;
lambda_p = vx_i+a;
// Step 9 - Compute the left and right interface values using monotonized parabolic interpolation
// Stone Eqns 55 & 56
// largest eigenvalue
lambda_max = fmax(lambda_p, (Real) 0);
// smallest eigenvalue
lambda_min = fmin(lambda_m, (Real) 0);
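  // Each edge value is replaced by the average of the parabola over the region swept by the
  // fastest wave approaching that interface during the timestep (Stone Eqns 55 & 56). If all
  // waves move away from an interface, lambda_max / lambda_min is zero and that edge value is
  // left unchanged.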
// left interface value, i+1/2
d_R = d_R - lambda_max * (0.5*dtodx)*(del_d_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*d_6);
vx_R = vx_R - lambda_max * (0.5*dtodx)*(del_vx_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*vx_6);
vy_R = vy_R - lambda_max * (0.5*dtodx)*(del_vy_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*vy_6);
vz_R = vz_R - lambda_max * (0.5*dtodx)*(del_vz_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*vz_6);
p_R = p_R - lambda_max * (0.5*dtodx)*(del_p_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*p_6);
// right interface value, i-1/2
d_L = d_L - lambda_min * (0.5*dtodx)*(del_d_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*d_6);
vx_L = vx_L - lambda_min * (0.5*dtodx)*(del_vx_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*vx_6);
vy_L = vy_L - lambda_min * (0.5*dtodx)*(del_vy_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*vy_6);
vz_L = vz_L - lambda_min * (0.5*dtodx)*(del_vz_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*vz_6);
p_L = p_L - lambda_min * (0.5*dtodx)*(del_p_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*p_6);
#ifdef DE
ge_R = ge_R - lambda_max * (0.5*dtodx)*(del_ge_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*ge_6);
ge_L = ge_L - lambda_min * (0.5*dtodx)*(del_ge_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*ge_6);
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_R[i] = scalar_R[i] - lambda_max * (0.5*dtodx)*(del_scalar_m_i[i] - (1.0 - (2.0/3.0)*lambda_max*dtodx)*scalar_6[i]);
scalar_L[i] = scalar_L[i] - lambda_min * (0.5*dtodx)*(del_scalar_m_i[i] + (1.0 + (2.0/3.0)*lambda_min*dtodx)*scalar_6[i]);
}
#endif
// Step 10 - Perform the characteristic tracing
// Stone Eqns 57 - 60
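  // For each characteristic that can reach the i+1/2 interface (lambda >= 0), the chi terms
  // below measure the difference between the reference-state integral and that wave's own
  // integral; each difference is projected onto the wave's left eigenvector and the result is
  // added back onto the primitive variables through sum_1 ... sum_5.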
// left-hand interface value, i+1/2
sum_1 = 0;
sum_2 = 0;
sum_3 = 0;
sum_4 = 0;
sum_5 = 0;
#ifdef DE
sum_ge = 0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] = 0;
}
#endif
if (lambda_m >= 0)
{
A = (0.5*dtodx) * (lambda_p - lambda_m);
B = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_p*lambda_p - lambda_m*lambda_m);
chi_1 = A*(del_d_m_i - d_6) + B*d_6;
chi_2 = A*(del_vx_m_i - vx_6) + B*vx_6;
chi_3 = A*(del_vy_m_i - vy_6) + B*vy_6;
chi_4 = A*(del_vz_m_i - vz_6) + B*vz_6;
chi_5 = A*(del_p_m_i - p_6) + B*p_6;
sum_1 += -0.5*(d_i*chi_2/a - chi_5/(a*a));
sum_2 += 0.5*(chi_2 - chi_5/(a*d_i));
sum_5 += -0.5*(d_i*chi_2*a - chi_5);
}
if (lambda_0 >= 0)
{
A = (0.5*dtodx) * (lambda_p - lambda_0);
B = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_p*lambda_p - lambda_0*lambda_0);
chi_1 = A*(del_d_m_i - d_6) + B*d_6;
chi_2 = A*(del_vx_m_i - vx_6) + B*vx_6;
chi_3 = A*(del_vy_m_i - vy_6) + B*vy_6;
chi_4 = A*(del_vz_m_i - vz_6) + B*vz_6;
chi_5 = A*(del_p_m_i - p_6) + B*p_6;
#ifdef DE
chi_ge = A*(del_ge_m_i - ge_6) + B*ge_6;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
chi_scalar[i] = A*(del_scalar_m_i[i] - scalar_6[i]) + B*scalar_6[i];
}
#endif
sum_1 += chi_1 - chi_5/(a*a);
sum_3 += chi_3;
sum_4 += chi_4;
#ifdef DE
sum_ge += chi_ge;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] += chi_scalar[i];
}
#endif
}
if (lambda_p >= 0)
{
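    // For this wave A and B vanish identically (the i+1/2 reference state was built from
    // lambda_p itself), so the block adds no correction; it is presumably kept for symmetry
    // with the other two waves.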
A = (0.5*dtodx) * (lambda_p - lambda_p);
B = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_p*lambda_p - lambda_p*lambda_p);
chi_1 = A*(del_d_m_i - d_6) + B*d_6;
chi_2 = A*(del_vx_m_i - vx_6) + B*vx_6;
chi_3 = A*(del_vy_m_i - vy_6) + B*vy_6;
chi_4 = A*(del_vz_m_i - vz_6) + B*vz_6;
chi_5 = A*(del_p_m_i - p_6) + B*p_6;
sum_1 += 0.5*(d_i*chi_2/a + chi_5/(a*a));
sum_2 += 0.5*(chi_2 + chi_5/(a*d_i));
sum_5 += 0.5*(d_i*chi_2*a + chi_5);
}
// add the corrections to the initial guesses for the interface values
d_R += sum_1;
vx_R += sum_2;
vy_R += sum_3;
vz_R += sum_4;
p_R += sum_5;
#ifdef DE
ge_R += sum_ge;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_R[i] += sum_scalar[i];
}
#endif
// right-hand interface value, i-1/2
sum_1 = 0;
sum_2 = 0;
sum_3 = 0;
sum_4 = 0;
sum_5 = 0;
#ifdef DE
sum_ge = 0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] = 0;
}
#endif
if (lambda_m <= 0)
{
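    // For this wave C and D vanish identically (the i-1/2 reference state was built from
    // lambda_m itself), so the block adds no correction; it is presumably kept for symmetry
    // with the other two waves.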
C = (0.5*dtodx) * (lambda_m - lambda_m);
D = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_m*lambda_m - lambda_m*lambda_m);
chi_1 = C*(del_d_m_i + d_6) + D*d_6;
chi_2 = C*(del_vx_m_i + vx_6) + D*vx_6;
chi_3 = C*(del_vy_m_i + vy_6) + D*vy_6;
chi_4 = C*(del_vz_m_i + vz_6) + D*vz_6;
chi_5 = C*(del_p_m_i + p_6) + D*p_6;
sum_1 += -0.5*(d_i*chi_2/a - chi_5/(a*a));
sum_2 += 0.5*(chi_2 - chi_5/(a*d_i));
sum_5 += -0.5*(d_i*chi_2*a - chi_5);
}
if (lambda_0 <= 0)
{
C = (0.5*dtodx) * (lambda_m - lambda_0);
D = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_m*lambda_m - lambda_0*lambda_0);
chi_1 = C*(del_d_m_i + d_6) + D*d_6;
chi_2 = C*(del_vx_m_i + vx_6) + D*vx_6;
chi_3 = C*(del_vy_m_i + vy_6) + D*vy_6;
chi_4 = C*(del_vz_m_i + vz_6) + D*vz_6;
chi_5 = C*(del_p_m_i + p_6) + D*p_6;
#ifdef DE
chi_ge = C*(del_ge_m_i + ge_6) + D*ge_6;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
chi_scalar[i] = C*(del_scalar_m_i[i] + scalar_6[i]) + D*scalar_6[i];
}
#endif
sum_1 += chi_1 - chi_5/(a*a);
sum_3 += chi_3;
sum_4 += chi_4;
#ifdef DE
sum_ge += chi_ge;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] += chi_scalar[i];
}
#endif
}
if (lambda_p <= 0)
{
C = (0.5*dtodx) * (lambda_m - lambda_p);
D = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_m*lambda_m - lambda_p*lambda_p);
chi_1 = C*(del_d_m_i + d_6) + D*d_6;
chi_2 = C*(del_vx_m_i + vx_6) + D*vx_6;
chi_3 = C*(del_vy_m_i + vy_6) + D*vy_6;
chi_4 = C*(del_vz_m_i + vz_6) + D*vz_6;
chi_5 = C*(del_p_m_i + p_6) + D*p_6;
sum_1 += 0.5*(d_i*chi_2/a + chi_5/(a*a));
sum_2 += 0.5*(chi_2 + chi_5/(a*d_i));
sum_5 += 0.5*(d_i*chi_2*a + chi_5);
}
// add the corrections
d_L += sum_1;
vx_L += sum_2;
vy_L += sum_3;
vz_L += sum_4;
p_L += sum_5;
#ifdef DE
ge_L += sum_ge;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_L[i] += sum_scalar[i];
}
#endif
  #endif // not VL (CTU characteristic tracing)
// enforce minimum values
d_L = fmax(d_L, (Real) TINY_NUMBER);
d_R = fmax(d_R, (Real) TINY_NUMBER);
p_L = fmax(p_L, (Real) TINY_NUMBER);
p_R = fmax(p_R, (Real) TINY_NUMBER);
// Step 11 - Send final values back from kernel
// bounds_R refers to the right side of the i-1/2 interface
if (dir == 0) id = xid-1 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid-1)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid-1)*nx*ny;
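  // convert the reconstructed primitive state back to conserved variables
  // (density, momentum components, and total energy E = p/(gamma-1) + 0.5*rho*|v|^2)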
dev_bounds_R[ id] = d_L;
dev_bounds_R[o1*n_cells + id] = d_L*vx_L;
dev_bounds_R[o2*n_cells + id] = d_L*vy_L;
dev_bounds_R[o3*n_cells + id] = d_L*vz_L;
dev_bounds_R[4*n_cells + id] = p_L/(gamma-1.0) + 0.5*d_L*(vx_L*vx_L + vy_L*vy_L + vz_L*vz_L);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_bounds_R[(5+i)*n_cells + id] = d_L*scalar_L[i];
}
#endif
#ifdef DE
dev_bounds_R[(n_fields-1)*n_cells + id] = d_L*ge_L;
#endif
// bounds_L refers to the left side of the i+1/2 interface
id = xid + yid*nx + zid*nx*ny;
dev_bounds_L[ id] = d_R;
dev_bounds_L[o1*n_cells + id] = d_R*vx_R;
dev_bounds_L[o2*n_cells + id] = d_R*vy_R;
dev_bounds_L[o3*n_cells + id] = d_R*vz_R;
dev_bounds_L[4*n_cells + id] = p_R/(gamma-1.0) + 0.5*d_R*(vx_R*vx_R + vy_R*vy_R + vz_R*vz_R);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_bounds_L[(5+i)*n_cells + id] = d_R*scalar_R[i];
}
#endif
#ifdef DE
dev_bounds_L[(n_fields-1)*n_cells + id] = d_R*ge_R;
#endif
}
}
#endif //PPMC
#endif //CUDA
|
77f1b341117452ebb25b0eefd408cca337df8492.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x64x8_32x64x1_8x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x32x8_64x32x1_8x8_8x4_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x128x8_32x64x1_8x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_64x32x1_8x8_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_32x64x1_8x8_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_32x64x1_8x8_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 1
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x32x8_64x32x1_8x8_8x4_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x32x8_64x32x1_8x8_8x4_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x128x8_32x64x1_8x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x128x8_32x64x1_8x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x64x8_64x32x1_8x8_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x256x8_16x64x1_4x8_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x256x8_16x64x1_4x8_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x128x8_32x32x1_8x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x128x8_32x32x1_8x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x256x8_32x64x1_8x8_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x256x8_32x64x1_8x8_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x128x8_64x32x1_8x8_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x128x8_64x32x1_8x8_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
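////////////////////////////////////////////////////////////////////////////////
// Note (explanatory comment, not one of the generated configurations): the
// 128x128x8_64x32x1_8x8_8x4_2x4 tiling above is guarded by
// CUASR_BENCH_LEVEL >= 0, so it is benchmarked at every bench level, while the
// tilings guarded by >= 1 and >= 2 are only enabled at higher levels.
// CUASR_BENCH_LEVEL is assumed to be supplied by the build (for example a
// hypothetical -DCUASR_BENCH_LEVEL=1 compile definition); a single tiling can
// also be selected at run time with Google Benchmark's standard
// --benchmark_filter regex flag, e.g.
//   ./minimum_multiplies_bench --benchmark_filter='128x128x8_64x32x1'
// (the binary name here is illustrative only).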
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x64x8_32x32x1_8x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x128x8_32x64x1_8x8_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 256 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_256x32x8_64x16x1_8x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_256x64x8_64x32x1_8x8_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x128x8_16x32x1_4x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x256x8_16x64x1_4x8_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x64x8_32x16x1_4x4_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x128x8_32x32x1_8x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_256x64x8_64x16x1_8x4_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
| 77f1b341117452ebb25b0eefd408cca337df8492.cu | /***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
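  // 2*N^3 counts one "multiplication" (cuasr::multiplies) and one "addition"
  // (cuasr::minimum) per inner-product term of the N x N x N semi-ring GEMM;
  // kIsIterationInvariantRate then scales this per-iteration work by the
  // iteration count and divides by wall-clock time to report a Flop/s rate.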
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x64x8_32x64x1_8x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x64x8_32x64x1_8x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x32x8_64x32x1_8x8_8x4_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x32x8_64x32x1_8x8_8x4_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x128x8_32x64x1_8x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x128x8_32x64x1_8x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_64x32x1_8x8_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_64x32x1_8x8_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_32x64x1_8x8_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_32x64x1_8x8_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 1
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x32x8_64x32x1_8x8_8x4_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x32x8_64x32x1_8x8_8x4_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x128x8_32x64x1_8x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x128x8_32x64x1_8x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x64x8_64x32x1_8x8_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x64x8_64x32x1_8x8_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x256x8_16x64x1_4x8_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x256x8_16x64x1_4x8_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x128x8_32x32x1_8x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x128x8_32x32x1_8x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x256x8_32x64x1_8x8_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x256x8_32x64x1_8x8_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x128x8_64x32x1_8x8_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x128x8_64x32x1_8x8_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x64x8_32x32x1_8x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x64x8_32x32x1_8x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x128x8_32x64x1_8x8_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x128x8_32x64x1_8x8_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 256 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_256x32x8_64x16x1_8x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_256x32x8_64x16x1_8x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 8
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_256x64x8_64x32x1_8x8_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_256x64x8_64x32x1_8x8_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x128x8_16x32x1_4x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x128x8_16x32x1_4x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 256 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x256x8_16x64x1_4x8_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 256, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_64x256x8_16x64x1_4x8_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x64x8_32x16x1_4x4_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x64x8_32x16x1_4x4_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 128 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x128x8_32x32x1_8x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_128x128x8_32x32x1_8x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 256 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_256x64x8_64x16x1_8x4_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = float;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<256, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::multiplies<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_multiplies_ssrgemm_nt_n_256x64x8_64x16x1_8x4_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
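////////////////////////////////////////////////////////////////////////////////
// Minimal usage sketch (not part of the original file): the BENCHMARK()
// registrations above rely on the standard Google Benchmark entry point, e.g.
//
//   #include <benchmark/benchmark.h>
//   BENCHMARK_MAIN();
//
// after which a single tiling family can be selected at run time with the standard
// filter flag, for example:
//
//   ./cuasr_bench --benchmark_filter='BM_SM50_device_minimum_multiplies.*128x128x8'
//
// (the binary name here is illustrative).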
|
f524a8eee74bd083b9cce6c2c83a2b523ad20a70.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sssp.h"
#include "sssp.hip"
#include <fstream>
#include <stdio.h>
#include <time.h>
ssspMacro;
#include "graph.h"
#include "verify_sssp.h"
void allocateThreads(int* h_G[2]) {
int maxThreadsPerBlock = 1024; // prop.maxThreadsPerBlock;
#ifdef ALLOC_K2EXACTEDGES
numThreadsReq = (NumEdges - 1) / (K1 + K2) + 1;
#else
numThreadsReq = (NumEdges - 1) / (K1) + 1;
#endif
int numBlocks = (numThreadsReq - 1) / maxThreadsPerBlock + 1;
int *h_allocEdgesToThreads0, *h_allocEdgesToThreads1;
int* nodeNeighborOffset;
bool* isEdgeAllocated;
h_allocEdgesToThreads0 = new int [numThreadsReq + 1]();
h_allocEdgesToThreads1 = new int [NumEdges + 1]();
nodeNeighborOffset = new int [NumNodes]();
isEdgeAllocated = new bool [NumEdges]();
for (int i = 0; i < NumNodes; i++) {
nodeNeighborOffset[i] = 0;
}
for (int i = 0; i < NumEdges; i++) {
isEdgeAllocated[i] = false;
}
int currentEdge = 0, currentThread = 0, threadIndex = 0;
//allocK1IncomingEdgesToThreads();
/*for (int i = 0; i < NumEdges; i += K1) {
for (int j = 0; j < K1; j++) {
h_allocEdgesToThreads1[currentEdge] = currentThread;
currentEdge++;
}
currentThread++;
}*/
currentEdge = 0;
currentThread = 0;
h_allocEdgesToThreads0[0] = 0;
for (int i = 0; i < NumEdges && threadIndex < NumEdges; i += K1) {
h_allocEdgesToThreads0[currentThread] = threadIndex;
/*
// currentEdge = currentThread * (K1 + K2);
for (int j = 0; j < K1; j++) {
h_allocEdgesToThreads1[threadIndex] = threadIndex;
//currentEdge++;
threadIndex++;
}*/
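// Advance currentEdge past edges already claimed by earlier threads, then give this
// thread up to K1 still-unassigned edges, remembering the last one taken.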
while (isEdgeAllocated[currentEdge++] && currentEdge < NumEdges);
currentEdge--;
int lastIncomingEdge = currentEdge;
for (int j = 0, k = 0; k < K1 && (currentEdge + j) < NumEdges; j++) {
if (!isEdgeAllocated[currentEdge + j]) {
h_allocEdgesToThreads1[threadIndex] = currentEdge + j;
isEdgeAllocated[currentEdge + j] = 1;
k++;
threadIndex++;
lastIncomingEdge = currentEdge + j;
}
}
//printf("###Thread: %d\n", currentThread);
int j;
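// For the edges just taken, additionally assign to this thread up to K2 (in total)
// still-unassigned outgoing edges of their endpoint nodes h_G[1][currentEdge].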
for (j = 0; j < K2 && currentEdge <= lastIncomingEdge && currentEdge < NumEdges && threadIndex < NumEdges;) {
int currentNode = h_G[1][currentEdge];
int numNeighbors = h_G[0][currentNode + 1] - h_G[0][currentNode];
if (numNeighbors <= nodeNeighborOffset[currentNode]) {
currentEdge++;
} else {
for (int k = nodeNeighborOffset[currentNode]; j < K2 && k < numNeighbors && threadIndex < NumEdges; k++) {
if (!isEdgeAllocated[h_G[0][currentNode] + k]) {
h_allocEdgesToThreads1[threadIndex++] = h_G[0][currentNode] + k;
isEdgeAllocated[h_G[0][currentNode] + k] = true;
j++;
nodeNeighborOffset[currentNode]++;
}
}
currentEdge++;
}
/*else if ((numNeighbors - nodeNeighborOffset[currentNode]) >= (K2 - j)) {
printf(" Allocate %d outgoing Edges from the node %d from index %d\n", K2 - j, currentNode, threadIndex);
for (int k = 0; k < (K2 - j) && threadIndex < NumEdges; k++) {
h_allocEdgesToThreads1[threadIndex++] = h_G[0][currentNode] + k + nodeNeighborOffset[currentNode];
isEdgeAllocated[h_G[0][currentNode] + k + nodeNeighborOffset[currentNode]] = 1;
}
nodeNeighborOffset[currentNode] += K2 - j;
currentEdge++;
j = K2;
} else {
printf(" Allocate %d outgoing Edges from the node %d from index %d\n", (numNeighbors - nodeNeighborOffset[currentNode]), currentNode, threadIndex);
for (int k = 0; k < (numNeighbors - nodeNeighborOffset[currentNode]) && threadIndex < NumEdges; k++) {
h_allocEdgesToThreads1[threadIndex++] = h_G[0][currentNode] + k + nodeNeighborOffset[currentNode];
isEdgeAllocated[h_G[0][currentNode] + k + nodeNeighborOffset[currentNode]] = 1;
}
j += numNeighbors - nodeNeighborOffset[currentNode];
nodeNeighborOffset[currentNode] = numNeighbors;
currentEdge++;
}*/
}
// threadIndex++;
#ifdef ALLOC_K2EXACTEDGES
// If K2 outgoing edges are not available, then select any edges for K2.
if (j < K2) {
for (; j < K2 && currentEdge < NumEdges;) {
if (!isEdgeAllocated[currentEdge]) {
h_allocEdgesToThreads1[threadIndex++] = currentEdge;
isEdgeAllocated[currentEdge] = 1;
j++;
}
currentEdge++;
}
}
#endif
currentThread++;
}
h_allocEdgesToThreads0[currentThread] = threadIndex;
#ifdef ALLOC_K2EXACTEDGES
;
#else
numThreadsReq = currentThread;
#endif
/*printf("ALlocated Edges per thread");
for (int i = 0; i < numThreadsReq; i++) {
printf("Thread: %d ## Allocated Edges = ", i);
for (int j = h_allocEdgesToThreads0[i]; j < h_allocEdgesToThreads0[i + 1]; j++) {
printf("%d ", h_allocEdgesToThreads1[j]);
}
printf("\n");
}*/
err = hipMalloc((void **)&allocEdgesToThreads0, (numThreadsReq + 1) * sizeof(int));
CUDA_ERR_CHECK;
err = hipMalloc((void **)&allocEdgesToThreads1, (NumEdges + 1) * sizeof(int));
CUDA_ERR_CHECK;
err = hipMemcpy(allocEdgesToThreads0, h_allocEdgesToThreads0, (numThreadsReq + 1) * sizeof(int), hipMemcpyHostToDevice);
CUDA_ERR_CHECK;
err = hipMemcpy(allocEdgesToThreads1, h_allocEdgesToThreads1, (NumEdges + 1) * sizeof(int), hipMemcpyHostToDevice);
CUDA_ERR_CHECK;
//printf("Device print Allocated Edges per thread\n");
//printAllocThreads<<<numBlocks, maxThreadsPerBlock>>>(numThreadsReq, allocEdgesToThreads0, allocEdgesToThreads1);
}
void sssp_CPU(int* G0, int* G1, int * dist, int * len, int root) {
{
fin = false;
hipOccupancyMaxPotentialBlockSize(&gm_minGridSize, &gm_blockSize,forEachKernel0, 0, 0);
gm_minGridSize = 1024;
gm_gridSize = (NumNodes + 1 + gm_blockSize - 1) / gm_blockSize;
gm_numBlocksStillToProcess = gm_gridSize, gm_offsetIntoBlocks = 0;
while (gm_numBlocksStillToProcess > 0) {
if (gm_numBlocksStillToProcess > gm_minGridSize)
gm_numBlocksKernelParameter = gm_minGridSize;
else
gm_numBlocksKernelParameter = gm_numBlocksStillToProcess;
hipLaunchKernelGGL(( forEachKernel0), dim3(gm_numBlocksKernelParameter), dim3(gm_blockSize), 0, 0, G0, G1, NumNodes, NumEdges, edgeFrom, dist, len, root, updated, dist_nxt, updated_nxt, gm_offsetIntoBlocks);
CUDA_ERR_CHECK;
gm_numBlocksStillToProcess -= gm_minGridSize;
gm_offsetIntoBlocks += gm_minGridSize * gm_blockSize;
}
while ( !fin)
{
fin = true;
h___E8 = false;
err = hipMemcpyToSymbol(__E8, &h___E8, sizeof(bool), 0, hipMemcpyHostToDevice);
CUDA_ERR_CHECK;
hipOccupancyMaxPotentialBlockSize(&gm_minGridSize, &gm_blockSize,forEachKernel1, 0, 0);
gm_minGridSize = 1024;
gm_gridSize = (NumEdges + gm_blockSize - 1) / gm_blockSize;
gm_numBlocksStillToProcess = gm_gridSize, gm_offsetIntoBlocks = 0;
int iteration = 0;
while (gm_numBlocksStillToProcess > 0) {
if (gm_numBlocksStillToProcess > gm_minGridSize)
gm_numBlocksKernelParameter = gm_minGridSize;
else
gm_numBlocksKernelParameter = gm_numBlocksStillToProcess;
//printf("Kernel para = (%d, %d)\n", gm_numBlocksKernelParameter, gm_blockSize);
hipLaunchKernelGGL(( forEachKernel1), dim3(gm_numBlocksKernelParameter), dim3(gm_blockSize), 0, 0, G0, G1, NumNodes, NumEdges, edgeFrom, dist, len, root, updated, dist_nxt, updated_nxt, gm_offsetIntoBlocks, allocEdgesToThreads0, allocEdgesToThreads1, numThreadsReq);
CUDA_ERR_CHECK;
gm_numBlocksStillToProcess -= gm_minGridSize;
gm_offsetIntoBlocks += gm_minGridSize * gm_blockSize;
iteration++;
}
//printf("# Iterations = %d\n", iteration);
/*int blockSize = 1024;
int numBlocks = (NumEdges - 1) / blockSize + 1;
printf("Kernel para = (%d, %d)\n", numBlocks, blockSize);
forEachKernel1<<<numBlocks, blockSize>>>(G0, G1, NumNodes, NumEdges, edgeFrom, dist, len, root, updated, dist_nxt, updated_nxt, gm_offsetIntoBlocks, allocEdgesToThreads0, allocEdgesToThreads1, numThreadsReq);
CUDA_ERR_CHECK;*/
hipOccupancyMaxPotentialBlockSize(&gm_minGridSize, &gm_blockSize,forEachKernel2, 0, 0);
gm_minGridSize = 1024;
gm_gridSize = (NumNodes + 1 + gm_blockSize - 1) / gm_blockSize;
gm_numBlocksStillToProcess = gm_gridSize, gm_offsetIntoBlocks = 0;
while (gm_numBlocksStillToProcess > 0) {
if (gm_numBlocksStillToProcess > gm_minGridSize)
gm_numBlocksKernelParameter = gm_minGridSize;
else
gm_numBlocksKernelParameter = gm_numBlocksStillToProcess;
hipLaunchKernelGGL(( forEachKernel2), dim3(gm_numBlocksKernelParameter), dim3(gm_blockSize), 0, 0, G0, G1, NumNodes, NumEdges, edgeFrom, dist, len, root, dist_nxt, updated, updated_nxt, gm_offsetIntoBlocks);
CUDA_ERR_CHECK;
gm_numBlocksStillToProcess -= gm_minGridSize;
gm_offsetIntoBlocks += gm_minGridSize * gm_blockSize;
}
err = hipMemcpyFromSymbol(&h___E8, __E8, sizeof(bool), 0, hipMemcpyDeviceToHost);
CUDA_ERR_CHECK;
fin = !h___E8;
}
}
}
using namespace std;
// sssp -? : for how to run generated main program
int main(int argc, char* argv[])
{
if (argc != 2 || argv[1] == NULL) {
printf("Wrong Number of Arguments");
exit(1);
}
ifstream inputFile;
inputFile.open(argv[1]);
if (!inputFile.is_open()){
printf("invalid file");
exit(1);
}
hipSetDevice(7);
inputFile >> NumNodes >> NumEdges;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
err = hipMalloc((void **)&G0, (NumNodes + 2) * sizeof(int));
CUDA_ERR_CHECK;
err = hipMalloc((void **)&G1, (NumEdges + 1) * sizeof(int));
CUDA_ERR_CHECK;
err = hipMalloc((void **)&dist, (NumNodes + 1) * sizeof(int));
CUDA_ERR_CHECK;
err = hipMalloc((void **)&dist_nxt, (NumNodes + 1) * sizeof(int));
CUDA_ERR_CHECK;
err = hipMalloc((void **)&len, (NumEdges + 1) * sizeof(int));
CUDA_ERR_CHECK;
err = hipMalloc((void **)&updated, (NumNodes + 1) * sizeof(bool));
CUDA_ERR_CHECK;
err = hipMalloc((void **)&updated_nxt, (NumNodes + 1) * sizeof(bool));
CUDA_ERR_CHECK;
err = hipMalloc((void **)&edgeFrom, (NumEdges + 1) * sizeof(int));
CUDA_ERR_CHECK;
err = hipMalloc((void **)&host_threadBlockBarrierReached, 10000 * sizeof(bool));
CUDA_ERR_CHECK;
err = hipMemset(host_threadBlockBarrierReached, 0x0, 10000 * sizeof(bool));
CUDA_ERR_CHECK;
err = hipMemcpyToSymbol(gm_threadBlockBarrierReached, &host_threadBlockBarrierReached, sizeof(bool *), 0, hipMemcpyHostToDevice);
CUDA_ERR_CHECK;
int* h_G[2];
printf("Graph Population began\n");
populate(argv[1], h_G);
printf("Graph Population end\n");
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("Loading time(milliseconds) = %f\n", elapsedTime);
clock_t cpuStart, cpuEnd;
cpuStart = clock();
/*hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);*/
numThreadsReq = (NumEdges - 1) / K1 + 1;
//allocateThreads(h_G);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
cpuEnd = clock();
elapsedTime = ((double) (cpuEnd - cpuStart)) / CLOCKS_PER_SEC;
/* printf("Wall Time = %d\n", wall1 - wall0);
printf("CPU Time = %d\n", cpu1 - cpu0);
*/
printf("Allocating Threads time(milliseconds) = %f\n", elapsedTime);
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
sssp_CPU(G0, G1, dist, len, root);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("Execution time(milliseconds) = %f\n", elapsedTime);
bool gm_verify = verifysssp(h_G);
if (!gm_verify) {
printf("Verification Failed\n");
return -1;
} else {
printf("Verification Success\n");
}
err = hipFree(G0);
CUDA_ERR_CHECK;
err = hipFree(G1);
CUDA_ERR_CHECK;
err = hipFree(dist);
CUDA_ERR_CHECK;
err = hipFree(dist_nxt);
CUDA_ERR_CHECK;
err = hipFree(len);
CUDA_ERR_CHECK;
err = hipFree(updated);
CUDA_ERR_CHECK;
err = hipFree(updated_nxt);
CUDA_ERR_CHECK;
err = hipFree(host_threadBlockBarrierReached);
CUDA_ERR_CHECK;
free(h_G[0]);
free(h_G[1]);
return 0;
}
| f524a8eee74bd083b9cce6c2c83a2b523ad20a70.cu | #include "sssp.h"
#include "sssp.cu"
#include <fstream>
#include <stdio.h>
#include <time.h>
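// NOTE (assumption): ssspMacro below presumably expands to the shared global declarations (graph arrays, device pointers, timers, error handle) that this file uses without declaring; its definition is expected to live in sssp.h.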
ssspMacro;
#include "graph.h"
#include "verify_sssp.h"
void allocateThreads(int* h_G[2]) {
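// Rough summary of the allocation loop below: each thread claims up to K1 still-unallocated
// edges, then up to K2 outgoing edges of those edges' destination nodes; the result is stored
// CSR-style (h_allocEdgesToThreads0 = per-thread offsets into h_allocEdgesToThreads1).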
int maxThreadsPerBlock = 1024;//prop.maxThreadsPerBlock;
#ifdef ALLOC_K2EXACTEDGES
numThreadsReq = (NumEdges - 1) / (K1 + K2) + 1;
#else
numThreadsReq = (NumEdges - 1) / (K1) + 1;
#endif
int numBlocks = (numThreadsReq - 1) / maxThreadsPerBlock + 1;
int *h_allocEdgesToThreads0, *h_allocEdgesToThreads1;
int* nodeNeighborOffset;
bool* isEdgeAllocated;
h_allocEdgesToThreads0 = new int [numThreadsReq + 1]();
h_allocEdgesToThreads1 = new int [NumEdges + 1]();
nodeNeighborOffset = new int [NumNodes]();
isEdgeAllocated = new bool [NumEdges]();
for (int i = 0; i < NumNodes; i++) {
nodeNeighborOffset[i] = 0;
}
for (int i = 0; i < NumEdges; i++) {
isEdgeAllocated[i] = false;
}
int currentEdge = 0, currentThread = 0, threadIndex = 0;
//allocK1IncomingEdgesToThreads();
/*for (int i = 0; i < NumEdges; i += K1) {
for (int j = 0; j < K1; j++) {
h_allocEdgesToThreads1[currentEdge] = currentThread;
currentEdge++;
}
currentThread++;
}*/
currentEdge = 0;
currentThread = 0;
h_allocEdgesToThreads0[0] = 0;
for (int i = 0; i < NumEdges && threadIndex < NumEdges; i += K1) {
h_allocEdgesToThreads0[currentThread] = threadIndex;
/*
// currentEdge = currentThread * (K1 + K2);
for (int j = 0; j < K1; j++) {
h_allocEdgesToThreads1[threadIndex] = threadIndex;
//currentEdge++;
threadIndex++;
}*/
while (isEdgeAllocated[currentEdge++] && currentEdge < NumEdges);
currentEdge--;
int lastIncomingEdge = currentEdge;
for (int j = 0, k = 0; k < K1 && (currentEdge + j) < NumEdges; j++) {
if (!isEdgeAllocated[currentEdge + j]) {
h_allocEdgesToThreads1[threadIndex] = currentEdge + j;
isEdgeAllocated[currentEdge + j] = 1;
k++;
threadIndex++;
lastIncomingEdge = currentEdge + j;
}
}
//printf("###Thread: %d\n", currentThread);
int j;
for (j = 0; j < K2 && currentEdge <= lastIncomingEdge && currentEdge < NumEdges && threadIndex < NumEdges;) {
int currentNode = h_G[1][currentEdge];
int numNeighbors = h_G[0][currentNode + 1] - h_G[0][currentNode];
if (numNeighbors <= nodeNeighborOffset[currentNode]) {
currentEdge++;
} else {
for (int k = nodeNeighborOffset[currentNode]; j < K2 && k < numNeighbors && threadIndex < NumEdges; k++) {
if (!isEdgeAllocated[h_G[0][currentNode] + k]) {
h_allocEdgesToThreads1[threadIndex++] = h_G[0][currentNode] + k;
isEdgeAllocated[h_G[0][currentNode] + k] = true;
j++;
nodeNeighborOffset[currentNode] ++;
}
}
currentEdge++;
}
/*else if ((numNeighbors - nodeNeighborOffset[currentNode]) >= (K2 - j)) {
printf(" Allocate %d outgoing Edges from the node %d from index %d\n", K2 - j, currentNode, threadIndex);
for (int k = 0; k < (K2 - j) && threadIndex < NumEdges; k++) {
h_allocEdgesToThreads1[threadIndex++] = h_G[0][currentNode] + k + nodeNeighborOffset[currentNode];
isEdgeAllocated[h_G[0][currentNode] + k + nodeNeighborOffset[currentNode]] = 1;
}
nodeNeighborOffset[currentNode] += K2 - j;
currentEdge++;
j = K2;
} else {
printf(" Allocate %d outgoing Edges from the node %d from index %d\n", (numNeighbors - nodeNeighborOffset[currentNode]), currentNode, threadIndex);
for (int k = 0; k < (numNeighbors - nodeNeighborOffset[currentNode]) && threadIndex < NumEdges; k++) {
h_allocEdgesToThreads1[threadIndex++] = h_G[0][currentNode] + k + nodeNeighborOffset[currentNode];
isEdgeAllocated[h_G[0][currentNode] + k + nodeNeighborOffset[currentNode]] = 1;
}
j += numNeighbors - nodeNeighborOffset[currentNode];
nodeNeighborOffset[currentNode] = numNeighbors;
currentEdge++;
}*/
}
// threadIndex++;
#ifdef ALLOC_K2EXACTEDGES
// If K2 outgoing edges are not available, then select any edges for K2.
if (j < K2) {
for (; j < K2 && currentEdge < NumEdges;) {
if (!isEdgeAllocated[currentEdge]) {
h_allocEdgesToThreads1[threadIndex++] = currentEdge;
isEdgeAllocated[currentEdge] = 1;
j++;
}
currentEdge++;
}
}
#endif
currentThread++;
}
h_allocEdgesToThreads0[currentThread] = threadIndex;
#ifdef ALLOC_K2EXACTEDGES
;
#else
numThreadsReq = currentThread;
#endif
/*printf("ALlocated Edges per thread");
for (int i = 0; i < numThreadsReq; i++) {
printf("Thread: %d ## Allocated Edges = ", i);
for (int j = h_allocEdgesToThreads0[i]; j < h_allocEdgesToThreads0[i + 1]; j++) {
printf("%d ", h_allocEdgesToThreads1[j]);
}
printf("\n");
}*/
err = cudaMalloc((void **)&allocEdgesToThreads0, (numThreadsReq + 1) * sizeof(int));
CUDA_ERR_CHECK;
err = cudaMalloc((void **)&allocEdgesToThreads1, (NumEdges + 1) * sizeof(int));
CUDA_ERR_CHECK;
err = cudaMemcpy(allocEdgesToThreads0, h_allocEdgesToThreads0, (numThreadsReq + 1) * sizeof(int), cudaMemcpyHostToDevice);
CUDA_ERR_CHECK;
err = cudaMemcpy(allocEdgesToThreads1, h_allocEdgesToThreads1, (NumEdges + 1) * sizeof(int), cudaMemcpyHostToDevice);
CUDA_ERR_CHECK;
//printf("Device print Allocated Edges per thread\n");
//printAllocThreads<<<numBlocks, maxThreadsPerBlock>>>(numThreadsReq, allocEdgesToThreads0, allocEdgesToThreads1);
}
void sssp_CPU(int* G0, int* G1, int * dist, int * len, int root) {
{
fin = false;
cudaOccupancyMaxPotentialBlockSize(&gm_minGridSize, &gm_blockSize,forEachKernel0, 0, 0);
gm_minGridSize = 1024;
gm_gridSize = (NumNodes + 1 + gm_blockSize - 1) / gm_blockSize;
gm_numBlocksStillToProcess = gm_gridSize, gm_offsetIntoBlocks = 0;
while (gm_numBlocksStillToProcess > 0) {
if (gm_numBlocksStillToProcess > gm_minGridSize)
gm_numBlocksKernelParameter = gm_minGridSize;
else
gm_numBlocksKernelParameter = gm_numBlocksStillToProcess;
forEachKernel0<<<gm_numBlocksKernelParameter, gm_blockSize>>>(G0, G1, NumNodes, NumEdges, edgeFrom, dist, len, root, updated, dist_nxt, updated_nxt, gm_offsetIntoBlocks);
CUDA_ERR_CHECK;
gm_numBlocksStillToProcess -= gm_minGridSize;
gm_offsetIntoBlocks += gm_minGridSize * gm_blockSize;
}
while ( !fin)
{
fin = true;
h___E8 = false;
err = cudaMemcpyToSymbol(__E8, &h___E8, sizeof(bool), 0, cudaMemcpyHostToDevice);
CUDA_ERR_CHECK;
cudaOccupancyMaxPotentialBlockSize(&gm_minGridSize, &gm_blockSize,forEachKernel1, 0, 0);
gm_minGridSize = 1024;
gm_gridSize = (NumEdges + gm_blockSize - 1) / gm_blockSize;
gm_numBlocksStillToProcess = gm_gridSize, gm_offsetIntoBlocks = 0;
int iteration = 0;
while (gm_numBlocksStillToProcess > 0) {
if (gm_numBlocksStillToProcess > gm_minGridSize)
gm_numBlocksKernelParameter = gm_minGridSize;
else
gm_numBlocksKernelParameter = gm_numBlocksStillToProcess;
//printf("Kernel para = (%d, %d)\n", gm_numBlocksKernelParameter, gm_blockSize);
forEachKernel1<<<gm_numBlocksKernelParameter, gm_blockSize>>>(G0, G1, NumNodes, NumEdges, edgeFrom, dist, len, root, updated, dist_nxt, updated_nxt, gm_offsetIntoBlocks, allocEdgesToThreads0, allocEdgesToThreads1, numThreadsReq);
CUDA_ERR_CHECK;
gm_numBlocksStillToProcess -= gm_minGridSize;
gm_offsetIntoBlocks += gm_minGridSize * gm_blockSize;
iteration++;
}
//printf("# Iterations = %d\n", iteration);
/*int blockSize = 1024;
int numBlocks = (NumEdges - 1) / blockSize + 1;
printf("Kernel para = (%d, %d)\n", numBlocks, blockSize);
forEachKernel1<<<numBlocks, blockSize>>>(G0, G1, NumNodes, NumEdges, edgeFrom, dist, len, root, updated, dist_nxt, updated_nxt, gm_offsetIntoBlocks, allocEdgesToThreads0, allocEdgesToThreads1, numThreadsReq);
CUDA_ERR_CHECK;*/
cudaOccupancyMaxPotentialBlockSize(&gm_minGridSize, &gm_blockSize,forEachKernel2, 0, 0);
gm_minGridSize = 1024;
gm_gridSize = (NumNodes + 1 + gm_blockSize - 1) / gm_blockSize;
gm_numBlocksStillToProcess = gm_gridSize, gm_offsetIntoBlocks = 0;
while (gm_numBlocksStillToProcess > 0) {
if (gm_numBlocksStillToProcess > gm_minGridSize)
gm_numBlocksKernelParameter = gm_minGridSize;
else
gm_numBlocksKernelParameter = gm_numBlocksStillToProcess;
forEachKernel2<<<gm_numBlocksKernelParameter, gm_blockSize>>>(G0, G1, NumNodes, NumEdges, edgeFrom, dist, len, root, dist_nxt, updated, updated_nxt, gm_offsetIntoBlocks);
CUDA_ERR_CHECK;
gm_numBlocksStillToProcess -= gm_minGridSize;
gm_offsetIntoBlocks += gm_minGridSize * gm_blockSize;
}
err = cudaMemcpyFromSymbol(&h___E8, __E8, sizeof(bool), 0, cudaMemcpyDeviceToHost);
CUDA_ERR_CHECK;
fin = !h___E8;
}
}
}
using namespace std;
// sssp -? : for how to run generated main program
int main(int argc, char* argv[])
{
if (argc != 2 || argv[1] == NULL) {
printf("Wrong Number of Arguments");
exit(1);
}
ifstream inputFile;
inputFile.open(argv[1]);
if (!inputFile.is_open()){
printf("invalid file");
exit(1);
}
cudaSetDevice(7);
inputFile >> NumNodes >> NumEdges;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
err = cudaMalloc((void **)&G0, (NumNodes + 2) * sizeof(int));
CUDA_ERR_CHECK;
err = cudaMalloc((void **)&G1, (NumEdges + 1) * sizeof(int));
CUDA_ERR_CHECK;
err = cudaMalloc((void **)&dist, (NumNodes + 1) * sizeof(int));
CUDA_ERR_CHECK;
err = cudaMalloc((void **)&dist_nxt, (NumNodes + 1) * sizeof(int));
CUDA_ERR_CHECK;
err = cudaMalloc((void **)&len, (NumEdges + 1) * sizeof(int));
CUDA_ERR_CHECK;
err = cudaMalloc((void **)&updated, (NumNodes + 1) * sizeof(bool));
CUDA_ERR_CHECK;
err = cudaMalloc((void **)&updated_nxt, (NumNodes + 1) * sizeof(bool));
CUDA_ERR_CHECK;
err = cudaMalloc((void **)&edgeFrom, (NumEdges + 1) * sizeof(int));
CUDA_ERR_CHECK;
err = cudaMalloc((void **)&host_threadBlockBarrierReached, 10000 * sizeof(bool));
CUDA_ERR_CHECK;
err = cudaMemset(host_threadBlockBarrierReached, 0x0, 10000 * sizeof(bool));
CUDA_ERR_CHECK;
err = cudaMemcpyToSymbol(gm_threadBlockBarrierReached, &host_threadBlockBarrierReached, sizeof(bool *), 0, cudaMemcpyHostToDevice);
CUDA_ERR_CHECK;
int* h_G[2];
printf("Graph Population began\n");
populate(argv[1], h_G);
printf("Graph Population end\n");
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Loading time(milliseconds) = %f\n", elapsedTime);
clock_t cpuStart, cpuEnd;
cpuStart = clock();
/*cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);*/
numThreadsReq = (NumEdges - 1) / K1 + 1;
//allocateThreads(h_G);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cpuEnd = clock();
elapsedTime = ((double) (cpuEnd - cpuStart)) / CLOCKS_PER_SEC;
/* printf("Wall Time = %d\n", wall1 - wall0);
printf("CPU Time = %d\n", cpu1 - cpu0);
*/
printf("Allocating Threads time(milliseconds) = %f\n", elapsedTime);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
sssp_CPU(G0, G1, dist, len, root);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Execution time(milliseconds) = %f\n", elapsedTime);
bool gm_verify = verifysssp(h_G);
if (!gm_verify) {
printf("Verification Failed\n");
return -1;
} else {
printf("Verification Success\n");
}
err = cudaFree(G0);
CUDA_ERR_CHECK;
err = cudaFree(G1);
CUDA_ERR_CHECK;
err = cudaFree(dist);
CUDA_ERR_CHECK;
err = cudaFree(dist_nxt);
CUDA_ERR_CHECK;
err = cudaFree(len);
CUDA_ERR_CHECK;
err = cudaFree(updated);
CUDA_ERR_CHECK;
err = cudaFree(updated_nxt);
CUDA_ERR_CHECK;
err = cudaFree(host_threadBlockBarrierReached);
CUDA_ERR_CHECK;
free(h_G[0]);
free(h_G[1]);
return 0;
}
|
153cd96d818bf0427440604da6e2d185358be020.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
namespace ann {
// CUDA2
}
__global__ void kernel_feedforward( int layer_id, int *l, int *s, int *sw, float *z_arr, float *a_arr, float *w_arr ){
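// Each thread computes one non-bias neuron of layer layer_id: accumulate the pre-activation z
// over the previous layer's activations and apply the logistic (sigmoid) activation.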
volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;
int neuron_count = l[layer_id];
int neuron_count_prev = l[layer_id-1];
//printf("layer = %d idx = %d count = %d\n", layer_id, idx, neuron_count-1);
if(idx >= neuron_count-1) return;
float z = 0;
for(int k = 0; k < neuron_count_prev; k++){
z += w_arr[sw[layer_id-1] + k*(neuron_count - 1) + idx]*a_arr[s[layer_id-1] + k];
// printf("w_arr[%d] * a_arr[%d] = %.20f\n",
// sw[layer_id-1] + k*(neuron_count - 1) + idx ,
// s[layer_id-1] + k,
// w_arr[sw[layer_id-1] + k*(neuron_count - 1) + idx]*a_arr[s[layer_id-1] + k]);
// printf("%.10f * %.10f = %.10f\n", w_arr[sw[layer_id-1] + k*(neuron_count - 1) + idx ],
// a_arr[s[layer_id-1] + k],
// w_arr[sw[layer_id-1] + k*(neuron_count - 1) + idx]*a_arr[s[layer_id-1] + k]
// );
}
z_arr[s[layer_id] + idx] = z;
float a = 1.0 / (1.0 + expf(-z));
a_arr[s[layer_id] + idx] = a;
// printf("index = %d z = %.5f\n", s[layer_id] + idx, z);
// printf("a = %.20f\n", a);
} | 153cd96d818bf0427440604da6e2d185358be020.cu | #include "includes.h"
namespace ann {
// CUDA2
}
__global__ void kernel_feedforward( int layer_id, int *l, int *s, int *sw, float *z_arr, float *a_arr, float *w_arr ){
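// Each thread computes one non-bias neuron of layer layer_id: accumulate the pre-activation z
// over the previous layer's activations and apply the logistic (sigmoid) activation.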
volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;
int neuron_count = l[layer_id];
int neuron_count_prev = l[layer_id-1];
//printf("layer = %d idx = %d count = %d\n", layer_id, idx, neuron_count-1);
if(idx >= neuron_count-1) return;
float z = 0;
for(int k = 0; k < neuron_count_prev; k++){
z += w_arr[sw[layer_id-1] + k*(neuron_count - 1) + idx]*a_arr[s[layer_id-1] + k];
// printf("w_arr[%d] * a_arr[%d] = %.20f\n",
// sw[layer_id-1] + k*(neuron_count - 1) + idx ,
// s[layer_id-1] + k,
// w_arr[sw[layer_id-1] + k*(neuron_count - 1) + idx]*a_arr[s[layer_id-1] + k]);
// printf("%.10f * %.10f = %.10f\n", w_arr[sw[layer_id-1] + k*(neuron_count - 1) + idx ],
// a_arr[s[layer_id-1] + k],
// w_arr[sw[layer_id-1] + k*(neuron_count - 1) + idx]*a_arr[s[layer_id-1] + k]
// );
}
z_arr[s[layer_id] + idx] = z;
float a = 1.0 / (1.0 + expf(-z));
a_arr[s[layer_id] + idx] = a;
// printf("index = %d z = %.5f\n", s[layer_id] + idx, z);
// printf("a = %.20f\n", a);
} |
932478178d4d8382815d93ebdf6903df14b486d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
#include <cmath>
#include <ctime>
#include <numeric>
#include "book.h"
const int Nstart = 1000;
const int Ntimes = 4;
const int Nhist = 100;
#define HISTINT long long
using namespace std;
// Define a particle storage class
struct Particles {
vector<float> x, y, z;
int N;
};
struct ParticlesGPU {
float *x, *y, *z;
};
void AllocCopyGPU(Particles &p, ParticlesGPU &p2) {
// x
HANDLE_ERROR( hipMalloc ( (void**)&p2.x, p.N * sizeof(float)));
HANDLE_ERROR( hipMemcpy ( p2.x, &p.x[0], p.N * sizeof(float), hipMemcpyHostToDevice));
// y
HANDLE_ERROR( hipMalloc ( (void**)&p2.y, p.N * sizeof(float)));
HANDLE_ERROR( hipMemcpy ( p2.y, &p.y[0], p.N * sizeof(float), hipMemcpyHostToDevice));
// z
HANDLE_ERROR( hipMalloc ( (void**)&p2.z, p.N * sizeof(float)));
HANDLE_ERROR( hipMemcpy ( p2.z, &p.z[0], p.N * sizeof(float), hipMemcpyHostToDevice));
}
void FreeGPU(ParticlesGPU &p) {
hipFree(p.x);
hipFree(p.y);
hipFree(p.z);
}
void makeRandomParticles(int N, Particles &p) {
// Set number of particles
p.N = N;
// Resize the vectors
p.x.resize(N);
p.y.resize(N);
p.z.resize(N);
// Fill in the vectors
for (int ii=0; ii < N; ++ii) {
p.x[ii] = float(rand())/float(RAND_MAX);
p.y[ii] = float(rand())/float(RAND_MAX);
p.z[ii] = float(rand())/float(RAND_MAX);
}
};
// Define the GPU kernel here
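// Each block accumulates into its own Nh-bin slice of hist (offset = blockIdx.x * Nh) to reduce
// atomic contention; reduce_histogram below folds every per-block slice into block 0's slice.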
__global__ void paircount_kernel(
int N1, float *x1, float *y1, float *z1,
int N2, float *x2, float *y2, float *z2,
int Nh, HISTINT *hist) {
// We distribute p1, but loop through all of p2
int ii, jj, idr;
int stride = blockDim.x * gridDim.x;
float x, y, z, dx, dy, dz, dr;
ii = threadIdx.x + blockIdx.x * blockDim.x;
int offset = blockIdx.x * Nh;
while (ii < N1) {
x = x1[ii]; y = y1[ii]; z = z1[ii];
for (jj = 0; jj < N2; ++jj) {
dx = x2[jj] - x;
dy = y2[jj] - y;
dz = z2[jj] - z;
dr = sqrtf(dx*dx + dy*dy + dz*dz);
idr = (int) (dr*Nh);
if (idr < Nh) atomicAdd( (unsigned long long*) &hist[idr + offset], 1ll);
}
ii += stride;
}
}
// Define the histogram summing kernel here
__global__ void reduce_histogram(int Nh, HISTINT *hist) {
int ii = threadIdx.x ;
int offset = blockIdx.x * Nh;
if (blockIdx.x > 0) {
while (ii < Nh) {
atomicAdd( (unsigned long long*) &hist[ii], (unsigned long long) hist[ii+offset]);
ii += blockDim.x;
}
}
}
void cpu_paircount_v2(const Particles &p1, const Particles &p2, vector<HISTINT>& hist) {
float x1, y1, z1, dx, dy, dz;
const int nblock=10;
float dr[nblock];
int idr;
for (int ii =0; ii < p1.N; ++ii) {
x1 = p1.x[ii]; y1 = p1.y[ii]; z1 = p1.z[ii];
for (int jj=0; jj < p2.N/nblock; ++jj) {
for (int kk=0; kk < nblock; ++kk) {
dx = p2.x[jj*nblock+kk]-x1;
dy = p2.y[jj*nblock+kk]-y1;
dz = p2.z[jj*nblock+kk]-z1;
dr[kk] = sqrt(dx*dx + dy*dy + dz*dz);
}
for (int kk=0; kk < nblock; ++kk) {
idr = (int)(dr[kk]*Nhist);
if (idr < Nhist) hist[idr]++;
}
}
}
}
double cpu_harness(int N, int blocks) {
Particles p1,p2;
ParticlesGPU pg1, pg2;
clock_t t0;
double dt;
float gpu_dt;
cout << "Starting harness with N=" << N << endl;
// Initialize
t0 = clock();
makeRandomParticles(N, p1);
makeRandomParticles(N, p2);
dt = difftime(clock(), t0)/double(CLOCKS_PER_SEC);
cout << " Time to initialize: " << dt << endl;
// Set up GPU timers
hipEvent_t start, stop;
HANDLE_ERROR( hipEventCreate( &start ) );
HANDLE_ERROR( hipEventCreate( &stop ) );
// Move data to GPU
HANDLE_ERROR( hipEventRecord( start, 0 ) );
AllocCopyGPU(p1, pg1);
AllocCopyGPU(p2, pg2);
HANDLE_ERROR( hipEventRecord( stop, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stop ) );
HANDLE_ERROR( hipEventElapsedTime( &gpu_dt,
start, stop ) );
cout << " Time to move data on to GPU (ms): " << gpu_dt << endl;
// Set up the gpu_hist
HISTINT *gpu_hist;
HANDLE_ERROR( hipMalloc( (void**)&gpu_hist, Nhist*blocks*sizeof(HISTINT)));
HANDLE_ERROR( hipMemset( gpu_hist, 0, Nhist*blocks*sizeof(HISTINT)));
HANDLE_ERROR( hipEventRecord( start, 0 ) );
hipLaunchKernelGGL(( paircount_kernel), dim3(blocks), dim3(512), 0, 0, N, pg1.x, pg1.y, pg1.z,
N, pg2.x, pg2.y, pg2.z, Nhist, gpu_hist);
HANDLE_ERROR( hipEventRecord( stop, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stop ) );
HANDLE_ERROR( hipEventElapsedTime( &gpu_dt,
start, stop ) );
cout << " Time for GPU paircounts (ms): " << gpu_dt << endl;
// reduce histogram
HANDLE_ERROR( hipEventRecord( start, 0 ) );
hipLaunchKernelGGL(( reduce_histogram), dim3(blocks), dim3(512), 0, 0, Nhist, gpu_hist);
HANDLE_ERROR( hipEventRecord( stop, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stop ) );
HANDLE_ERROR( hipEventElapsedTime( &gpu_dt,
start, stop ) );
cout << " Time to reduce GPU paircounts (ms): " << gpu_dt << endl;
// Suck back the histogram array
vector<HISTINT> hist1(Nhist);
HANDLE_ERROR( hipMemcpy( &hist1[0], gpu_hist, Nhist*sizeof(HISTINT), hipMemcpyDeviceToHost));
// Clean up
hipFree(gpu_hist);
FreeGPU(pg1); FreeGPU(pg2);
// Clean up GPU timers
HANDLE_ERROR( hipEventDestroy( start ) );
HANDLE_ERROR( hipEventDestroy( stop ) );
// CPU paircounting
vector<HISTINT> hist(Nhist,0);
t0 = clock();
cpu_paircount_v2(p1, p2, hist);
dt = difftime(clock(), t0)/double(CLOCKS_PER_SEC);
cout << " Time to count pairs v2: " << dt << endl;
//for (int ii = 0; ii < Nhist; ++ii) {
// cout << ii << " " << hist[ii] << " " << hist1[ii] << endl;
//}
// Now compare histograms
HISTINT dhist = 0, error = 0, eval = 0;
for (int ii =0; ii < Nhist; ++ii) {
dhist = abs(hist[ii] - hist1[ii]);
if (dhist > error) {
error = dhist;
eval = hist[ii];
}
}
cout << " Difference in histograms : " << error << " " << eval << endl;
return dt;
}
int main() {
double timing[Ntimes];
int i, N1;
cout << "Pair counting timing code...." << endl;
// kernel launch - 2x the number of mps gave best timing
hipDeviceProp_t prop;
HANDLE_ERROR( hipGetDeviceProperties( &prop, 0 ) );
int blocks = prop.multiProcessorCount * 2;
cout << "Using blocks = " << blocks << endl;
for (i=0, N1=Nstart; i < Ntimes; ++i, N1*=2) {
timing[i] = cpu_harness(N1, blocks);
}
}
| 932478178d4d8382815d93ebdf6903df14b486d9.cu | #include <iostream>
#include <vector>
#include <cmath>
#include <ctime>
#include <numeric>
#include "book.h"
const int Nstart = 1000;
const int Ntimes = 4;
const int Nhist = 100;
#define HISTINT long long
using namespace std;
// Define a particle storage class
struct Particles {
vector<float> x, y, z;
int N;
};
struct ParticlesGPU {
float *x, *y, *z;
};
void AllocCopyGPU(Particles &p, ParticlesGPU &p2) {
// x
HANDLE_ERROR( cudaMalloc ( (void**)&p2.x, p.N * sizeof(float)));
HANDLE_ERROR( cudaMemcpy ( p2.x, &p.x[0], p.N * sizeof(float), cudaMemcpyHostToDevice));
// y
HANDLE_ERROR( cudaMalloc ( (void**)&p2.y, p.N * sizeof(float)));
HANDLE_ERROR( cudaMemcpy ( p2.y, &p.y[0], p.N * sizeof(float), cudaMemcpyHostToDevice));
// z
HANDLE_ERROR( cudaMalloc ( (void**)&p2.z, p.N * sizeof(float)));
HANDLE_ERROR( cudaMemcpy ( p2.z, &p.z[0], p.N * sizeof(float), cudaMemcpyHostToDevice));
}
void FreeGPU(ParticlesGPU &p) {
cudaFree(p.x);
cudaFree(p.y);
cudaFree(p.z);
}
void makeRandomParticles(int N, Particles &p) {
// Set number of particles
p.N = N;
// Resize the vectors
p.x.resize(N);
p.y.resize(N);
p.z.resize(N);
// Fill in the vectors
for (int ii=0; ii < N; ++ii) {
p.x[ii] = float(rand())/float(RAND_MAX);
p.y[ii] = float(rand())/float(RAND_MAX);
p.z[ii] = float(rand())/float(RAND_MAX);
}
};
// Define the GPU kernel here
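// Each block accumulates into its own Nh-bin slice of hist (offset = blockIdx.x * Nh) to reduce
// atomic contention; reduce_histogram below folds every per-block slice into block 0's slice.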
__global__ void paircount_kernel(
int N1, float *x1, float *y1, float *z1,
int N2, float *x2, float *y2, float *z2,
int Nh, HISTINT *hist) {
// We distribute p1, but loop through all of p2
int ii, jj, idr;
int stride = blockDim.x * gridDim.x;
float x, y, z, dx, dy, dz, dr;
ii = threadIdx.x + blockIdx.x * blockDim.x;
int offset = blockIdx.x * Nh;
while (ii < N1) {
x = x1[ii]; y = y1[ii]; z = z1[ii];
for (jj = 0; jj < N2; ++jj) {
dx = x2[jj] - x;
dy = y2[jj] - y;
dz = z2[jj] - z;
dr = sqrtf(dx*dx + dy*dy + dz*dz);
idr = (int) (dr*Nh);
if (idr < Nh) atomicAdd( (unsigned long long*) &hist[idr + offset], 1ll);
}
ii += stride;
}
}
// Define the histogram summing kernel here
__global__ void reduce_histogram(int Nh, HISTINT *hist) {
int ii = threadIdx.x ;
int offset = blockIdx.x * Nh;
if (blockIdx.x > 0) {
while (ii < Nh) {
atomicAdd( (unsigned long long*) &hist[ii], (unsigned long long) hist[ii+offset]);
ii += blockDim.x;
}
}
}
void cpu_paircount_v2(const Particles &p1, const Particles &p2, vector<HISTINT>& hist) {
float x1, y1, z1, dx, dy, dz;
const int nblock=10;
float dr[nblock];
int idr;
for (int ii =0; ii < p1.N; ++ii) {
x1 = p1.x[ii]; y1 = p1.y[ii]; z1 = p1.z[ii];
for (int jj=0; jj < p2.N/nblock; ++jj) {
for (int kk=0; kk < nblock; ++kk) {
dx = p2.x[jj*nblock+kk]-x1;
dy = p2.y[jj*nblock+kk]-y1;
dz = p2.z[jj*nblock+kk]-z1;
dr[kk] = sqrt(dx*dx + dy*dy + dz*dz);
}
for (int kk=0; kk < nblock; ++kk) {
idr = (int)(dr[kk]*Nhist);
if (idr < Nhist) hist[idr]++;
}
}
}
}
double cpu_harness(int N, int blocks) {
Particles p1,p2;
ParticlesGPU pg1, pg2;
clock_t t0;
double dt;
float gpu_dt;
cout << "Starting harness with N=" << N << endl;
// Initialize
t0 = clock();
makeRandomParticles(N, p1);
makeRandomParticles(N, p2);
dt = difftime(clock(), t0)/double(CLOCKS_PER_SEC);
cout << " Time to initialize: " << dt << endl;
// Set up GPU timers
cudaEvent_t start, stop;
HANDLE_ERROR( cudaEventCreate( &start ) );
HANDLE_ERROR( cudaEventCreate( &stop ) );
// Move data to GPU
HANDLE_ERROR( cudaEventRecord( start, 0 ) );
AllocCopyGPU(p1, pg1);
AllocCopyGPU(p2, pg2);
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
HANDLE_ERROR( cudaEventElapsedTime( &gpu_dt,
start, stop ) );
cout << " Time to move data on to GPU (ms): " << gpu_dt << endl;
// Set up the gpu_hist
HISTINT *gpu_hist;
HANDLE_ERROR( cudaMalloc( (void**)&gpu_hist, Nhist*blocks*sizeof(HISTINT)));
HANDLE_ERROR( cudaMemset( gpu_hist, 0, Nhist*blocks*sizeof(HISTINT)));
HANDLE_ERROR( cudaEventRecord( start, 0 ) );
paircount_kernel<<<blocks, 512>>>(N, pg1.x, pg1.y, pg1.z,
N, pg2.x, pg2.y, pg2.z, Nhist, gpu_hist);
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
HANDLE_ERROR( cudaEventElapsedTime( &gpu_dt,
start, stop ) );
cout << " Time for GPU paircounts (ms): " << gpu_dt << endl;
// reduce histogram
HANDLE_ERROR( cudaEventRecord( start, 0 ) );
reduce_histogram<<<blocks, 512>>>(Nhist, gpu_hist);
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
HANDLE_ERROR( cudaEventElapsedTime( &gpu_dt,
start, stop ) );
cout << " Time to reduce GPU paircounts (ms): " << gpu_dt << endl;
// Suck back the histogram array
vector<HISTINT> hist1(Nhist);
HANDLE_ERROR( cudaMemcpy( &hist1[0], gpu_hist, Nhist*sizeof(HISTINT), cudaMemcpyDeviceToHost));
// Clean up
cudaFree(gpu_hist);
FreeGPU(pg1); FreeGPU(pg2);
// Clean up GPU timers
HANDLE_ERROR( cudaEventDestroy( start ) );
HANDLE_ERROR( cudaEventDestroy( stop ) );
// CPU paircounting
vector<HISTINT> hist(Nhist,0);
t0 = clock();
cpu_paircount_v2(p1, p2, hist);
dt = difftime(clock(), t0)/double(CLOCKS_PER_SEC);
cout << " Time to count pairs v2: " << dt << endl;
//for (int ii = 0; ii < Nhist; ++ii) {
// cout << ii << " " << hist[ii] << " " << hist1[ii] << endl;
//}
// Now compare histograms
HISTINT dhist = 0, error = 0, eval = 0;
for (int ii =0; ii < Nhist; ++ii) {
dhist = abs(hist[ii] - hist1[ii]);
if (dhist > error) {
error = dhist;
eval = hist[ii];
}
}
cout << " Difference in histograms : " << error << " " << eval << endl;
return dt;
}
int main() {
double timing[Ntimes];
int i, N1;
cout << "Pair counting timing code...." << endl;
// kernel launch - 2x the number of mps gave best timing
cudaDeviceProp prop;
HANDLE_ERROR( cudaGetDeviceProperties( &prop, 0 ) );
int blocks = prop.multiProcessorCount * 2;
cout << "Using blocks = " << blocks << endl;
for (i=0, N1=Nstart; i < Ntimes; ++i, N1*=2) {
timing[i] = cpu_harness(N1, blocks);
}
}
|
59928f51322f2d8298509abbfc0083feba4c6ab8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//******************************************
// operators
// based on min-app code written by Oliver Fuhrer, MeteoSwiss
// modified by Ben Cumming, CSCS
//
// implements
// *****************************************
// Description: Contains simple operators which can be used on 3d-meshes
#include "cuda_helpers.h"
#include "data.h"
#include "operators.h"
#include "stats.h"
namespace operators {
// POD type holding information for device
struct DiffusionParams {
int nx;
int ny;
double alpha;
double dxs;
double *x_old;
double *bndN;
double *bndE;
double *bndS;
double *bndW;
};
// TODO : explain what the params variable and setup_params_on_device() do
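// (One possible answer: params is a __device__ global struct holding the grid sizes, coefficients
// and boundary/x_old device pointers; setup_params_on_device() fills a host-side copy and copies it
// to the device symbol once, so the kernels can read these values without extra kernel arguments.)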
__device__
DiffusionParams params;
void setup_params_on_device(int nx, int ny, double alpha, double dxs)
{
auto p = DiffusionParams {
nx,
ny,
alpha,
dxs,
data::x_old.device_data(),
data::bndN.device_data(),
data::bndE.device_data(),
data::bndS.device_data(),
data::bndW.device_data()
};
cuda_check_status(
hipMemcpyToSymbol(params, &p, sizeof(DiffusionParams))
);
}
namespace kernels {
__global__
void stencil_interior(double* S, const double *U) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto j = threadIdx.y + blockDim.y*blockIdx.y;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
if (i > 0 && i < nx - 1 && j > 0 && j < ny - 1) { // interior points only; boundary rows/columns are handled by the dedicated kernels below (also avoids out-of-bounds reads at i==0 or j==0)
auto pos = find_pos(i, j);
S[pos] = -(4. + alpha) * U[pos] //Central inner point
+ U[pos - 1] + U[pos + 1] // east and west
+ U[pos - nx] + U[pos + nx] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
}
// TODO : implement the interior stencil
// EXTRA : can you make it use shared memory?
// S(i,j) = -(4. + alpha) * U(i,j) // central point
// + U(i-1,j) + U(i+1,j) // east and west
// + U(i,j-1) + U(i,j+1) // north and south
// + alpha * x_old(i,j)
// + dxs * U(i,j) * (1.0 - U(i,j));
}
__global__
void stencil_east_west(double* S, const double *U) {
auto j = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
if(j>0 && j<ny-1) {
// EAST : i = nx-1
auto pos = find_pos(nx-1, j);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos-nx] + U[pos+nx]
+ alpha*params.x_old[pos] + params.bndE[j]
+ dxs * U[pos] * (1.0 - U[pos]);
// TODO : do the stencil on the WEST side
// WEST : i = 0
pos = find_pos(0, j);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos+1] + U[pos-nx] + U[pos+nx]
+ alpha*params.x_old[pos] + params.bndW[j]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
__global__
void stencil_north_south(double* S, const double *U) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
if(i>0 && i<nx-1) {
// NORTH : j = ny -1
auto pos = i + nx*(ny-1);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos+1] + U[pos-nx]
+ alpha*params.x_old[pos] + params.bndN[i]
+ dxs * U[pos] * (1.0 - U[pos]);
// TODO : do the stencil on the SOUTH side
// SOUTH : j = 0
pos = i + nx*0;
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos+1] + U[pos+nx]
+ alpha*params.x_old[pos] + params.bndS[i]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
__global__
void stencil_corners(double* S, const double* U) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
// only 1 thread executes this kernel
if(i==0) {
// NORTH-EAST
auto pos = find_pos(nx-1, ny-1);
S[pos] = -(4. + alpha) * U[pos] // central point
+ U[pos-1] + params.bndE[ny-1] // east and west
+ U[pos-nx] + params.bndN[nx-1] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// SOUTH-EAST
pos = find_pos(nx-1, 0);
S[pos] = -(4. + alpha) * U[pos] // central point
+ U[pos-1] + params.bndE[0] // east and west
+ params.bndS[nx-1]+ U[pos+nx] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// SOUTH-WEST
pos = find_pos(0, 0);
S[pos] = -(4. + alpha) * U[pos] // central point
+ params.bndW[0] + U[pos+1] // east and west
+ params.bndS[0] + U[pos+nx] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// NORTH-WEST
pos = find_pos(0, ny-1);
S[pos] = -(4. + alpha) * U[pos] // central point
+ params.bndW[nx-1]+ U[pos+1] // east and west
+ U[pos-nx] + params.bndN[0] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
} // namespace kernels
//enum class Boundary {north, east, south, west};
void diffusion(data::Field const& U, data::Field &S)
{
using data::options;
using data::bndE;
using data::bndW;
using data::bndN;
using data::bndS;
using data::x_old;
double dxs = 1000. * (options.dx * options.dx);
double alpha = options.alpha;
int nx = options.nx;
int ny = options.ny;
// calculates the linear index into an array of width nx
// from an (i,j) coordinate pair
auto idx = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
static bool is_initialized = false;
if(!is_initialized) {
setup_params_on_device(nx, ny, alpha, dxs);
is_initialized = true;
}
// apply stencil to the interior grid points
// TODO: what is the purpose of the following?
auto calculate_grid_dim = [] (size_t n, size_t block_dim) {
return (n+block_dim-1)/block_dim;
};
dim3 threadsPerBlock(16, 16);
dim3 num_blocks(calculate_grid_dim(nx, threadsPerBlock.x), calculate_grid_dim(ny, threadsPerBlock.y));
hipLaunchKernelGGL(( kernels::stencil_interior), dim3(num_blocks), dim3(threadsPerBlock), 0, 0, S.device_data(), U.device_data());
// TODO: apply stencil to the interior grid points
hipDeviceSynchronize(); // TODO: remove after debugging
cuda_check_last_kernel("internal kernel"); // TODO: remove after debugging
// apply stencil at east-west boundary
auto bnd_grid_dim_y = calculate_grid_dim(ny, 64);
hipLaunchKernelGGL(( kernels::stencil_east_west), dim3(bnd_grid_dim_y), dim3(64), 0, 0, S.device_data(), U.device_data());
hipDeviceSynchronize(); // TODO: remove after debugging
cuda_check_last_kernel("east-west kernel"); // TODO: remove after debugging
// apply stencil at north-south boundary
auto bnd_grid_dim_x = calculate_grid_dim(nx, 64);
hipLaunchKernelGGL(( kernels::stencil_north_south), dim3(bnd_grid_dim_x), dim3(64), 0, 0, S.device_data(), U.device_data());
hipDeviceSynchronize(); // TODO: remove after debugging
cuda_check_last_kernel("north-south kernel"); // TODO: remove after debugging
// apply stencil at corners
hipLaunchKernelGGL(( kernels::stencil_corners), dim3(1), dim3(1), 0, 0, S.device_data(), U.device_data());
hipDeviceSynchronize(); // TODO: remove after debugging
cuda_check_last_kernel("corner kernel"); // TODO: remove after debugging
}
} // namespace operators
| 59928f51322f2d8298509abbfc0083feba4c6ab8.cu | //******************************************
// operators
// based on min-app code written by Oliver Fuhrer, MeteoSwiss
// modified by Ben Cumming, CSCS
//
// implements
// *****************************************
// Description: Contains simple operators which can be used on 3d-meshes
#include "cuda_helpers.h"
#include "data.h"
#include "operators.h"
#include "stats.h"
namespace operators {
// POD type holding information for device
struct DiffusionParams {
int nx;
int ny;
double alpha;
double dxs;
double *x_old;
double *bndN;
double *bndE;
double *bndS;
double *bndW;
};
// TODO : explain what the params variable and setup_params_on_device() do
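// (One possible answer: params is a __device__ global struct holding the grid sizes, coefficients
// and boundary/x_old device pointers; setup_params_on_device() fills a host-side copy and copies it
// to the device symbol once, so the kernels can read these values without extra kernel arguments.)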
__device__
DiffusionParams params;
void setup_params_on_device(int nx, int ny, double alpha, double dxs)
{
auto p = DiffusionParams {
nx,
ny,
alpha,
dxs,
data::x_old.device_data(),
data::bndN.device_data(),
data::bndE.device_data(),
data::bndS.device_data(),
data::bndW.device_data()
};
cuda_check_status(
cudaMemcpyToSymbol(params, &p, sizeof(DiffusionParams))
);
}
namespace kernels {
__global__
void stencil_interior(double* S, const double *U) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto j = threadIdx.y + blockDim.y*blockIdx.y;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
if (i > 0 && i < nx - 1 && j > 0 && j < ny - 1) { // interior points only; boundary rows/columns are handled by the dedicated kernels below (also avoids out-of-bounds reads at i==0 or j==0)
auto pos = find_pos(i, j);
S[pos] = -(4. + alpha) * U[pos] //Central inner point
+ U[pos - 1] + U[pos + 1] // east and west
+ U[pos - nx] + U[pos + nx] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
}
// TODO : implement the interior stencil
// EXTRA : can you make it use shared memory?
// S(i,j) = -(4. + alpha) * U(i,j) // central point
// + U(i-1,j) + U(i+1,j) // east and west
// + U(i,j-1) + U(i,j+1) // north and south
// + alpha * x_old(i,j)
// + dxs * U(i,j) * (1.0 - U(i,j));
}
__global__
void stencil_east_west(double* S, const double *U) {
auto j = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
if(j>0 && j<ny-1) {
// EAST : i = nx-1
auto pos = find_pos(nx-1, j);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos-nx] + U[pos+nx]
+ alpha*params.x_old[pos] + params.bndE[j]
+ dxs * U[pos] * (1.0 - U[pos]);
// TODO : do the stencil on the WEST side
// WEST : i = 0
pos = find_pos(0, j);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos+1] + U[pos-nx] + U[pos+nx]
+ alpha*params.x_old[pos] + params.bndW[j]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
__global__
void stencil_north_south(double* S, const double *U) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
if(i>0 && i<nx-1) {
// NORTH : j = ny -1
auto pos = i + nx*(ny-1);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos+1] + U[pos-nx]
+ alpha*params.x_old[pos] + params.bndN[i]
+ dxs * U[pos] * (1.0 - U[pos]);
// TODO : do the stencil on the SOUTH side
// SOUTH : j = 0
pos = i + nx*0;
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos+1] + U[pos+nx]
+ alpha*params.x_old[pos] + params.bndS[i]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
__global__
void stencil_corners(double* S, const double* U) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
// only 1 thread executes this kernel
if(i==0) {
// NORTH-EAST
auto pos = find_pos(nx-1, ny-1);
S[pos] = -(4. + alpha) * U[pos] // central point
+ U[pos-1] + params.bndE[ny-1] // east and west
+ U[pos-nx] + params.bndN[nx-1] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// SOUTH-EAST
pos = find_pos(nx-1, 0);
S[pos] = -(4. + alpha) * U[pos] // central point
+ U[pos-1] + params.bndE[0] // east and west
+ params.bndS[nx-1]+ U[pos+nx] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// SOUTH-WEST
pos = find_pos(0, 0);
S[pos] = -(4. + alpha) * U[pos] // central point
+ params.bndW[0] + U[pos+1] // east and west
+ params.bndS[0] + U[pos+nx] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// NORTH-WEST
pos = find_pos(0, ny-1);
S[pos] = -(4. + alpha) * U[pos] // central point
+ params.bndW[nx-1]+ U[pos+1] // east and west
+ U[pos-nx] + params.bndN[0] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
} // namespace kernels
//enum class Boundary {north, east, south, west};
void diffusion(data::Field const& U, data::Field &S)
{
using data::options;
using data::bndE;
using data::bndW;
using data::bndN;
using data::bndS;
using data::x_old;
double dxs = 1000. * (options.dx * options.dx);
double alpha = options.alpha;
int nx = options.nx;
int ny = options.ny;
// calculates the linear index into an array of width nx
// from an (i,j) coordinate pair
auto idx = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
static bool is_initialized = false;
if(!is_initialized) {
setup_params_on_device(nx, ny, alpha, dxs);
is_initialized = true;
}
// apply stencil to the interior grid points
// TODO: what is the purpose of the following?
auto calculate_grid_dim = [] (size_t n, size_t block_dim) {
return (n+block_dim-1)/block_dim;
};
dim3 threadsPerBlock(16, 16);
dim3 num_blocks(calculate_grid_dim(nx, threadsPerBlock.x), calculate_grid_dim(ny, threadsPerBlock.y));
kernels::stencil_interior<<<num_blocks, threadsPerBlock>>>(S.device_data(), U.device_data());
// TODO: apply stencil to the interior grid points
cudaDeviceSynchronize(); // TODO: remove after debugging
cuda_check_last_kernel("internal kernel"); // TODO: remove after debugging
// apply stencil at east-west boundary
auto bnd_grid_dim_y = calculate_grid_dim(ny, 64);
kernels::stencil_east_west<<<bnd_grid_dim_y, 64>>>(S.device_data(), U.device_data());
cudaDeviceSynchronize(); // TODO: remove after debugging
cuda_check_last_kernel("east-west kernel"); // TODO: remove after debugging
// apply stencil at north-south boundary
auto bnd_grid_dim_x = calculate_grid_dim(nx, 64);
kernels::stencil_north_south<<<bnd_grid_dim_x, 64>>>(S.device_data(), U.device_data());
cudaDeviceSynchronize(); // TODO: remove after debugging
cuda_check_last_kernel("north-south kernel"); // TODO: remove after debugging
// apply stencil at corners
kernels::stencil_corners<<<1, 1>>>(S.device_data(), U.device_data());
cudaDeviceSynchronize(); // TODO: remove after debugging
cuda_check_last_kernel("corner kernel"); // TODO: remove after debugging
}
} // namespace operators
|
54bdf8a2b7ec0dc79bc5e262c9e5f625f9b19e14.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void transpose_v1(float* a,float* b, int n){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i >= n || j >= n) return;
b[n*j+i] = a[n*i+j];
} | 54bdf8a2b7ec0dc79bc5e262c9e5f625f9b19e14.cu | #include "includes.h"
__global__ void transpose_v1(float* a,float* b, int n){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i >= n || j >= n) return;
b[n*j+i] = a[n*i+j];
} |
3e476e09cff76ee212ce0ef0573374406ca3eecc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/ceil_div.h>
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/Resize.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/diag.h>
#include <ATen/ops/diag_native.h>
#include <ATen/ops/trace_native.h>
#include <ATen/ops/tril_native.h>
#include <ATen/ops/triu_native.h>
#endif
#include <ATen/hip/HIPApplyUtils.cuh>
namespace at {
namespace native {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triu/tril ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t, typename IndexType, bool upper>
C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize())
__global__ void triu_tril_kernel(
cuda::detail::TensorInfo<scalar_t, IndexType> result_info,
const cuda::detail::TensorInfo<scalar_t, IndexType> self_info,
const int64_t k,
const int64_t N) {
int64_t linear_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_idx >= N) {
return;
}
auto dims = self_info.dims;
IndexType self_offset = 0, result_offset = 0;
// Compute column index and corresponding offset
IndexType col = linear_idx % self_info.sizes[dims - 1];
linear_idx /= self_info.sizes[dims - 1];
self_offset += self_info.strides[dims - 1] * col;
result_offset += result_info.strides[dims - 1] * col;
// Compute row index and corresponding offset
IndexType row = linear_idx % self_info.sizes[dims - 2];
linear_idx /= self_info.sizes[dims - 2];
self_offset += self_info.strides[dims - 2] * row;
result_offset += result_info.strides[dims - 2] * row;
// Compute remaining offsets
IndexType running_index;
#pragma unroll
for (IndexType i = dims - 3; i >= 0; --i) {
running_index = linear_idx % self_info.sizes[i];
linear_idx /= self_info.sizes[i];
self_offset += running_index * self_info.strides[i];
result_offset += running_index * result_info.strides[i];
}
bool mask = upper ? (col - row >= k) : (col - row <= k);
result_info.data[result_offset] = mask ? self_info.data[self_offset] : scalar_t(0);
}
template <bool upper>
void triu_tril_cuda_template(const Tensor& result, const Tensor& self, int64_t k, const char* name) {
int64_t N = self.numel();
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid((N + dim_block.x - 1) / dim_block.x);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kComplexHalf, at::ScalarType::Half, at::ScalarType::Bool,
self.scalar_type(), "triu_tril_cuda_template", [&]{
if (cuda::detail::canUse32BitIndexMath(result) && cuda::detail::canUse32BitIndexMath(self)) {
auto result_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(result);
auto self_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(self);
hipLaunchKernelGGL(( triu_tril_kernel<scalar_t, int32_t, upper>)
, dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result_info, self_info, k, N);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
auto result_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(result);
auto self_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(self);
hipLaunchKernelGGL(( triu_tril_kernel<scalar_t, int64_t, upper>)
, dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result_info, self_info, k, N);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
});
}
TORCH_IMPL_FUNC(tril_cuda)(const Tensor& self, int64_t k, const Tensor &result) {
if (self.numel() != 0) {
triu_tril_cuda_template<false>(result, self, k, "tril");
}
}
TORCH_IMPL_FUNC(triu_cuda)(const Tensor& self, int64_t k, const Tensor &result) {
if (self.numel() != 0) {
triu_tril_cuda_template<true>(result, self, k, "triu");
}
}
// Copy the kth diagonal of a matrix B to a vector A.
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void copy_from_diagonal_kernel(
scalar_t* a,
scalar_t* b,
std::ptrdiff_t start,
std::ptrdiff_t size,
std::ptrdiff_t strideSum,
std::ptrdiff_t strideA) {
for (std::ptrdiff_t linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < size;
linearIndex += gridDim.x * blockDim.x) {
const std::ptrdiff_t bOffset = start + strideSum * linearIndex;
a[strideA * linearIndex] = b[bOffset];
}
}
// Copy vector B to the kth diagonal of a matrix A
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void copy_to_diagonal_kernel(
scalar_t* a,
scalar_t* b,
std::ptrdiff_t start,
std::ptrdiff_t size,
std::ptrdiff_t strideSum,
std::ptrdiff_t strideB) {
for (std::ptrdiff_t linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < size;
linearIndex += gridDim.x * blockDim.x) {
const std::ptrdiff_t aOffset = start + strideSum * linearIndex;
a[aOffset] = b[strideB * linearIndex];
}
}
template <typename scalar_t>
Tensor& apply_diag(Tensor& result, const Tensor& self, int64_t dimension) {
TORCH_CHECK(
self.dim() == 1 || self.dim() == 2, "matrix or a vector expected");
TensorArg result_arg{result, "result", 1};
TensorArg self_arg{self, "self", 2};
checkAllSameGPU(__func__, {result_arg, self_arg});
checkSameType(__func__, result_arg, self_arg);
int nDimension = self.dim();
if (nDimension == 2) {
auto self_stride_0 = self.stride(0);
auto self_stride_1 = self.stride(1);
int sz;
if (dimension > 0) {
sz = ::min(self.size(0), self.size(1) - dimension);
} else {
sz = ::min(self.size(0) + dimension, self.size(1));
}
at::native::resize_output(result, {sz});
if (sz > 0) {
at::assert_no_internal_overlap(result);
auto result_stride = result.stride(0);
const dim3 threads(::min(
int(sz),
int(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock)));
const dim3 grid(
::min(int(1024), ceil_div(int(sz), int(threads.x))));
auto start =
(dimension >= 0 ? dimension * self_stride_1
: -dimension * self_stride_0);
// Kernel Launch
hipLaunchKernelGGL(( copy_from_diagonal_kernel<scalar_t>)
, dim3(grid), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<scalar_t>(),
self.data_ptr<scalar_t>(),
start,
sz,
self_stride_0 + self_stride_1,
result_stride);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
} else {
auto n_elems = self.numel();
auto sz = (dimension > 0) ? n_elems + dimension : n_elems - dimension;
auto self_stride = self.stride(0);
at::native::resize_output(result, {sz, sz});
result.zero_();
if (sz > 0) {
at::assert_no_internal_overlap(result);
auto result_stride_0 = result.stride(0);
auto result_stride_1 = result.stride(1);
const dim3 threads(::min(
int(sz), at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock));
const dim3 grid(
::min(int(1024), ceil_div(int(sz), int(threads.x))));
auto start =
(dimension >= 0 ? dimension * result_stride_1
: -dimension * result_stride_0);
// Kernel Launch
hipLaunchKernelGGL(( copy_to_diagonal_kernel<scalar_t>)
, dim3(grid), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<scalar_t>(),
self.data_ptr<scalar_t>(),
start,
n_elems,
result_stride_0 + result_stride_1,
self_stride);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
return result;
}
Tensor& diag_cuda_out(const Tensor& self, int64_t dimension, Tensor& result) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
kComplexHalf, ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool,
self.scalar_type(), "diag_cuda",
[&] {
apply_diag<scalar_t>(result, self, dimension);
});
return result;
}
Tensor trace_cuda(const Tensor& self) {
TORCH_CHECK(self.dim() == 2, "expected a matrix");
int dimension = 0;
auto result = at::diag(self, dimension);
return result.sum();
}
} // namespace native
} // namespace at
| 3e476e09cff76ee212ce0ef0573374406ca3eecc.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/ceil_div.h>
#include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/native/Resize.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/diag.h>
#include <ATen/ops/diag_native.h>
#include <ATen/ops/trace_native.h>
#include <ATen/ops/tril_native.h>
#include <ATen/ops/triu_native.h>
#endif
#include <ATen/cuda/CUDAApplyUtils.cuh>
namespace at {
namespace native {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triu/tril ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t, typename IndexType, bool upper>
C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize())
__global__ void triu_tril_kernel(
cuda::detail::TensorInfo<scalar_t, IndexType> result_info,
const cuda::detail::TensorInfo<scalar_t, IndexType> self_info,
const int64_t k,
const int64_t N) {
int64_t linear_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_idx >= N) {
return;
}
auto dims = self_info.dims;
IndexType self_offset = 0, result_offset = 0;
// Compute column index and corresponding offset
IndexType col = linear_idx % self_info.sizes[dims - 1];
linear_idx /= self_info.sizes[dims - 1];
self_offset += self_info.strides[dims - 1] * col;
result_offset += result_info.strides[dims - 1] * col;
// Compute row index and corresponding offset
IndexType row = linear_idx % self_info.sizes[dims - 2];
linear_idx /= self_info.sizes[dims - 2];
self_offset += self_info.strides[dims - 2] * row;
result_offset += result_info.strides[dims - 2] * row;
// Compute remaining offsets
IndexType running_index;
#pragma unroll
for (IndexType i = dims - 3; i >= 0; --i) {
running_index = linear_idx % self_info.sizes[i];
linear_idx /= self_info.sizes[i];
self_offset += running_index * self_info.strides[i];
result_offset += running_index * result_info.strides[i];
}
bool mask = upper ? (col - row >= k) : (col - row <= k);
result_info.data[result_offset] = mask ? self_info.data[self_offset] : scalar_t(0);
}
template <bool upper>
void triu_tril_cuda_template(const Tensor& result, const Tensor& self, int64_t k, const char* name) {
int64_t N = self.numel();
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid((N + dim_block.x - 1) / dim_block.x);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kComplexHalf, at::ScalarType::Half, at::ScalarType::Bool,
self.scalar_type(), "triu_tril_cuda_template", [&]{
if (cuda::detail::canUse32BitIndexMath(result) && cuda::detail::canUse32BitIndexMath(self)) {
auto result_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(result);
auto self_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(self);
triu_tril_kernel<scalar_t, int32_t, upper>
<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
result_info, self_info, k, N);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
auto result_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(result);
auto self_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(self);
triu_tril_kernel<scalar_t, int64_t, upper>
<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
result_info, self_info, k, N);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
});
}
TORCH_IMPL_FUNC(tril_cuda)(const Tensor& self, int64_t k, const Tensor &result) {
if (self.numel() != 0) {
triu_tril_cuda_template<false>(result, self, k, "tril");
}
}
TORCH_IMPL_FUNC(triu_cuda)(const Tensor& self, int64_t k, const Tensor &result) {
if (self.numel() != 0) {
triu_tril_cuda_template<true>(result, self, k, "triu");
}
}
// Copy the kth diagonal of a matrix B to a vector A.
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void copy_from_diagonal_kernel(
scalar_t* a,
scalar_t* b,
std::ptrdiff_t start,
std::ptrdiff_t size,
std::ptrdiff_t strideSum,
std::ptrdiff_t strideA) {
for (std::ptrdiff_t linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < size;
linearIndex += gridDim.x * blockDim.x) {
const std::ptrdiff_t bOffset = start + strideSum * linearIndex;
a[strideA * linearIndex] = b[bOffset];
}
}
// Copy vector B to the kth diagonal of a matrix A
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void copy_to_diagonal_kernel(
scalar_t* a,
scalar_t* b,
std::ptrdiff_t start,
std::ptrdiff_t size,
std::ptrdiff_t strideSum,
std::ptrdiff_t strideB) {
for (std::ptrdiff_t linearIndex = blockIdx.x * blockDim.x + threadIdx.x;
linearIndex < size;
linearIndex += gridDim.x * blockDim.x) {
const std::ptrdiff_t aOffset = start + strideSum * linearIndex;
a[aOffset] = b[strideB * linearIndex];
}
}
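// apply_diag (below): for a 2-D input, the k-th diagonal (k = dimension) is copied into a
// 1-D result of length min(rows, cols - k) by a grid-stride loop that steps by
// stride0 + stride1 starting from k times the stride of the off-diagonal dimension; for a
// 1-D input, the vector is written onto the k-th diagonal of a zero-initialized square result.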
template <typename scalar_t>
Tensor& apply_diag(Tensor& result, const Tensor& self, int64_t dimension) {
TORCH_CHECK(
self.dim() == 1 || self.dim() == 2, "matrix or a vector expected");
TensorArg result_arg{result, "result", 1};
TensorArg self_arg{self, "self", 2};
checkAllSameGPU(__func__, {result_arg, self_arg});
checkSameType(__func__, result_arg, self_arg);
int nDimension = self.dim();
if (nDimension == 2) {
auto self_stride_0 = self.stride(0);
auto self_stride_1 = self.stride(1);
int sz;
if (dimension > 0) {
sz = std::min(self.size(0), self.size(1) - dimension);
} else {
sz = std::min(self.size(0) + dimension, self.size(1));
}
at::native::resize_output(result, {sz});
if (sz > 0) {
at::assert_no_internal_overlap(result);
auto result_stride = result.stride(0);
const dim3 threads(std::min(
int(sz),
int(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock)));
const dim3 grid(
std::min(int(1024), ceil_div(int(sz), int(threads.x))));
auto start =
(dimension >= 0 ? dimension * self_stride_1
: -dimension * self_stride_0);
// Kernel Launch
copy_from_diagonal_kernel<scalar_t>
<<<grid, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<scalar_t>(),
self.data_ptr<scalar_t>(),
start,
sz,
self_stride_0 + self_stride_1,
result_stride);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
} else {
auto n_elems = self.numel();
auto sz = (dimension > 0) ? n_elems + dimension : n_elems - dimension;
auto self_stride = self.stride(0);
at::native::resize_output(result, {sz, sz});
result.zero_();
if (sz > 0) {
at::assert_no_internal_overlap(result);
auto result_stride_0 = result.stride(0);
auto result_stride_1 = result.stride(1);
const dim3 threads(std::min(
int(sz), at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock));
const dim3 grid(
std::min(int(1024), ceil_div(int(sz), int(threads.x))));
auto start =
(dimension >= 0 ? dimension * result_stride_1
: -dimension * result_stride_0);
// Kernel Launch
copy_to_diagonal_kernel<scalar_t>
<<<grid, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<scalar_t>(),
self.data_ptr<scalar_t>(),
start,
n_elems,
result_stride_0 + result_stride_1,
self_stride);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
return result;
}
Tensor& diag_cuda_out(const Tensor& self, int64_t dimension, Tensor& result) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
kComplexHalf, ScalarType::Half, ScalarType::BFloat16, ScalarType::Bool,
self.scalar_type(), "diag_cuda",
[&] {
apply_diag<scalar_t>(result, self, dimension);
});
return result;
}
Tensor trace_cuda(const Tensor& self) {
TORCH_CHECK(self.dim() == 2, "expected a matrix");
int dimension = 0;
auto result = at::diag(self, dimension);
return result.sum();
}
} // namespace native
} // namespace at
|
0a016bb99dfe362d71af798bffce7d5c7053a5f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <cuda_invoker.hpp>
#include <float.h>
#include <stdio.h>
#include "opencv2/core/cuda/common.hpp"
namespace cv { namespace softcascade { namespace cudev {
typedef unsigned char uchar;
template <int FACTOR>
__device__ __forceinline__ uchar shrink(const uchar* ptr, const int pitch, const int y, const int x)
{
int out = 0;
#pragma unroll
for(int dy = 0; dy < FACTOR; ++dy)
#pragma unroll
for(int dx = 0; dx < FACTOR; ++dx)
{
out += ptr[dy * pitch + dx];
}
return static_cast<uchar>(out / (FACTOR * FACTOR));
}
template<int FACTOR>
__global__ void shrink(const uchar* __restrict__ hogluv, const size_t inPitch,
uchar* __restrict__ shrank, const size_t outPitch )
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const uchar* ptr = hogluv + (FACTOR * y) * inPitch + (FACTOR * x);
shrank[ y * outPitch + x] = shrink<FACTOR>(ptr, inPitch, y, x);
}
void shrink(const cv::gpu::PtrStepSzb& channels, cv::gpu::PtrStepSzb shrunk)
{
dim3 block(32, 8);
dim3 grid(shrunk.cols / 32, shrunk.rows / 8);
hipLaunchKernelGGL(( shrink<4>), dim3(grid), dim3(block), 0, 0, (uchar*)channels.ptr(), channels.step, (uchar*)shrunk.ptr(), shrunk.step);
cudaSafeCall(hipDeviceSynchronize());
}
__device__ __forceinline__ void luv(const float& b, const float& g, const float& r, uchar& __l, uchar& __u, uchar& __v)
{
// rgb -> XYZ
float x = 0.412453f * r + 0.357580f * g + 0.180423f * b;
float y = 0.212671f * r + 0.715160f * g + 0.072169f * b;
float z = 0.019334f * r + 0.119193f * g + 0.950227f * b;
// computed for D65
const float _ur = 0.19783303699678276f;
const float _vr = 0.46833047435252234f;
const float divisor = fmax((x + 15.f * y + 3.f * z), FLT_EPSILON);
const float _u = __fdividef(4.f * x, divisor);
const float _v = __fdividef(9.f * y, divisor);
float hack = static_cast<float>(__float2int_rn(y * 2047)) / 2047;
const float L = fmax(0.f, ((116.f * cbrtf(hack)) - 16.f));
const float U = 13.f * L * (_u - _ur);
const float V = 13.f * L * (_v - _vr);
// L in [0, 100], u in [-134, 220], v in [-140, 122]
__l = static_cast<uchar>( L * (255.f / 100.f));
__u = static_cast<uchar>((U + 134.f) * (255.f / (220.f + 134.f )));
__v = static_cast<uchar>((V + 140.f) * (255.f / (122.f + 140.f )));
}
__global__ void bgr2Luv_d(const uchar* rgb, const size_t rgbPitch, uchar* luvg, const size_t luvgPitch)
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
uchar3 color = ((uchar3*)(rgb + rgbPitch * y))[x];
uchar l, u, v;
luv(color.x / 255.f, color.y / 255.f, color.z / 255.f, l, u, v);
luvg[luvgPitch * y + x] = l;
luvg[luvgPitch * (y + 480) + x] = u;
luvg[luvgPitch * (y + 2 * 480) + x] = v;
}
void bgr2Luv(const cv::gpu::PtrStepSzb& bgr, cv::gpu::PtrStepSzb luv)
{
dim3 block(32, 8);
dim3 grid(bgr.cols / 32, bgr.rows / 8);
hipLaunchKernelGGL(( bgr2Luv_d), dim3(grid), dim3(block), 0, 0, (const uchar*)bgr.ptr(0), bgr.step, (uchar*)luv.ptr(0), luv.step);
cudaSafeCall(hipDeviceSynchronize());
}
template<bool isDefaultNum>
__device__ __forceinline__ int fast_angle_bin(const float& dx, const float& dy)
{
const float angle_quantum = CV_PI_F / 6.f;
float angle = atan2(dx, dy) + (angle_quantum / 2.f);
if (angle < 0) angle += CV_PI_F;
const float angle_scaling = 1.f / angle_quantum;
return static_cast<int>(angle * angle_scaling) % 6;
}
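// Specialization for the default 6-bin case: instead of calling atan2, select the bin whose
// direction vector (cos(k*pi/6), sin(k*pi/6)), k = 0..5, has the largest absolute dot product
// with the gradient; the literal constants below are the precomputed cos/sin values
// (bin 3 still evaluates them explicitly).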
template<>
__device__ __forceinline__ int fast_angle_bin<true>(const float& dy, const float& dx)
{
int index = 0;
float max_dot = fabs(dx);
{
const float dot_product = fabs(dx * 0.8660254037844386f + dy * 0.5f);
if(dot_product > max_dot)
{
max_dot = dot_product;
index = 1;
}
}
{
const float dot_product = fabs(dy * 0.8660254037844386f + dx * 0.5f);
if(dot_product > max_dot)
{
max_dot = dot_product;
index = 2;
}
}
{
int i = 3;
float2 bin_vector_i;
bin_vector_i.x = ::cos(i * (CV_PI_F / 6.f));
bin_vector_i.y = ::sin(i * (CV_PI_F / 6.f));
const float dot_product = fabs(dx * bin_vector_i.x + dy * bin_vector_i.y);
if(dot_product > max_dot)
{
max_dot = dot_product;
index = i;
}
}
{
const float dot_product = fabs(dx * (-0.4999999999999998f) + dy * 0.8660254037844387f);
if(dot_product > max_dot)
{
max_dot = dot_product;
index = 4;
}
}
{
const float dot_product = fabs(dx * (-0.8660254037844387f) + dy * 0.49999999999999994f);
if(dot_product > max_dot)
{
max_dot = dot_product;
index = 5;
}
}
return index;
}
texture<uchar, hipTextureType2D, hipReadModeElementType> tgray;
template<bool isDefaultNum>
__global__ void gray2hog(cv::gpu::PtrStepSzb mag)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const float dx = tex2D(tgray, x + 1, y + 0) - tex2D(tgray, x - 1, y - 0);
const float dy = tex2D(tgray, x + 0, y + 1) - tex2D(tgray, x - 0, y - 1);
const float magnitude = sqrtf((dx * dx) + (dy * dy)) * (1.0f / sqrtf(2));
const uchar cmag = static_cast<uchar>(magnitude);
mag( 480 * 6 + y, x) = cmag;
mag( 480 * fast_angle_bin<isDefaultNum>(dy, dx) + y, x) = cmag;
}
void gray2hog(const cv::gpu::PtrStepSzb& gray, cv::gpu::PtrStepSzb mag, const int bins)
{
dim3 block(32, 8);
dim3 grid(gray.cols / 32, gray.rows / 8);
hipChannelFormatDesc desc = hipCreateChannelDesc<uchar>();
cudaSafeCall( hipBindTexture2D(0, tgray, gray.data, desc, gray.cols, gray.rows, gray.step) );
if (bins == 6)
hipLaunchKernelGGL(( gray2hog<true>), dim3(grid), dim3(block), 0, 0, mag);
else
hipLaunchKernelGGL(( gray2hog<false>), dim3(grid), dim3(block), 0, 0, mag);
cudaSafeCall(hipDeviceSynchronize());
}
// ToDo: use textures or uncached load instruction.
__global__ void magToHist(const uchar* __restrict__ mag,
const float* __restrict__ angle, const size_t angPitch,
uchar* __restrict__ hog, const size_t hogPitch, const int fh)
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int bin = (int)(angle[y * angPitch + x]);
const uchar val = mag[y * hogPitch + x];
hog[((fh * bin) + y) * hogPitch + x] = val;
}
void fillBins(cv::gpu::PtrStepSzb hogluv, const cv::gpu::PtrStepSzf& nangle,
const int fw, const int fh, const int bins, hipStream_t stream )
{
const uchar* mag = (const uchar*)hogluv.ptr(fh * bins);
uchar* hog = (uchar*)hogluv.ptr();
const float* angle = (const float*)nangle.ptr();
dim3 block(32, 8);
dim3 grid(fw / 32, fh / 8);
hipLaunchKernelGGL(( magToHist), dim3(grid), dim3(block), 0, stream, mag, angle, nangle.step / sizeof(float), hog, hogluv.step, fh);
if (!stream)
{
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
}
__device__ __forceinline__ float overlapArea(const Detection &a, const Detection &b)
{
int w = ::min(a.x + a.w, b.x + b.w) - ::max(a.x, b.x);
int h = ::min(a.y + a.h, b.y + b.h) - ::max(a.y, b.y);
return (w < 0 || h < 0)? 0.f : (float)(w * h);
}
texture<uint4, hipTextureType2D, hipReadModeElementType> tdetections;
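// Greedy non-maximum suppression over the detections bound to the tdetections texture:
// overlap() flags a detection as suppressed when its overlap ratio (intersection area over
// the smaller box area) with another detection exceeds 0.65, keeping the higher-confidence
// one; collect() then compacts the surviving detections via atomicInc.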
__global__ void overlap(const uint* n, uchar* overlaps)
{
const int idx = threadIdx.x;
const int total = *n;
for (int i = idx + 1; i < total; i += 192)
{
const uint4 _a = tex2D(tdetections, i, 0);
const Detection& a = *((Detection*)(&_a));
bool excluded = false;
for (int j = i + 1; j < total; ++j)
{
const uint4 _b = tex2D(tdetections, j, 0);
const Detection& b = *((Detection*)(&_b));
float ovl = overlapArea(a, b) / ::min(a.w * a.h, b.w * b.h);
if (ovl > 0.65f)
{
int suppessed = (a.confidence > b.confidence)? j : i;
overlaps[suppessed] = 1;
excluded = excluded || (suppessed == i);
}
#if defined __CUDA_ARCH__ && (__CUDA_ARCH__ >= 120)
if (__all(excluded)) break;
#endif
}
}
}
__global__ void collect(const uint* n, uchar* overlaps, uint* ctr, uint4* suppressed)
{
const int idx = threadIdx.x;
const int total = *n;
for (int i = idx; i < total; i += 192)
{
if (!overlaps[i])
{
int oidx = atomicInc(ctr, 50);
suppressed[oidx] = tex2D(tdetections, i + 1, 0);
}
}
}
void suppress(const cv::gpu::PtrStepSzb& objects, cv::gpu::PtrStepSzb overlaps, cv::gpu::PtrStepSzi ndetections,
cv::gpu::PtrStepSzb suppressed, hipStream_t stream)
{
int block = 192;
int grid = 1;
hipChannelFormatDesc desc = hipCreateChannelDesc<uint4>();
size_t offset;
cudaSafeCall( hipBindTexture2D(&offset, tdetections, objects.data, desc, objects.cols / sizeof(uint4), objects.rows, objects.step));
hipLaunchKernelGGL(( overlap), dim3(grid), dim3(block), 0, 0, (uint*)ndetections.ptr(0), (uchar*)overlaps.ptr(0));
hipLaunchKernelGGL(( collect), dim3(grid), dim3(block), 0, 0, (uint*)ndetections.ptr(0), (uchar*)overlaps.ptr(0), (uint*)suppressed.ptr(0), ((uint4*)suppressed.ptr(0)) + 1);
if (!stream)
{
cudaSafeCall( hipGetLastError());
cudaSafeCall( hipDeviceSynchronize());
}
}
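// PrefixSum: warp-wide inclusive scan of the per-thread leaf responses, so that the last lane
// ends up holding the sum over the whole warp; on sm_30+ it uses __shfl_up, while older
// architectures fall back to a warp-synchronous scan in volatile shared memory.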
template<typename Policy>
struct PrefixSum
{
__device_inline__ static void apply(float& impact)
{
#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
#pragma unroll
// scan on shuffle functions
for (int i = 1; i < Policy::WARP; i *= 2)
{
const float n = __shfl_up(impact, i, Policy::WARP);
if (threadIdx.x >= i)
impact += n;
}
#else
__shared__ volatile float ptr[Policy::STA_X * Policy::STA_Y];
const int idx = threadIdx.y * Policy::STA_X + threadIdx.x;
ptr[idx] = impact;
if ( threadIdx.x >= 1) ptr [idx ] = (ptr [idx - 1] + ptr [idx]);
if ( threadIdx.x >= 2) ptr [idx ] = (ptr [idx - 2] + ptr [idx]);
if ( threadIdx.x >= 4) ptr [idx ] = (ptr [idx - 4] + ptr [idx]);
if ( threadIdx.x >= 8) ptr [idx ] = (ptr [idx - 8] + ptr [idx]);
if ( threadIdx.x >= 16) ptr [idx ] = (ptr [idx - 16] + ptr [idx]);
impact = ptr[idx];
#endif
}
};
texture<int, hipTextureType2D, hipReadModeElementType> thogluv;
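// rescale: node.threshold packs the feature channel index in its top 4 bits and the raw
// threshold in the low 28 bits; the node rectangle is scaled to the current pyramid level
// and the threshold is corrected by the ratio of the actual to the expected rescaled area
// (the <true> specialization treats rect.z / rect.w as width / height instead of corners).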
template<bool isUp>
__device__ __forceinline__ float rescale(const Level& level, Node& node)
{
uchar4& scaledRect = node.rect;
float relScale = level.relScale;
float farea = (scaledRect.z - scaledRect.x) * (scaledRect.w - scaledRect.y);
// rescale
scaledRect.x = __float2int_rn(relScale * scaledRect.x);
scaledRect.y = __float2int_rn(relScale * scaledRect.y);
scaledRect.z = __float2int_rn(relScale * scaledRect.z);
scaledRect.w = __float2int_rn(relScale * scaledRect.w);
float sarea = (scaledRect.z - scaledRect.x) * (scaledRect.w - scaledRect.y);
const float expected_new_area = farea * relScale * relScale;
float approx = (sarea == 0)? 1: __fdividef(sarea, expected_new_area);
float rootThreshold = (node.threshold & 0x0FFFFFFFU) * approx * level.scaling[(node.threshold >> 28) > 6];
return rootThreshold;
}
template<>
__device__ __forceinline__ float rescale<true>(const Level& level, Node& node)
{
uchar4& scaledRect = node.rect;
float relScale = level.relScale;
float farea = scaledRect.z * scaledRect.w;
// rescale
scaledRect.x = __float2int_rn(relScale * scaledRect.x);
scaledRect.y = __float2int_rn(relScale * scaledRect.y);
scaledRect.z = __float2int_rn(relScale * scaledRect.z);
scaledRect.w = __float2int_rn(relScale * scaledRect.w);
float sarea = scaledRect.z * scaledRect.w;
const float expected_new_area = farea * relScale * relScale;
float approx = __fdividef(sarea, expected_new_area);
float rootThreshold = (node.threshold & 0x0FFFFFFFU) * approx * level.scaling[(node.threshold >> 28) > 6];
return rootThreshold;
}
template<bool isUp>
__device__ __forceinline__ int get(int x, int y, uchar4 area)
{
int a = tex2D(thogluv, x + area.x, y + area.y);
int b = tex2D(thogluv, x + area.z, y + area.y);
int c = tex2D(thogluv, x + area.z, y + area.w);
int d = tex2D(thogluv, x + area.x, y + area.w);
return (a - b + c - d);
}
template<>
__device__ __forceinline__ int get<true>(int x, int y, uchar4 area)
{
x += area.x;
y += area.y;
int a = tex2D(thogluv, x, y);
int b = tex2D(thogluv, x + area.z, y);
int c = tex2D(thogluv, x + area.z, y + area.w);
int d = tex2D(thogluv, x, y + area.w);
return (a - b + c - d);
}
texture<float2, hipTextureType2D, hipReadModeElementType> troi;
template<typename Policy>
template<bool isUp>
__device_inline__ void CascadeInvoker<Policy>::detect(Detection* objects, const uint ndetections, uint* ctr, const int downscales) const
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x;
// load Level
__shared__ Level level;
// check POI
__shared__ volatile char roiCache[Policy::STA_Y];
if (!threadIdx.y && !threadIdx.x)
((float2*)roiCache)[threadIdx.x] = tex2D(troi, blockIdx.y, x);
__syncthreads();
if (!roiCache[threadIdx.y]) return;
if (!threadIdx.x)
level = levels[downscales + blockIdx.z];
if(x >= level.workRect.x || y >= level.workRect.y) return;
int st = level.octave * level.step;
const int stEnd = st + level.step;
const int hogluvStep = gridDim.y * Policy::STA_Y;
float confidence = 0.f;
for(; st < stEnd; st += Policy::WARP)
{
const int nId = (st + threadIdx.x) * 3;
Node node = nodes[nId];
float threshold = rescale<isUp>(level, node);
int sum = get<isUp>(x, y + (node.threshold >> 28) * hogluvStep, node.rect);
int next = 1 + (int)(sum >= threshold);
node = nodes[nId + next];
threshold = rescale<isUp>(level, node);
sum = get<isUp>(x, y + (node.threshold >> 28) * hogluvStep, node.rect);
const int lShift = (next - 1) * 2 + (int)(sum >= threshold);
float impact = leaves[(st + threadIdx.x) * 4 + lShift];
PrefixSum<Policy>::apply(impact);
#if __CUDA_ARCH__ >= 120
if(__any((confidence + impact <= stages[(st + threadIdx.x)]))) st += 2048;
#endif
#if __CUDA_ARCH__ >= 300
impact = __shfl(impact, 31);
#endif
confidence += impact;
}
if(!threadIdx.x && st == stEnd && ((confidence - FLT_EPSILON) >= 0))
{
int idx = atomicInc(ctr, ndetections);
objects[idx] = Detection(__float2int_rn(x * Policy::SHRINKAGE),
__float2int_rn(y * Policy::SHRINKAGE), level.objSize.x, level.objSize.y, confidence);
}
}
template<typename Policy, bool isUp>
__global__ void soft_cascade(const CascadeInvoker<Policy> invoker, Detection* objects, const uint n, uint* ctr, const int downs)
{
invoker.template detect<isUp>(objects, n, ctr, downs);
}
template<typename Policy>
void CascadeInvoker<Policy>::operator()(const cv::gpu::PtrStepSzb& roi, const cv::gpu::PtrStepSzi& hogluv,
cv::gpu::PtrStepSz<uchar4> objects, const int downscales, const hipStream_t& stream) const
{
int fw = roi.rows;
int fh = roi.cols;
dim3 grid(fw, fh / Policy::STA_Y, downscales);
uint* ctr = (uint*)(objects.ptr(0));
Detection* det = ((Detection*)objects.ptr(0)) + 1;
uint max_det = objects.cols / sizeof(Detection);
hipChannelFormatDesc desc = hipCreateChannelDesc<int>();
cudaSafeCall( hipBindTexture2D(0, thogluv, hogluv.data, desc, hogluv.cols, hogluv.rows, hogluv.step));
hipChannelFormatDesc desc_roi = hipCreateChannelDesc<typename Policy::roi_type>();
cudaSafeCall( hipBindTexture2D(0, troi, roi.data, desc_roi, roi.cols / Policy::STA_Y, roi.rows, roi.step));
const CascadeInvoker<Policy> inv = *this;
hipLaunchKernelGGL(( soft_cascade<Policy, false>), dim3(grid), dim3(Policy::block()), 0, stream, inv, det, max_det, ctr, 0);
cudaSafeCall( hipGetLastError());
grid = dim3(fw, fh / Policy::STA_Y, min(38, scales) - downscales);
hipLaunchKernelGGL(( soft_cascade<Policy, true>), dim3(grid), dim3(Policy::block()), 0, stream, inv, det, max_det, ctr, downscales);
if (!stream)
{
cudaSafeCall( hipGetLastError());
cudaSafeCall( hipDeviceSynchronize());
}
}
template void CascadeInvoker<GK107PolicyX4>::operator()(const cv::gpu::PtrStepSzb& roi, const cv::gpu::PtrStepSzi& hogluv,
cv::gpu::PtrStepSz<uchar4> objects, const int downscales, const hipStream_t& stream) const;
}}}
| 0a016bb99dfe362d71af798bffce7d5c7053a5f1.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2008-2012, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <cuda_invoker.hpp>
#include <float.h>
#include <stdio.h>
#include "opencv2/core/cuda/common.hpp"
namespace cv { namespace softcascade { namespace cudev {
typedef unsigned char uchar;
template <int FACTOR>
__device__ __forceinline__ uchar shrink(const uchar* ptr, const int pitch, const int y, const int x)
{
int out = 0;
#pragma unroll
for(int dy = 0; dy < FACTOR; ++dy)
#pragma unroll
for(int dx = 0; dx < FACTOR; ++dx)
{
out += ptr[dy * pitch + dx];
}
return static_cast<uchar>(out / (FACTOR * FACTOR));
}
template<int FACTOR>
__global__ void shrink(const uchar* __restrict__ hogluv, const size_t inPitch,
uchar* __restrict__ shrank, const size_t outPitch )
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const uchar* ptr = hogluv + (FACTOR * y) * inPitch + (FACTOR * x);
shrank[ y * outPitch + x] = shrink<FACTOR>(ptr, inPitch, y, x);
}
void shrink(const cv::gpu::PtrStepSzb& channels, cv::gpu::PtrStepSzb shrunk)
{
dim3 block(32, 8);
dim3 grid(shrunk.cols / 32, shrunk.rows / 8);
shrink<4><<<grid, block>>>((uchar*)channels.ptr(), channels.step, (uchar*)shrunk.ptr(), shrunk.step);
cudaSafeCall(cudaDeviceSynchronize());
}
__device__ __forceinline__ void luv(const float& b, const float& g, const float& r, uchar& __l, uchar& __u, uchar& __v)
{
// rgb -> XYZ
float x = 0.412453f * r + 0.357580f * g + 0.180423f * b;
float y = 0.212671f * r + 0.715160f * g + 0.072169f * b;
float z = 0.019334f * r + 0.119193f * g + 0.950227f * b;
// computed for D65
const float _ur = 0.19783303699678276f;
const float _vr = 0.46833047435252234f;
const float divisor = fmax((x + 15.f * y + 3.f * z), FLT_EPSILON);
const float _u = __fdividef(4.f * x, divisor);
const float _v = __fdividef(9.f * y, divisor);
float hack = static_cast<float>(__float2int_rn(y * 2047)) / 2047;
const float L = fmax(0.f, ((116.f * cbrtf(hack)) - 16.f));
const float U = 13.f * L * (_u - _ur);
const float V = 13.f * L * (_v - _vr);
// L in [0, 100], u in [-134, 220], v in [-140, 122]
__l = static_cast<uchar>( L * (255.f / 100.f));
__u = static_cast<uchar>((U + 134.f) * (255.f / (220.f + 134.f )));
__v = static_cast<uchar>((V + 140.f) * (255.f / (122.f + 140.f )));
}
__global__ void bgr2Luv_d(const uchar* rgb, const size_t rgbPitch, uchar* luvg, const size_t luvgPitch)
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
uchar3 color = ((uchar3*)(rgb + rgbPitch * y))[x];
uchar l, u, v;
luv(color.x / 255.f, color.y / 255.f, color.z / 255.f, l, u, v);
luvg[luvgPitch * y + x] = l;
luvg[luvgPitch * (y + 480) + x] = u;
luvg[luvgPitch * (y + 2 * 480) + x] = v;
}
void bgr2Luv(const cv::gpu::PtrStepSzb& bgr, cv::gpu::PtrStepSzb luv)
{
dim3 block(32, 8);
dim3 grid(bgr.cols / 32, bgr.rows / 8);
bgr2Luv_d<<<grid, block>>>((const uchar*)bgr.ptr(0), bgr.step, (uchar*)luv.ptr(0), luv.step);
cudaSafeCall(cudaDeviceSynchronize());
}
template<bool isDefaultNum>
__device__ __forceinline__ int fast_angle_bin(const float& dx, const float& dy)
{
const float angle_quantum = CV_PI_F / 6.f;
float angle = atan2(dx, dy) + (angle_quantum / 2.f);
if (angle < 0) angle += CV_PI_F;
const float angle_scaling = 1.f / angle_quantum;
return static_cast<int>(angle * angle_scaling) % 6;
}
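// Specialization for the default 6-bin case: instead of calling atan2, select the bin whose
// direction vector (cos(k*pi/6), sin(k*pi/6)), k = 0..5, has the largest absolute dot product
// with the gradient; the literal constants below are the precomputed cos/sin values
// (bin 3 still evaluates them explicitly).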
template<>
__device__ __forceinline__ int fast_angle_bin<true>(const float& dy, const float& dx)
{
int index = 0;
float max_dot = fabs(dx);
{
const float dot_product = fabs(dx * 0.8660254037844386f + dy * 0.5f);
if(dot_product > max_dot)
{
max_dot = dot_product;
index = 1;
}
}
{
const float dot_product = fabs(dy * 0.8660254037844386f + dx * 0.5f);
if(dot_product > max_dot)
{
max_dot = dot_product;
index = 2;
}
}
{
int i = 3;
float2 bin_vector_i;
bin_vector_i.x = ::cos(i * (CV_PI_F / 6.f));
bin_vector_i.y = ::sin(i * (CV_PI_F / 6.f));
const float dot_product = fabs(dx * bin_vector_i.x + dy * bin_vector_i.y);
if(dot_product > max_dot)
{
max_dot = dot_product;
index = i;
}
}
{
const float dot_product = fabs(dx * (-0.4999999999999998f) + dy * 0.8660254037844387f);
if(dot_product > max_dot)
{
max_dot = dot_product;
index = 4;
}
}
{
const float dot_product = fabs(dx * (-0.8660254037844387f) + dy * 0.49999999999999994f);
if(dot_product > max_dot)
{
max_dot = dot_product;
index = 5;
}
}
return index;
}
texture<uchar, cudaTextureType2D, cudaReadModeElementType> tgray;
template<bool isDefaultNum>
__global__ void gray2hog(cv::gpu::PtrStepSzb mag)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const float dx = tex2D(tgray, x + 1, y + 0) - tex2D(tgray, x - 1, y - 0);
const float dy = tex2D(tgray, x + 0, y + 1) - tex2D(tgray, x - 0, y - 1);
const float magnitude = sqrtf((dx * dx) + (dy * dy)) * (1.0f / sqrtf(2));
const uchar cmag = static_cast<uchar>(magnitude);
mag( 480 * 6 + y, x) = cmag;
mag( 480 * fast_angle_bin<isDefaultNum>(dy, dx) + y, x) = cmag;
}
void gray2hog(const cv::gpu::PtrStepSzb& gray, cv::gpu::PtrStepSzb mag, const int bins)
{
dim3 block(32, 8);
dim3 grid(gray.cols / 32, gray.rows / 8);
cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar>();
cudaSafeCall( cudaBindTexture2D(0, tgray, gray.data, desc, gray.cols, gray.rows, gray.step) );
if (bins == 6)
gray2hog<true><<<grid, block>>>(mag);
else
gray2hog<false><<<grid, block>>>(mag);
cudaSafeCall(cudaDeviceSynchronize());
}
// ToDo: use textures or uncached load instruction.
__global__ void magToHist(const uchar* __restrict__ mag,
const float* __restrict__ angle, const size_t angPitch,
uchar* __restrict__ hog, const size_t hogPitch, const int fh)
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int bin = (int)(angle[y * angPitch + x]);
const uchar val = mag[y * hogPitch + x];
hog[((fh * bin) + y) * hogPitch + x] = val;
}
void fillBins(cv::gpu::PtrStepSzb hogluv, const cv::gpu::PtrStepSzf& nangle,
const int fw, const int fh, const int bins, cudaStream_t stream )
{
const uchar* mag = (const uchar*)hogluv.ptr(fh * bins);
uchar* hog = (uchar*)hogluv.ptr();
const float* angle = (const float*)nangle.ptr();
dim3 block(32, 8);
dim3 grid(fw / 32, fh / 8);
magToHist<<<grid, block, 0, stream>>>(mag, angle, nangle.step / sizeof(float), hog, hogluv.step, fh);
if (!stream)
{
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
}
__device__ __forceinline__ float overlapArea(const Detection &a, const Detection &b)
{
int w = ::min(a.x + a.w, b.x + b.w) - ::max(a.x, b.x);
int h = ::min(a.y + a.h, b.y + b.h) - ::max(a.y, b.y);
return (w < 0 || h < 0)? 0.f : (float)(w * h);
}
texture<uint4, cudaTextureType2D, cudaReadModeElementType> tdetections;
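// Greedy non-maximum suppression over the detections bound to the tdetections texture:
// overlap() flags a detection as suppressed when its overlap ratio (intersection area over
// the smaller box area) with another detection exceeds 0.65, keeping the higher-confidence
// one; collect() then compacts the surviving detections via atomicInc.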
__global__ void overlap(const uint* n, uchar* overlaps)
{
const int idx = threadIdx.x;
const int total = *n;
for (int i = idx + 1; i < total; i += 192)
{
const uint4 _a = tex2D(tdetections, i, 0);
const Detection& a = *((Detection*)(&_a));
bool excluded = false;
for (int j = i + 1; j < total; ++j)
{
const uint4 _b = tex2D(tdetections, j, 0);
const Detection& b = *((Detection*)(&_b));
float ovl = overlapArea(a, b) / ::min(a.w * a.h, b.w * b.h);
if (ovl > 0.65f)
{
int suppessed = (a.confidence > b.confidence)? j : i;
overlaps[suppessed] = 1;
excluded = excluded || (suppessed == i);
}
#if defined __CUDA_ARCH__ && (__CUDA_ARCH__ >= 120)
if (__all(excluded)) break;
#endif
}
}
}
__global__ void collect(const uint* n, uchar* overlaps, uint* ctr, uint4* suppressed)
{
const int idx = threadIdx.x;
const int total = *n;
for (int i = idx; i < total; i += 192)
{
if (!overlaps[i])
{
int oidx = atomicInc(ctr, 50);
suppressed[oidx] = tex2D(tdetections, i + 1, 0);
}
}
}
void suppress(const cv::gpu::PtrStepSzb& objects, cv::gpu::PtrStepSzb overlaps, cv::gpu::PtrStepSzi ndetections,
cv::gpu::PtrStepSzb suppressed, cudaStream_t stream)
{
int block = 192;
int grid = 1;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<uint4>();
size_t offset;
cudaSafeCall( cudaBindTexture2D(&offset, tdetections, objects.data, desc, objects.cols / sizeof(uint4), objects.rows, objects.step));
overlap<<<grid, block>>>((uint*)ndetections.ptr(0), (uchar*)overlaps.ptr(0));
collect<<<grid, block>>>((uint*)ndetections.ptr(0), (uchar*)overlaps.ptr(0), (uint*)suppressed.ptr(0), ((uint4*)suppressed.ptr(0)) + 1);
if (!stream)
{
cudaSafeCall( cudaGetLastError());
cudaSafeCall( cudaDeviceSynchronize());
}
}
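// PrefixSum: warp-wide inclusive scan of the per-thread leaf responses, so that the last lane
// ends up holding the sum over the whole warp; on sm_30+ it uses __shfl_up, while older
// architectures fall back to a warp-synchronous scan in volatile shared memory.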
template<typename Policy>
struct PrefixSum
{
__device_inline__ static void apply(float& impact)
{
#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
#pragma unroll
// scan on shuffle functions
for (int i = 1; i < Policy::WARP; i *= 2)
{
const float n = __shfl_up(impact, i, Policy::WARP);
if (threadIdx.x >= i)
impact += n;
}
#else
__shared__ volatile float ptr[Policy::STA_X * Policy::STA_Y];
const int idx = threadIdx.y * Policy::STA_X + threadIdx.x;
ptr[idx] = impact;
if ( threadIdx.x >= 1) ptr [idx ] = (ptr [idx - 1] + ptr [idx]);
if ( threadIdx.x >= 2) ptr [idx ] = (ptr [idx - 2] + ptr [idx]);
if ( threadIdx.x >= 4) ptr [idx ] = (ptr [idx - 4] + ptr [idx]);
if ( threadIdx.x >= 8) ptr [idx ] = (ptr [idx - 8] + ptr [idx]);
if ( threadIdx.x >= 16) ptr [idx ] = (ptr [idx - 16] + ptr [idx]);
impact = ptr[idx];
#endif
}
};
texture<int, cudaTextureType2D, cudaReadModeElementType> thogluv;
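// rescale: node.threshold packs the feature channel index in its top 4 bits and the raw
// threshold in the low 28 bits; the node rectangle is scaled to the current pyramid level
// and the threshold is corrected by the ratio of the actual to the expected rescaled area
// (the <true> specialization treats rect.z / rect.w as width / height instead of corners).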
template<bool isUp>
__device__ __forceinline__ float rescale(const Level& level, Node& node)
{
uchar4& scaledRect = node.rect;
float relScale = level.relScale;
float farea = (scaledRect.z - scaledRect.x) * (scaledRect.w - scaledRect.y);
// rescale
scaledRect.x = __float2int_rn(relScale * scaledRect.x);
scaledRect.y = __float2int_rn(relScale * scaledRect.y);
scaledRect.z = __float2int_rn(relScale * scaledRect.z);
scaledRect.w = __float2int_rn(relScale * scaledRect.w);
float sarea = (scaledRect.z - scaledRect.x) * (scaledRect.w - scaledRect.y);
const float expected_new_area = farea * relScale * relScale;
float approx = (sarea == 0)? 1: __fdividef(sarea, expected_new_area);
float rootThreshold = (node.threshold & 0x0FFFFFFFU) * approx * level.scaling[(node.threshold >> 28) > 6];
return rootThreshold;
}
template<>
__device__ __forceinline__ float rescale<true>(const Level& level, Node& node)
{
uchar4& scaledRect = node.rect;
float relScale = level.relScale;
float farea = scaledRect.z * scaledRect.w;
// rescale
scaledRect.x = __float2int_rn(relScale * scaledRect.x);
scaledRect.y = __float2int_rn(relScale * scaledRect.y);
scaledRect.z = __float2int_rn(relScale * scaledRect.z);
scaledRect.w = __float2int_rn(relScale * scaledRect.w);
float sarea = scaledRect.z * scaledRect.w;
const float expected_new_area = farea * relScale * relScale;
float approx = __fdividef(sarea, expected_new_area);
float rootThreshold = (node.threshold & 0x0FFFFFFFU) * approx * level.scaling[(node.threshold >> 28) > 6];
return rootThreshold;
}
template<bool isUp>
__device__ __forceinline__ int get(int x, int y, uchar4 area)
{
int a = tex2D(thogluv, x + area.x, y + area.y);
int b = tex2D(thogluv, x + area.z, y + area.y);
int c = tex2D(thogluv, x + area.z, y + area.w);
int d = tex2D(thogluv, x + area.x, y + area.w);
return (a - b + c - d);
}
template<>
__device__ __forceinline__ int get<true>(int x, int y, uchar4 area)
{
x += area.x;
y += area.y;
int a = tex2D(thogluv, x, y);
int b = tex2D(thogluv, x + area.z, y);
int c = tex2D(thogluv, x + area.z, y + area.w);
int d = tex2D(thogluv, x, y + area.w);
return (a - b + c - d);
}
texture<float2, cudaTextureType2D, cudaReadModeElementType> troi;
template<typename Policy>
template<bool isUp>
__device_inline__ void CascadeInvoker<Policy>::detect(Detection* objects, const uint ndetections, uint* ctr, const int downscales) const
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = blockIdx.x;
// load Level
__shared__ Level level;
// check POI
__shared__ volatile char roiCache[Policy::STA_Y];
if (!threadIdx.y && !threadIdx.x)
((float2*)roiCache)[threadIdx.x] = tex2D(troi, blockIdx.y, x);
__syncthreads();
if (!roiCache[threadIdx.y]) return;
if (!threadIdx.x)
level = levels[downscales + blockIdx.z];
if(x >= level.workRect.x || y >= level.workRect.y) return;
int st = level.octave * level.step;
const int stEnd = st + level.step;
const int hogluvStep = gridDim.y * Policy::STA_Y;
float confidence = 0.f;
for(; st < stEnd; st += Policy::WARP)
{
const int nId = (st + threadIdx.x) * 3;
Node node = nodes[nId];
float threshold = rescale<isUp>(level, node);
int sum = get<isUp>(x, y + (node.threshold >> 28) * hogluvStep, node.rect);
int next = 1 + (int)(sum >= threshold);
node = nodes[nId + next];
threshold = rescale<isUp>(level, node);
sum = get<isUp>(x, y + (node.threshold >> 28) * hogluvStep, node.rect);
const int lShift = (next - 1) * 2 + (int)(sum >= threshold);
float impact = leaves[(st + threadIdx.x) * 4 + lShift];
PrefixSum<Policy>::apply(impact);
#if __CUDA_ARCH__ >= 120
if(__any((confidence + impact <= stages[(st + threadIdx.x)]))) st += 2048;
#endif
#if __CUDA_ARCH__ >= 300
impact = __shfl(impact, 31);
#endif
confidence += impact;
}
if(!threadIdx.x && st == stEnd && ((confidence - FLT_EPSILON) >= 0))
{
int idx = atomicInc(ctr, ndetections);
objects[idx] = Detection(__float2int_rn(x * Policy::SHRINKAGE),
__float2int_rn(y * Policy::SHRINKAGE), level.objSize.x, level.objSize.y, confidence);
}
}
template<typename Policy, bool isUp>
__global__ void soft_cascade(const CascadeInvoker<Policy> invoker, Detection* objects, const uint n, uint* ctr, const int downs)
{
invoker.template detect<isUp>(objects, n, ctr, downs);
}
template<typename Policy>
void CascadeInvoker<Policy>::operator()(const cv::gpu::PtrStepSzb& roi, const cv::gpu::PtrStepSzi& hogluv,
cv::gpu::PtrStepSz<uchar4> objects, const int downscales, const cudaStream_t& stream) const
{
int fw = roi.rows;
int fh = roi.cols;
dim3 grid(fw, fh / Policy::STA_Y, downscales);
uint* ctr = (uint*)(objects.ptr(0));
Detection* det = ((Detection*)objects.ptr(0)) + 1;
uint max_det = objects.cols / sizeof(Detection);
cudaChannelFormatDesc desc = cudaCreateChannelDesc<int>();
cudaSafeCall( cudaBindTexture2D(0, thogluv, hogluv.data, desc, hogluv.cols, hogluv.rows, hogluv.step));
cudaChannelFormatDesc desc_roi = cudaCreateChannelDesc<typename Policy::roi_type>();
cudaSafeCall( cudaBindTexture2D(0, troi, roi.data, desc_roi, roi.cols / Policy::STA_Y, roi.rows, roi.step));
const CascadeInvoker<Policy> inv = *this;
soft_cascade<Policy, false><<<grid, Policy::block(), 0, stream>>>(inv, det, max_det, ctr, 0);
cudaSafeCall( cudaGetLastError());
grid = dim3(fw, fh / Policy::STA_Y, min(38, scales) - downscales);
soft_cascade<Policy, true><<<grid, Policy::block(), 0, stream>>>(inv, det, max_det, ctr, downscales);
if (!stream)
{
cudaSafeCall( cudaGetLastError());
cudaSafeCall( cudaDeviceSynchronize());
}
}
template void CascadeInvoker<GK107PolicyX4>::operator()(const cv::gpu::PtrStepSzb& roi, const cv::gpu::PtrStepSzi& hogluv,
cv::gpu::PtrStepSz<uchar4> objects, const int downscales, const cudaStream_t& stream) const;
}}}
|
f0318b541090765bd3a2a396b26620d940d8859f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cunn_ClassNLLCriterion_updateGradInput_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *gradInput = NULL;
hipMalloc(&gradInput, XSIZE*YSIZE);
float *target = NULL;
hipMalloc(&target, XSIZE*YSIZE);
int nframe = 1;
int ndim = 1;
float grad = 1;
int ntarget = 1;
float *weights = NULL;
hipMalloc(&weights, XSIZE*YSIZE);
bool apply_weights = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((cunn_ClassNLLCriterion_updateGradInput_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, gradInput, target, nframe, ndim, grad, ntarget, weights, apply_weights);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((cunn_ClassNLLCriterion_updateGradInput_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, gradInput, target, nframe, ndim, grad, ntarget, weights, apply_weights);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((cunn_ClassNLLCriterion_updateGradInput_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, gradInput, target, nframe, ndim, grad, ntarget, weights, apply_weights);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f0318b541090765bd3a2a396b26620d940d8859f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cunn_ClassNLLCriterion_updateGradInput_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *gradInput = NULL;
cudaMalloc(&gradInput, XSIZE*YSIZE);
float *target = NULL;
cudaMalloc(&target, XSIZE*YSIZE);
int nframe = 1;
int ndim = 1;
float grad = 1;
int ntarget = 1;
float *weights = NULL;
cudaMalloc(&weights, XSIZE*YSIZE);
bool apply_weights = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cunn_ClassNLLCriterion_updateGradInput_kernel<<<gridBlock,threadBlock>>>(gradInput,target,nframe,ndim,grad,ntarget,weights,apply_weights);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cunn_ClassNLLCriterion_updateGradInput_kernel<<<gridBlock,threadBlock>>>(gradInput,target,nframe,ndim,grad,ntarget,weights,apply_weights);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cunn_ClassNLLCriterion_updateGradInput_kernel<<<gridBlock,threadBlock>>>(gradInput,target,nframe,ndim,grad,ntarget,weights,apply_weights);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
c47fa461ce6ef8b078df1d019eea860a63037af9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void add2( double * v1, const double * v2 )
{
int idx = threadIdx.x;
v1[idx] += v2[idx];
} | c47fa461ce6ef8b078df1d019eea860a63037af9.cu | __global__ void add2( double * v1, const double * v2 )
{
int idx = threadIdx.x;
v1[idx] += v2[idx];
} |
d579d70ceb8c652ff89eb10fcdf1a40b922abff5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* vanillaOpt.cu
* Monte Carlo methods in CUDA
* Dissertation project
* Created on: 06/feb/2018
* Author: Marco Matteo Buzzulini
* Copyright © 2018 Marco Matteo Buzzulini. All rights reserved.
*/
#include "MonteCarlo.h"
#define NTHREADS 1
#define THREADS 128
#define BLOCKS 512
#define SIMPB 131072
extern "C" float host_bsCall ( OptionData );
extern "C" OptionValue host_vanillaOpt(OptionData, int);
extern "C" OptionValue dev_vanillaOpt(OptionData *, int, int, int);
extern "C" void printOption( OptionData o);
const float S = 100;
const float K = 100;
const float R = 0.048790;
const float V = 0.2;
const float T = 1.f;
int main(int argc, const char * argv[]) {
/*------------------------- VARIABLES ------------------------------*/
// Option Data
OptionData option;
option.v = V;
option.s = S;
option.k= K;
option.r= R;
option.t= T;
// Simulation
unsigned int numBlocks, numThreads[NTHREADS], i, SIMS;
OptionValue CPU_sim, GPU_sim[NTHREADS];
float CPU_timeSpent=0, GPU_timeSpent[NTHREADS], speedup[NTHREADS];
float bs_price, difference[NTHREADS], diff;
hipEvent_t d_start, d_stop;
/*----------------------- START PROGRAM ----------------------------*/
printf("Vanilla Option Pricing\n");
// CUDA parameters for parallel execution
numBlocks = BLOCKS;
//numThreads[1] = 128;
numThreads[0] = THREADS;
//numThreads[2] = 512;
//numThreads[3] = 1024;
printf("Inserisci il numero di simulazioni (x131.072): ");
scanf("%d",&SIMS);
SIMS *= SIMPB;
printf("\nScenari di Monte Carlo: %d\n",SIMS);
// Print Option details
printOption(option);
// Time instructions
CudaCheck( hipEventCreate( &d_start ));
CudaCheck( hipEventCreate( &d_stop ));
// Black & Scholes price
bs_price = host_bsCall(option);
printf("\nPrezzo Black & Scholes: %f\n",bs_price);
// CPU Monte Carlo
printf("\nMonte Carlo execution on CPU...\n");
CudaCheck( hipEventRecord( d_start, 0 ));
CPU_sim=host_vanillaOpt(option, SIMS);
CudaCheck( hipEventRecord( d_stop, 0));
CudaCheck( hipEventSynchronize( d_stop ));
CudaCheck( hipEventElapsedTime( &CPU_timeSpent, d_start, d_stop ));
//CPU_timeSpent /= 1000;
diff = abs(CPU_sim.Expected - bs_price);
// GPU Monte Carlo
printf("\nMonte Carlo execution on GPU...\n");
for(i=0; i<NTHREADS; i++){
printf("(NumBlocks, NumSimulations) : (%d,%d) x %d simulations per thread\n", BLOCKS, numThreads[i], SIMS/BLOCKS/numThreads[i]);
CudaCheck( hipEventRecord( d_start, 0 ));
GPU_sim[i] = dev_vanillaOpt(&option, numBlocks, numThreads[i],SIMS);
CudaCheck( hipEventRecord( d_stop, 0));
CudaCheck( hipEventSynchronize( d_stop ));
CudaCheck( hipEventElapsedTime( &GPU_timeSpent[i], d_start, d_stop ));
//GPU_timeSpent[i] /= 1000;
difference[i] = abs(GPU_sim[i].Expected - bs_price);
speedup[i] = abs(CPU_timeSpent / GPU_timeSpent[i]);
printf("\n");
}
// Comparing time spent with the two methods
printf( "\n-\tResults:\t-\n");
printf("Simulated price for the option with CPU: Expected price, I.C., diff from BS, time\n%f \n%f \n%f \n%.2f \n", CPU_sim.Expected, CPU_sim.Confidence, diff, CPU_timeSpent);
printf("Simulated price for the option with GPU:\n");
printf(" : NumThreads : Price : Confidence Interval : Difference from BS price : Time : Speedup :");
printf("\n");
for(i=0; i<NTHREADS; i++){
printf("%d \n",numThreads[i]);
printf("%f \n",GPU_sim[i].Expected);
printf("%f \n",GPU_sim[i].Confidence);
printf("%f \n",difference[i]);
printf("%.2f \n",GPU_timeSpent[i]);
printf("%.2f \n",speedup[i]);
printf("---\n");
}
CudaCheck( hipEventDestroy( d_start ));
CudaCheck( hipEventDestroy( d_stop ));
return 0;
}
| d579d70ceb8c652ff89eb10fcdf1a40b922abff5.cu | /*
* vanillaOpt.cu
* Monte Carlo methods in CUDA
* Dissertation project
* Created on: 06/feb/2018
* Author: Marco Matteo Buzzulini
* Copyright © 2018 Marco Matteo Buzzulini. All rights reserved.
*/
#include "MonteCarlo.h"
#define NTHREADS 1
#define THREADS 128
#define BLOCKS 512
#define SIMPB 131072
extern "C" float host_bsCall ( OptionData );
extern "C" OptionValue host_vanillaOpt(OptionData, int);
extern "C" OptionValue dev_vanillaOpt(OptionData *, int, int, int);
extern "C" void printOption( OptionData o);
const float S = 100;
const float K = 100;
const float R = 0.048790;
const float V = 0.2;
const float T = 1.f;
int main(int argc, const char * argv[]) {
/*------------------------- VARIABLES ------------------------------*/
// Option Data
OptionData option;
option.v = V;
option.s = S;
option.k= K;
option.r= R;
option.t= T;
// Simulation
unsigned int numBlocks, numThreads[NTHREADS], i, SIMS;
OptionValue CPU_sim, GPU_sim[NTHREADS];
float CPU_timeSpent=0, GPU_timeSpent[NTHREADS], speedup[NTHREADS];
float bs_price, difference[NTHREADS], diff;
cudaEvent_t d_start, d_stop;
/*----------------------- START PROGRAM ----------------------------*/
printf("Vanilla Option Pricing\n");
// CUDA parameters for parallel execution
numBlocks = BLOCKS;
//numThreads[1] = 128;
numThreads[0] = THREADS;
//numThreads[2] = 512;
//numThreads[3] = 1024;
printf("Inserisci il numero di simulazioni (x131.072): ");
scanf("%d",&SIMS);
SIMS *= SIMPB;
printf("\nScenari di Monte Carlo: %d\n",SIMS);
// Print Option details
printOption(option);
// Time instructions
CudaCheck( cudaEventCreate( &d_start ));
CudaCheck( cudaEventCreate( &d_stop ));
// Black & Scholes price
bs_price = host_bsCall(option);
printf("\nPrezzo Black & Scholes: %f\n",bs_price);
// CPU Monte Carlo
printf("\nMonte Carlo execution on CPU...\n");
CudaCheck( cudaEventRecord( d_start, 0 ));
CPU_sim=host_vanillaOpt(option, SIMS);
CudaCheck( cudaEventRecord( d_stop, 0));
CudaCheck( cudaEventSynchronize( d_stop ));
CudaCheck( cudaEventElapsedTime( &CPU_timeSpent, d_start, d_stop ));
//CPU_timeSpent /= 1000;
diff = abs(CPU_sim.Expected - bs_price);
// GPU Monte Carlo
printf("\nMonte Carlo execution on GPU...\n");
for(i=0; i<NTHREADS; i++){
printf("(NumBlocks, NumSimulations) : (%d,%d) x %d simulations per thread\n", BLOCKS, numThreads[i], SIMS/BLOCKS/numThreads[i]);
CudaCheck( cudaEventRecord( d_start, 0 ));
GPU_sim[i] = dev_vanillaOpt(&option, numBlocks, numThreads[i],SIMS);
CudaCheck( cudaEventRecord( d_stop, 0));
CudaCheck( cudaEventSynchronize( d_stop ));
CudaCheck( cudaEventElapsedTime( &GPU_timeSpent[i], d_start, d_stop ));
//GPU_timeSpent[i] /= 1000;
difference[i] = abs(GPU_sim[i].Expected - bs_price);
speedup[i] = abs(CPU_timeSpent / GPU_timeSpent[i]);
printf("\n");
}
// Comparing time spent with the two methods
printf( "\n-\tResults:\t-\n");
printf("Simulated price for the option with CPU: Expected price, I.C., diff from BS, time\n%f \n%f \n%f \n%.2f \n", CPU_sim.Expected, CPU_sim.Confidence, diff, CPU_timeSpent);
printf("Simulated price for the option with GPU:\n");
printf(" : NumThreads : Price : Confidence Interval : Difference from BS price : Time : Speedup :");
printf("\n");
for(i=0; i<NTHREADS; i++){
printf("%d \n",numThreads[i]);
printf("%f \n",GPU_sim[i].Expected);
printf("%f \n",GPU_sim[i].Confidence);
printf("%f \n",difference[i]);
printf("%.2f \n",GPU_timeSpent[i]);
printf("%.2f \n",speedup[i]);
printf("---\n");
}
CudaCheck( cudaEventDestroy( d_start ));
CudaCheck( cudaEventDestroy( d_stop ));
return 0;
}
|
3d75aca37d25b38fe8d0fb77ed1ec731311f1b3c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "momentsMatrix.cuh"
struct InterpolateStruct{
ftype3 shifts;
int3 stencilMinPos;
__device__ __forceinline__ InterpolateStruct(ftype3 xf, int3 _sminpos): stencilMinPos(_sminpos) {
shifts = xf - make_ftype3(stencilMinPos);
}
template<class T,class F> __device__ inline T calc(F);
};
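// streaming_collision: semi-Lagrangian streaming followed by collision. For each population
// iq the departure point xf = ic - v_iq, with v_iq = ef[iq]*sqrt(T/TLat) + u, is reconstructed
// by Lagrange interpolation over a local stencil; the gauge (u, T) is then re-estimated from
// the new populations and the step is repeated until isConv() reports convergence (at most
// 100 iterations), after which collision() is applied against the equilibrium feq.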
template<int RegOrder=-1> __global__ __launch_bounds__(LBMconsts::Qn) void streaming_collision(int ibn) {
ibn+= blockIdx.x;
if(ibn>=Nx*Ny*Nz) return;
const int ix = ibn%Nx;
const int iy = ibn/Nx%Ny;
const int iz = ibn/(Nx*Ny);
const int ild = 0;
const int ist = 1;
Cell cell = pars.data.get_cell(ild, ix,iy,iz);
const int3 ic = make_int3(ix, iy, iz);
const int3 Nxyz = make_int3(Nx,Ny,Nz);
using namespace LBMconsts;
int useSHmem4momMatrix = 0;
if(sizeof(MomentsMatrix)<48*1024) useSHmem4momMatrix = 1;
/*if(blockIdx.x==0) {
MomentsMatrix mtest;
for(int i=0;i<Qn;i++) for(int j=0;j<Qn;j++) mtest.m[i][j] = 2.+sqrt(abs(sin(100*i)*cos(i*j)));//1.23456+1.23*i/(j+1)+j/(i+1)*0.21;
MomentsMatrix mtestorig=mtest;
mtest.inverse();
ftype mmul[Qn][Qn];
for(int i=0;i<Qn;i++) for(int j=0;j<Qn;j++) {
ftype sum=0;
for(int k=0;k<Qn;k++) sum+= mtestorig.m[i][k]*mtest.m[k][j+Qn];
if(abs(sum)<1e-8) sum=0;
printf("%g ",sum);
if(j==Qn-1) printf("\n");
}
assert(0);
}*/
MomentsMatrix* Mm,*Mm0;
__shared__ MomentsMatrix _mmsh;
__shared__ MomentsMatrix* shMm;
//__shared__ MomentsMatrix _mmsh0;
//__shared__ MomentsMatrix* shMm0;
if(useSHmem4momMatrix) {
Mm = &_mmsh;
//Mm0 = &_mmsh0;
} else {
if(threadIdx.x==0) {
shMm = (MomentsMatrix*)malloc(sizeof(MomentsMatrix));
//shMm0 = (MomentsMatrix*)malloc(sizeof(MomentsMatrix));
assert(shMm);
//assert(shMm0);
}
__syncthreads();
Mm=shMm;
//Mm0=shMm0;
}
//cell.vel*=0; cell.T=PPdev.initial.T0/10.0;
__shared__ Cell cell_new;
int Niter=0;
while(Niter<100) {
ftype T = cell.T;
ftype rho=cell.rho;
ftype3 vel = cell.vel;
ftype4 gauge = make_ftype4(vel.x, vel.y, vel.z, sqrt(T/TLat));
__syncthreads();
if(RegOrder<0) {
Mm->init(gauge);
__syncthreads();
Mm->inverse();
__syncthreads();
}
/* Mm0->init(make_ftype4(0,0,0,1));
__syncthreads();
Mm0->inverse();
__syncthreads(); */
const int iq = threadIdx.x;
ftype3 v = ef[iq]*gauge.w + make_ftype3(gauge.x,gauge.y,gauge.z);
int3 interpStencilMinPos = ic - make_int3(PPdev.stencilInterpWidth/2);
const ftype3 xf = make_ftype3(ic)-v;
if(PPdev.stencilFixed==0) {
ftype3 pos = xf-make_ftype3(0.5*PPdev.stencilInterpWidth);
interpStencilMinPos = make_int3( round(pos.x), round(pos.y), round(pos.z) );
}
if(DIM<2) interpStencilMinPos.y = ic.y;
if(DIM<3) interpStencilMinPos.z = ic.z;
InterpolateStruct interpolate(xf, interpStencilMinPos);
//cell_new.f[iq] = pars.data.tiles[ild][interpStencilMinPos.x+interpStencilMinPos.y+interpStencilMinPos.z].f[iq];
if(RegOrder<0) {
cell_new.f[iq] = interpolate.calc<ftype>( [&] __device__ (int index) {
ftype mVec[Qn];
ftype4 igauge = pars.data.tiles[ild][index].uT[0];
igauge.w = sqrt(igauge.w/TLat);
calc_moments_vec( igauge, pars.data.tiles[ild][index].f, mVec );
/*ftype all_fi[Qn]; for(int ii=0;ii<Qn;ii++) all_fi[ii] = Mm->get_inv(iq,mVec);
calc_moments_vec( make_ftype4(0,0,0,1), all_fi, mVec );
for(int ii=0;ii<Qn;ii++) if(MomentsPower[ii].x+MomentsPower[ii].y+MomentsPower[ii].z>4) mVec[ii]=0;
const ftype fi_reg = Mm0->get_inv(iq,mVec);
return fi_reg;*/
const ftype fi = Mm->get_inv(iq, mVec);
return fi;
//return pars.data.tiles[ild][index].f[iq];
} );
} else {
TensorCoeffs<RegOrder> an = interpolate.calc< TensorCoeffs<RegOrder> >( [&] __device__ (int index) {
TensorCoeffs<RegOrder> an_p;
ftype4 igauge = pars.data.tiles[ild][index].uT[0];
igauge.w = sqrt(igauge.w/TLat);
calc_moments_tensors( igauge, pars.data.tiles[ild][index].f, an_p);
return an_p;
} );
TensorCoeffs<RegOrder> dn = convertAtoD(an, gauge);
cell_new.f[iq] = eval_fi_Hermit(dn, iq);
/*ftype4 tmpgauge = pars.data.tiles[ild][174].uT[0];
tmpgauge.w = sqrt(tmpgauge.w/TLat);
ftype mVec[Qn];
calc_moments_vec( tmpgauge, pars.data.tiles[ild][174].f, mVec );
if(ix==174 && iq==0) printf("Niter=%d moments=(%g %g %g %g %g %g)\n An=(%g %g %g %g %g %g)\n Dn=(%g %g %g %g %g %g)\n",
Niter, mVec[0],mVec[1],mVec[2],mVec[3],mVec[4],mVec[5],
an.k[0],an.k[1],an.k[2],an.k[3],an.k[4],an.k[5],
dn.k[0],dn.k[1],dn.k[2],dn.k[3],dn.k[4],dn.k[5]
);*/
}
__syncthreads();
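// Thread 0 rebuilds the macroscopic fields (density, velocity, temperature) from the zeroth, first and second moments of the freshly streamed populations.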
if(threadIdx.x==0) {
ftype4 Vrho = make_ftype4(0,0,0,0);
ftype M2 = 0;
for(int ik=0; ik<Qn; ik++) {
ftype3 v_k = ef[ik]*gauge.w + make_ftype3(gauge.x,gauge.y,gauge.z);
Vrho+= make_ftype4(v_k.x,v_k.y,v_k.z,1)*cell_new.f[ik];
M2+= dot(v_k,v_k)*cell_new.f[ik];
}
cell_new.rho = Vrho.w;
cell_new.vel = make_ftype3(Vrho.x,Vrho.y,Vrho.z)/cell_new.rho;
cell_new.T = M2/cell_new.rho-dot(cell_new.vel,cell_new.vel); cell_new.T/=DIM;
if(PPdev.fixedTemperature) cell_new.T=cell.T;
if(cell_new.T<0) {
printf("Convergency problem: cell %d %d %d (iteration %d) got negative T=%g, reset to positive\n",
ix,iy,iz,Niter, cell_new.T );
cell_new.T=-cell_new.T;
}
}
__syncthreads();
Niter++;
if( isConv(cell,cell_new) ) { cell=cell_new; break; }
cell=cell_new;
__syncthreads();
}
//printf("ixyz=%d %d %d Niter=%d\n",ix,iy,iz, Niter);
__syncthreads();
if(threadIdx.x==0) {
if(!useSHmem4momMatrix) free(Mm);
ftype feq[Qn];
Cell::calcEq(feq, cell.rho, make_ftype3(0,0,0), TLat);
collision(cell.f,feq);
pars.data.set_cell(cell, ist, ix,iy,iz);
pars.data.tiles[ist][ix+iy*Nx+iz*Nx*Ny].Niter[0] = Niter;
}
}
inline __device__ ftype LagrPol(int ix,int iy,int iz, const ftype3 shifts, const int N);
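// Tensor-product Lagrange interpolation over a (stencilInterpWidth+1)-wide stencil per active dimension, with periodic wrapping; func(index) supplies the nodal value and LagrPol the weight.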
template<class Interp_t, class F> __device__ inline Interp_t InterpolateStruct::calc(F func) {
const int3 Nxyz = make_int3(Nx,Ny,Nz);
Interp_t val(0);
const int Npoints = PPdev.stencilInterpWidth+1;
for(int xs=0; xs<Npoints; xs++) {
for(int ys=0; ys<((DIM<2)?1:Npoints); ys++) {
for(int zs=0; zs<((DIM<3)?1:Npoints); zs++) {
const int3 crd = ( stencilMinPos+make_int3(xs,ys,zs)+Nxyz )%Nxyz;
const int index = crd.x + crd.y*Nx + crd.z*Nx*Ny;
const ftype coeff = LagrPol(xs,ys,zs, shifts, Npoints);
auto Tcoffsp = func(index);
Tcoffsp*= coeff;
val+= Tcoffsp;
}
}
}
return val;
}
inline __device__ ftype LagrPol(int ix,int iy,int iz, const ftype3 shifts, const int N){
ftype a=1;
if(DIM>0) for(int ixp=0; ixp<N; ixp++) if(ixp!=ix) a*= (shifts.x-ixp)/(ix-ixp);
if(DIM>1) for(int iyp=0; iyp<N; iyp++) if(iyp!=iy) a*= (shifts.y-iyp)/(iy-iyp);
if(DIM>2) for(int izp=0; izp<N; izp++) if(izp!=iz) a*= (shifts.z-izp)/(iz-izp);
return a;
}
| 3d75aca37d25b38fe8d0fb77ed1ec731311f1b3c.cu | #include "momentsMatrix.cuh"
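// Stores the stencil origin and the fractional offsets of the departure point used by the Lagrange interpolation in calc().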
struct InterpolateStruct{
ftype3 shifts;
int3 stencilMinPos;
__device__ __forceinline__ InterpolateStruct(ftype3 xf, int3 _sminpos): stencilMinPos(_sminpos) {
shifts = xf - make_ftype3(stencilMinPos);
}
template<class T,class F> __device__ inline T calc(F);
};
template<int RegOrder=-1> __global__ __launch_bounds__(LBMconsts::Qn) void streaming_collision(int ibn) {
ibn+= blockIdx.x;
if(ibn>=Nx*Ny*Nz) return;
const int ix = ibn%Nx;
const int iy = ibn/Nx%Ny;
const int iz = ibn/(Nx*Ny);
const int ild = 0;
const int ist = 1;
Cell cell = pars.data.get_cell(ild, ix,iy,iz);
const int3 ic = make_int3(ix, iy, iz);
const int3 Nxyz = make_int3(Nx,Ny,Nz);
using namespace LBMconsts;
int useSHmem4momMatrix = 0;
if(sizeof(MomentsMatrix)<48*1024) useSHmem4momMatrix = 1;
/*if(blockIdx.x==0) {
MomentsMatrix mtest;
for(int i=0;i<Qn;i++) for(int j=0;j<Qn;j++) mtest.m[i][j] = 2.+sqrt(abs(sin(100*i)*cos(i*j)));//1.23456+1.23*i/(j+1)+j/(i+1)*0.21;
MomentsMatrix mtestorig=mtest;
mtest.inverse();
ftype mmul[Qn][Qn];
for(int i=0;i<Qn;i++) for(int j=0;j<Qn;j++) {
ftype sum=0;
for(int k=0;k<Qn;k++) sum+= mtestorig.m[i][k]*mtest.m[k][j+Qn];
if(abs(sum)<1e-8) sum=0;
printf("%g ",sum);
if(j==Qn-1) printf("\n");
}
assert(0);
}*/
MomentsMatrix* Mm,*Mm0;
__shared__ MomentsMatrix _mmsh;
__shared__ MomentsMatrix* shMm;
//__shared__ MomentsMatrix _mmsh0;
//__shared__ MomentsMatrix* shMm0;
if(useSHmem4momMatrix) {
Mm = &_mmsh;
//Mm0 = &_mmsh0;
} else {
if(threadIdx.x==0) {
shMm = (MomentsMatrix*)malloc(sizeof(MomentsMatrix));
//shMm0 = (MomentsMatrix*)malloc(sizeof(MomentsMatrix));
assert(shMm);
//assert(shMm0);
}
__syncthreads();
Mm=shMm;
//Mm0=shMm0;
}
//cell.vel*=0; cell.T=PPdev.initial.T0/10.0;
__shared__ Cell cell_new;
int Niter=0;
while(Niter<100) {
ftype T = cell.T;
ftype rho=cell.rho;
ftype3 vel = cell.vel;
ftype4 gauge = make_ftype4(vel.x, vel.y, vel.z, sqrt(T/TLat));
__syncthreads();
if(RegOrder<0) {
Mm->init(gauge);
__syncthreads();
Mm->inverse();
__syncthreads();
}
/* Mm0->init(make_ftype4(0,0,0,1));
__syncthreads();
Mm0->inverse();
__syncthreads(); */
const int iq = threadIdx.x;
ftype3 v = ef[iq]*gauge.w + make_ftype3(gauge.x,gauge.y,gauge.z);
int3 interpStencilMinPos = ic - make_int3(PPdev.stencilInterpWidth/2);
const ftype3 xf = make_ftype3(ic)-v;
if(PPdev.stencilFixed==0) {
ftype3 pos = xf-make_ftype3(0.5*PPdev.stencilInterpWidth);
interpStencilMinPos = make_int3( round(pos.x), round(pos.y), round(pos.z) );
}
if(DIM<2) interpStencilMinPos.y = ic.y;
if(DIM<3) interpStencilMinPos.z = ic.z;
InterpolateStruct interpolate(xf, interpStencilMinPos);
//cell_new.f[iq] = pars.data.tiles[ild][interpStencilMinPos.x+interpStencilMinPos.y+interpStencilMinPos.z].f[iq];
if(RegOrder<0) {
cell_new.f[iq] = interpolate.calc<ftype>( [&] __device__ (int index) {
ftype mVec[Qn];
ftype4 igauge = pars.data.tiles[ild][index].uT[0];
igauge.w = sqrt(igauge.w/TLat);
calc_moments_vec( igauge, pars.data.tiles[ild][index].f, mVec );
/*ftype all_fi[Qn]; for(int ii=0;ii<Qn;ii++) all_fi[ii] = Mm->get_inv(iq,mVec);
calc_moments_vec( make_ftype4(0,0,0,1), all_fi, mVec );
for(int ii=0;ii<Qn;ii++) if(MomentsPower[ii].x+MomentsPower[ii].y+MomentsPower[ii].z>4) mVec[ii]=0;
const ftype fi_reg = Mm0->get_inv(iq,mVec);
return fi_reg;*/
const ftype fi = Mm->get_inv(iq, mVec);
return fi;
//return pars.data.tiles[ild][index].f[iq];
} );
} else {
TensorCoeffs<RegOrder> an = interpolate.calc< TensorCoeffs<RegOrder> >( [&] __device__ (int index) {
TensorCoeffs<RegOrder> an_p;
ftype4 igauge = pars.data.tiles[ild][index].uT[0];
igauge.w = sqrt(igauge.w/TLat);
calc_moments_tensors( igauge, pars.data.tiles[ild][index].f, an_p);
return an_p;
} );
TensorCoeffs<RegOrder> dn = convertAtoD(an, gauge);
cell_new.f[iq] = eval_fi_Hermit(dn, iq);
/*ftype4 tmpgauge = pars.data.tiles[ild][174].uT[0];
tmpgauge.w = sqrt(tmpgauge.w/TLat);
ftype mVec[Qn];
calc_moments_vec( tmpgauge, pars.data.tiles[ild][174].f, mVec );
if(ix==174 && iq==0) printf("Niter=%d moments=(%g %g %g %g %g %g)\n An=(%g %g %g %g %g %g)\n Dn=(%g %g %g %g %g %g)\n",
Niter, mVec[0],mVec[1],mVec[2],mVec[3],mVec[4],mVec[5],
an.k[0],an.k[1],an.k[2],an.k[3],an.k[4],an.k[5],
dn.k[0],dn.k[1],dn.k[2],dn.k[3],dn.k[4],dn.k[5]
);*/
}
__syncthreads();
if(threadIdx.x==0) {
ftype4 Vrho = make_ftype4(0,0,0,0);
ftype M2 = 0;
for(int ik=0; ik<Qn; ik++) {
ftype3 v_k = ef[ik]*gauge.w + make_ftype3(gauge.x,gauge.y,gauge.z);
Vrho+= make_ftype4(v_k.x,v_k.y,v_k.z,1)*cell_new.f[ik];
M2+= dot(v_k,v_k)*cell_new.f[ik];
}
cell_new.rho = Vrho.w;
cell_new.vel = make_ftype3(Vrho.x,Vrho.y,Vrho.z)/cell_new.rho;
cell_new.T = M2/cell_new.rho-dot(cell_new.vel,cell_new.vel); cell_new.T/=DIM;
if(PPdev.fixedTemperature) cell_new.T=cell.T;
if(cell_new.T<0) {
printf("Convergency problem: cell %d %d %d (iteration %d) got negative T=%g, reset to positive\n",
ix,iy,iz,Niter, cell_new.T );
cell_new.T=-cell_new.T;
}
}
__syncthreads();
Niter++;
if( isConv(cell,cell_new) ) { cell=cell_new; break; }
cell=cell_new;
__syncthreads();
}
//printf("ixyz=%d %d %d Niter=%d\n",ix,iy,iz, Niter);
__syncthreads();
if(threadIdx.x==0) {
if(!useSHmem4momMatrix) free(Mm);
ftype feq[Qn];
Cell::calcEq(feq, cell.rho, make_ftype3(0,0,0), TLat);
collision(cell.f,feq);
pars.data.set_cell(cell, ist, ix,iy,iz);
pars.data.tiles[ist][ix+iy*Nx+iz*Nx*Ny].Niter[0] = Niter;
}
}
inline __device__ ftype LagrPol(int ix,int iy,int iz, const ftype3 shifts, const int N);
template<class Interp_t, class F> __device__ inline Interp_t InterpolateStruct::calc(F func) {
const int3 Nxyz = make_int3(Nx,Ny,Nz);
Interp_t val(0);
const int Npoints = PPdev.stencilInterpWidth+1;
for(int xs=0; xs<Npoints; xs++) {
for(int ys=0; ys<((DIM<2)?1:Npoints); ys++) {
for(int zs=0; zs<((DIM<3)?1:Npoints); zs++) {
const int3 crd = ( stencilMinPos+make_int3(xs,ys,zs)+Nxyz )%Nxyz;
const int index = crd.x + crd.y*Nx + crd.z*Nx*Ny;
const ftype coeff = LagrPol(xs,ys,zs, shifts, Npoints);
auto Tcoffsp = func(index);
Tcoffsp*= coeff;
val+= Tcoffsp;
}
}
}
return val;
}
inline __device__ ftype LagrPol(int ix,int iy,int iz, const ftype3 shifts, const int N){
ftype a=1;
if(DIM>0) for(int ixp=0; ixp<N; ixp++) if(ixp!=ix) a*= (shifts.x-ixp)/(ix-ixp);
if(DIM>1) for(int iyp=0; iyp<N; iyp++) if(iyp!=iy) a*= (shifts.y-iyp)/(iy-iyp);
if(DIM>2) for(int izp=0; izp<N; izp++) if(izp!=iz) a*= (shifts.z-izp)/(iz-izp);
return a;
}
|
36371ff8bd2bcdb94eec30000ea853393273ba83.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*=======================================
Since : May/19/2008
Update: <2016/02/26>
CUDA
=======================================*/
#include "dendrite.h"
__device__ double dV_d(double V_d,double V_s,double h_d,double n_d,double p_d){
double m_inf_d=1/(1+exp(-(V_d-V12_d)/k_m_inf_d));
return( g_na_d* (pow(m_inf_d,2)) *h_d * (V_na - V_d) + g_dr_d * (pow(n_d,2)) *p_d *(V_k -V_d)+ (g_c/(1-kappa))*(V_s-V_d) + g_leak *(V_l - V_d));
}
__device__ double dh_d(double h_d, double V_d){
double h_inf_d=1/(1+exp(-(V_d-V12_h_d)/k_h_d));
return( (h_inf_d - h_d) /tau_h_d );
}
__device__ double dn_d(double n_d, double V_d){
double n_inf_d=1/(1+exp(-(V_d-V12_n_d)/k_n_d));
return( (n_inf_d - n_d) /tau_n_d );
}
__device__ double dp_d(double p_d, double V_d){
double p_inf_d=1/(1+exp(-(V_d-V12_p_d)/k_p_d));
return( (p_inf_d - p_d) /tau_p_d );
}
__device__ double dV_s(double V_s, double inp, double V_d,double n_s){
double m_inf_s=1/(1+exp(-(V_s-V12_s)/k_m_inf_s));
return (inp + g_na_s * (pow(m_inf_s,2) ) * (1-n_s )* (V_na - V_s) +g_dr_s * pow(n_s,2) *(V_k -V_s)+ (g_c/kappa)*(V_d-V_s) +g_leak *(V_l-V_s));
}
__device__ double dn_s(double n_s, double V_s){
double n_inf_s=1/(1+exp(-(V_s-V12_s)/k_m_inf_s));
return( (n_inf_s - n_s) /tau_n_s );
}
__global__ void init(double *V_s, double *n_s,double *V_d,double *h_d, double *n_d,double *p_d, int *spike_s,int *spike_d, double *inp,double *THl,int *spikecnt_s,int *spikecnt_d,int *count_s,int *count_d){
int i = threadIdx.x + blockIdx.x * blockDim.x;
V_s[i] = V0;
n_s[i] = 0.5;
V_d[i] = V0;
h_d[i] = 0.1;
n_d[i] = 0.1;
p_d[i] = 0.1;
inp[i] = 0;
spike_s[i] = 0;
spike_d[i] =0;
spikecnt_s[i]=0;
spikecnt_d[i]=0;
count_s[i]=0;
count_d[i]=0;
THl[i]=TH;
}
__global__ void calv(double *V_s, double *n_s,double *V_d,double *h_d, double *n_d,double *p_d, int *spike_s,int *spike_d, double *inp, double *t,int *spikecnt_s,int *spikecnt_d,int *count_s,int *count_d,double *THl,int sq,double sigma)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
// int i0=(t[0]/TEND)*sq/2;
//int j0=sq/2;
//inp[i] = I0lif * (sqrt(2*M_PI*(__powf(sigma,2)) *__expf( -( (__powf(i-i0,2) +__powf(j0,2))) / (2*(__powf(sigma,2))))));//*sin(2.0*M_PI*t[0]/100.0); //gauss+sin imp
inp[i]=I0lif;//*sin(2.0*M_PI*t[0]/100.0); //gauss+sin imp
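// One classical 4th-order Runge-Kutta step of size DT for the six state variables (V_s, n_s, V_d, h_d, n_d, p_d).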
double kV_s1 = DT*dV_s(V_s[i], inp[i], V_d[i],n_s[i]);
double kn_s1 = DT*dn_s(n_s[i], V_s[i]);
double kV_d1 = DT*dV_d(V_d[i], V_s[i] ,h_d[i], n_d[i], p_d[i]);
double kh_d1 = DT*dh_d(h_d[i], V_d[i]);
double kn_d1 = DT*dn_d(n_d[i], V_d[i]);
double kp_d1 = DT*dp_d(p_d[i], V_d[i]);
double kV_s2 = DT*dV_s(V_s[i]+kV_s1*0.5, inp[i], V_d[i]+kV_d1*0.5 ,n_s[i]+kn_s1*0.5);
double kn_s2 = DT*dn_s(n_s[i]+kn_s1*0.5, V_s[i]+kV_s1*0.5);
double kV_d2 = DT*dV_d(V_d[i]+kV_d1*0.5, V_s[i]+kV_s1*0.5 ,h_d[i]+kh_d1*0.5, n_d[i]+kn_d1*0.5, p_d[i]+kp_d1*0.5);
double kh_d2 = DT*dh_d(h_d[i]+kh_d1*0.5, V_d[i]+kV_d1*0.5);
double kn_d2 = DT*dn_d(n_d[i]+kn_d1*0.5, V_d[i]+kV_d1*0.5);
double kp_d2 = DT*dp_d(p_d[i]+kp_d1*0.5, V_d[i]+kV_d1*0.5);
double kV_s3 = DT*dV_s(V_s[i]+kV_s2*0.5, inp[i], V_d[i]+kV_d2*0.5 ,n_s[i]+kn_s2*0.5);
double kn_s3 = DT*dn_s(n_s[i]+kn_s2*0.5, V_s[i]+kV_s2*0.5);
double kV_d3 = DT*dV_d(V_d[i]+kV_d2*0.5, V_s[i]+kV_s2*0.5 ,h_d[i]+kh_d2*0.5, n_d[i]+kn_d2*0.5, p_d[i]+kp_d2*0.5);
double kh_d3 = DT*dh_d(h_d[i]+kh_d2*0.5, V_d[i]+kV_d2*0.5);
double kn_d3 = DT*dn_d(n_d[i]+kn_d2*0.5, V_d[i]+kV_d2*0.5);
double kp_d3 = DT*dp_d(p_d[i]+kp_d2*0.5, V_d[i]+kV_d2*0.5);
double kV_s4 = DT*dV_s(V_s[i]+kV_s3, inp[i], V_d[i]+kV_d3, n_s[i]+kn_s3); // 4th stage uses the 3rd-stage increments
double kn_s4 = DT*dn_s(n_s[i]+kn_s3, V_s[i]+kV_s3);
double kV_d4 = DT*dV_d(V_d[i]+kV_d3, V_s[i]+kV_s3 ,h_d[i]+kh_d3, n_d[i]+kn_d3, p_d[i]+kp_d3);
double kh_d4 = DT*dh_d(h_d[i]+kh_d3, V_d[i]+kV_d3);
double kn_d4 = DT*dn_d(n_d[i]+kn_d3, V_d[i]+kV_d3);
double kp_d4 = DT*dp_d(p_d[i]+kp_d3, V_d[i]+kV_d3);
V_s[i] += (kV_s1 + 2.0*kV_s2 + 2.0*kV_s3 + kV_s4)/6.0;
n_s[i] += (kn_s1 + 2.0*kn_s2 + 2.0*kn_s3 + kn_s4)/6.0;
V_d[i] += (kV_d1 + 2.0*kV_d2 + 2.0*kV_d3 + kV_d4)/6.0;
h_d[i] += (kh_d1 + 2.0*kh_d2 + 2.0*kh_d3 + kh_d4)/6.0;
n_d[i] += (kn_d1 + 2.0*kn_d2 + 2.0*kn_d3 + kn_d4)/6.0;
p_d[i] += (kp_d1 + 2.0*kp_d2 + 2.0*kp_d3 + kp_d4)/6.0;
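// Spike detection: count a spike when the potential exceeds 20 and re-arm the detector once it drops back below -55.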
if(V_s[i] > 20 and count_s[i] == 0){
spike_s[i] = spike_s[i]+1;
spikecnt_s[i]=spike_s[i];
//THl[i]=THl[i]+THup;
count_s[i]=1;
//fprintf(fp1,"%d\t %d\t %d\n \n",i,int(t),spikecnt_s[i] );
}
if(V_d[i] > 20 and count_d[i] == 0){
spike_d[i] = spike_d[i]+1;
spikecnt_d[i]=spike_d[i];
count_d[i]=1;
//THl[i]=THl[i]+THup;
//fprintf(fp2,"%d\t %d\t %d\n \n",i,int(t),spikecnt_d[i] );
}
if(int(t[0])%10==0){
spike_s[i]=0;
spike_d[i]=0;
}
if(count_d[i]==1 and V_d[i]<=-55){
count_d[i]=0;
}
if(count_s[i]==1 and V_s[i]<=-55){
count_s[i]=0;
}
//fprintf(fp3,"%lf \t %lf \n",t,V_s[0]);
//fprintf(fp4,"%lf \t %lf \n",t,V_d[0]);
}
void Simulation::sim()
{
int count = 0;
int size_d = sizeof(double)*NUM;
int size_i = sizeof(int)*NUM;
double *V_s, *n_s,*V_d,*h_d,*n_d,*p_d;
double *d_V_s, *d_n_s,*d_V_d,*d_h_d,*d_n_d,*d_p_d;
double *inp,*d_inp;
V_s = (double *)malloc(size_d);
n_s = (double *)malloc(size_d);
V_d = (double *)malloc(size_d);
h_d = (double *)malloc(size_d);
n_d = (double *)malloc(size_d);
p_d = (double *)malloc(size_d);
inp = (double *)malloc(size_d);
hipMalloc((void **)&d_V_s, size_d);
hipMalloc((void **)&d_n_s, size_d);
hipMalloc((void **)&d_V_d, size_d);
hipMalloc((void **)&d_h_d, size_d);
hipMalloc((void **)&d_n_d, size_d);
hipMalloc((void **)&d_p_d, size_d);
hipMalloc((void **)&d_inp, size_d);
int *count_s,*d_count_s;
int *count_d,*d_count_d;
count_s = (int *)malloc(size_i);
count_d = (int *)malloc(size_i);
hipMalloc((void **)&d_count_s, size_i);
hipMalloc((void **)&d_count_d, size_i);
double t = 0.0;
double *d_t;
hipMalloc((void **)&d_t, sizeof(double));
int *spike_s,*d_spike_s;
int *spikecnt_s,*d_spikecnt_s;
int *spike_d,*d_spike_d;
int *spikecnt_d,*d_spikecnt_d;
double *THl,*d_THl;
spike_s = (int *)malloc(size_i);
spikecnt_s = (int *)malloc(size_i);
spike_d = (int *)malloc(size_i);
spikecnt_d = (int *)malloc(size_i);
THl = (double *)malloc(size_d);
hipMalloc((void **)&d_spike_s, size_i);
hipMalloc((void **)&d_spikecnt_s, size_i);
hipMalloc((void **)&d_spike_d, size_i);
hipMalloc((void **)&d_spikecnt_d, size_i);
hipMalloc((void **)&d_THl, size_d);
FILE *fp1,*fp2,*fp3,*fp4;
fp1=fopen("Vs_moved.txt","w");
fp2=fopen("Vd_moved.txt","w");
fp3=fopen("cuda_double_Vs0_volt.txt","w");
fp4=fopen("cuda_double_Vd0_volt.txt","w");
hipLaunchKernelGGL(( init), dim3(NUM/ Threads), dim3(Threads), 0, 0, d_V_s,d_n_s, d_V_d, d_h_d, d_n_d, d_p_d, d_spike_s,d_spike_d, d_inp, d_THl,d_spikecnt_s,d_spikecnt_d,d_count_s,d_count_d);
//fprintf(fp2,"%lf \t %lf \n",t,V_s[0]);
for(;;){
//hipMemcpy(d_t, &t, sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( calv), dim3(NUM/ Threads),dim3(Threads), 0, 0, d_V_s, d_n_s, d_V_d, d_h_d, d_n_d, d_p_d, d_spike_s,d_spike_d,d_inp, d_t, d_spikecnt_s,d_spikecnt_d,d_count_s,d_count_d,d_THl ,sq,sigma);
hipMemcpy(V_s,d_V_s, size_d, hipMemcpyDeviceToHost);
hipMemcpy(n_s,d_n_s, size_d, hipMemcpyDeviceToHost);
hipMemcpy(V_d,d_V_d, size_d, hipMemcpyDeviceToHost);
hipMemcpy(h_d,d_h_d, size_d, hipMemcpyDeviceToHost);
hipMemcpy(n_d,d_n_d, size_d, hipMemcpyDeviceToHost);
hipMemcpy(p_d,d_p_d, size_d, hipMemcpyDeviceToHost);
hipMemcpy(inp,d_inp, size_d, hipMemcpyDeviceToHost);
hipMemcpy(count_s,d_count_s, size_i, hipMemcpyDeviceToHost);
hipMemcpy(spike_s,d_spike_s, size_i, hipMemcpyDeviceToHost);
hipMemcpy(spikecnt_s,d_spikecnt_s, size_i, hipMemcpyDeviceToHost);
hipMemcpy(count_d,d_count_d, size_i, hipMemcpyDeviceToHost);
hipMemcpy(spike_d,d_spike_d, size_i, hipMemcpyDeviceToHost);
hipMemcpy(spikecnt_d,d_spikecnt_d, size_i, hipMemcpyDeviceToHost);
for(int i=0;i<NUM;i++){
if(V_s[0] > 20 and spikecnt_s[i] > 0){
// fprintf(fp1,"%d\t %d\t %d\n \n",i,int(t),spikecnt_s[i] );
}
}
// fprintf(fp3,"%lf \t %lf \n",t,V_s[0]);
//fprintf(fp4,"%lf \t %lf \n",t,V_d[0]);
count++;
t = count * DT;
if( t > TEND){
break;
}
/*
hipFree(d_V_s);
hipFree(d_n_s);
hipFree(d_V_d);
hipFree(d_h_d);
hipFree(d_n_d);
hipFree(d_p_d);
hipFree(d_count_s);
hipFree(d_count_d);
hipFree(spike_s);
hipFree(spike_d);
hipFree(spikecnt_s);
hipFree(spikecnt_d);
*/
}
hipMemcpy(V_s,d_V_s, size_d, hipMemcpyDeviceToHost);
hipMemcpy(n_s,d_n_s, size_d, hipMemcpyDeviceToHost);
hipMemcpy(V_d,d_V_d, size_d, hipMemcpyDeviceToHost);
hipMemcpy(h_d,d_h_d, size_d, hipMemcpyDeviceToHost);
hipMemcpy(n_d,d_n_d, size_d, hipMemcpyDeviceToHost);
hipMemcpy(p_d,d_p_d, size_d, hipMemcpyDeviceToHost);
hipMemcpy(inp,d_inp, size_d, hipMemcpyDeviceToHost);
hipMemcpy(count_s,d_count_s, size_i, hipMemcpyDeviceToHost);
hipMemcpy(spike_s,d_spike_s, size_i, hipMemcpyDeviceToHost);
hipMemcpy(spikecnt_s,d_spikecnt_s, size_i, hipMemcpyDeviceToHost);
hipMemcpy(count_d,d_count_d, size_i, hipMemcpyDeviceToHost);
hipMemcpy(spike_d,d_spike_d, size_i, hipMemcpyDeviceToHost);
hipMemcpy(spikecnt_d,d_spikecnt_d, size_i, hipMemcpyDeviceToHost);
free(V_s);
free(n_s);
free(V_d);
free(h_d);
free(n_d);
free(p_d);
hipFree(d_V_s);
hipFree(d_n_s);
hipFree(d_V_d);
hipFree(d_h_d);
hipFree(d_n_d);
hipFree(d_p_d);
free(count_s);
free(count_d);
hipFree(d_count_s);
hipFree(d_count_d);
free(spike_s);
free(spike_d);
free(spikecnt_s);
free(spikecnt_d);
// free the remaining host buffers with free() and release the device buffers that are still outstanding
free(inp);
free(THl);
hipFree(d_inp);
hipFree(d_t);
hipFree(d_spike_s);
hipFree(d_spike_d);
hipFree(d_spikecnt_s);
hipFree(d_spikecnt_d);
hipFree(d_THl);
fclose(fp1);
fclose(fp2);
fclose(fp3);
fclose(fp4);
}
/*
int main(int argc, char* argv[]){
Simulation sim;
sim.sim();
return(0);
}
*/
int main(int argc, char* argv[]){
Simulation sim;
sim.sim();
return(0);
}
| 36371ff8bd2bcdb94eec30000ea853393273ba83.cu | /*=======================================
Since : May/19/2008
Update: <2016/02/26>
CUDA
=======================================*/
#include "dendrite.h"
__device__ double dV_d(double V_d,double V_s,double h_d,double n_d,double p_d){
double m_inf_d=1/(1+exp(-(V_d-V12_d)/k_m_inf_d));
return( g_na_d* (pow(m_inf_d,2)) *h_d * (V_na - V_d) + g_dr_d * (pow(n_d,2)) *p_d *(V_k -V_d)+ (g_c/(1-kappa))*(V_s-V_d) + g_leak *(V_l - V_d));
}
__device__ double dh_d(double h_d, double V_d){
double h_inf_d=1/(1+exp(-(V_d-V12_h_d)/k_h_d));
return( (h_inf_d - h_d) /tau_h_d );
}
__device__ double dn_d(double n_d, double V_d){
double n_inf_d=1/(1+exp(-(V_d-V12_n_d)/k_n_d));
return( (n_inf_d - n_d) /tau_n_d );
}
__device__ double dp_d(double p_d, double V_d){
double p_inf_d=1/(1+exp(-(V_d-V12_p_d)/k_p_d));
return( (p_inf_d - p_d) /tau_p_d );
}
__device__ double dV_s(double V_s, double inp, double V_d,double n_s){
double m_inf_s=1/(1+exp(-(V_s-V12_s)/k_m_inf_s));
return (inp + g_na_s * (pow(m_inf_s,2) ) * (1-n_s )* (V_na - V_s) +g_dr_s * pow(n_s,2) *(V_k -V_s)+ (g_c/kappa)*(V_d-V_s) +g_leak *(V_l-V_s));
}
__device__ double dn_s(double n_s, double V_s){
double n_inf_s=1/(1+exp(-(V_s-V12_s)/k_m_inf_s));
return( (n_inf_s - n_s) /tau_n_s );
}
__global__ void init(double *V_s, double *n_s,double *V_d,double *h_d, double *n_d,double *p_d, int *spike_s,int *spike_d, double *inp,double *THl,int *spikecnt_s,int *spikecnt_d,int *count_s,int *count_d){
int i = threadIdx.x + blockIdx.x * blockDim.x;
V_s[i] = V0;
n_s[i] = 0.5;
V_d[i] = V0;
h_d[i] = 0.1;
n_d[i] = 0.1;
p_d[i] = 0.1;
inp[i] = 0;
spike_s[i] = 0;
spike_d[i] =0;
spikecnt_s[i]=0;
spikecnt_d[i]=0;
count_s[i]=0;
count_d[i]=0;
THl[i]=TH;
}
__global__ void calv(double *V_s, double *n_s,double *V_d,double *h_d, double *n_d,double *p_d, int *spike_s,int *spike_d, double *inp, double *t,int *spikecnt_s,int *spikecnt_d,int *count_s,int *count_d,double *THl,int sq,double sigma)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
// int i0=(t[0]/TEND)*sq/2;
//int j0=sq/2;
//inp[i] = I0lif * (sqrt(2*M_PI*(__powf(sigma,2)) *__expf( -( (__powf(i-i0,2) +__powf(j0,2))) / (2*(__powf(sigma,2))))));//*sin(2.0*M_PI*t[0]/100.0); //gauss+sin imp
inp[i]=I0lif;//*sin(2.0*M_PI*t[0]/100.0); //gauss+sin imp
double kV_s1 = DT*dV_s(V_s[i], inp[i], V_d[i],n_s[i]);
double kn_s1 = DT*dn_s(n_s[i], V_s[i]);
double kV_d1 = DT*dV_d(V_d[i], V_s[i] ,h_d[i], n_d[i], p_d[i]);
double kh_d1 = DT*dh_d(h_d[i], V_d[i]);
double kn_d1 = DT*dn_d(n_d[i], V_d[i]);
double kp_d1 = DT*dp_d(p_d[i], V_d[i]);
double kV_s2 = DT*dV_s(V_s[i]+kV_s1*0.5, inp[i], V_d[i]+kV_d1*0.5 ,n_s[i]+kn_s1*0.5);
double kn_s2 = DT*dn_s(n_s[i]+kn_s1*0.5, V_s[i]+kV_s1*0.5);
double kV_d2 = DT*dV_d(V_d[i]+kV_d1*0.5, V_s[i]+kV_s1*0.5 ,h_d[i]+kh_d1*0.5, n_d[i]+kn_d1*0.5, p_d[i]+kp_d1*0.5);
double kh_d2 = DT*dh_d(h_d[i]+kh_d1*0.5, V_d[i]+kV_d1*0.5);
double kn_d2 = DT*dn_d(n_d[i]+kn_d1*0.5, V_d[i]+kV_d1*0.5);
double kp_d2 = DT*dp_d(p_d[i]+kp_d1*0.5, V_d[i]+kV_d1*0.5);
double kV_s3 = DT*dV_s(V_s[i]+kV_s2*0.5, inp[i], V_d[i]+kV_d2*0.5 ,n_s[i]+kn_s2*0.5);
double kn_s3 = DT*dn_s(n_s[i]+kn_s2*0.5, V_s[i]+kV_s2*0.5);
double kV_d3 = DT*dV_d(V_d[i]+kV_d2*0.5, V_s[i]+kV_s2*0.5 ,h_d[i]+kh_d2*0.5, n_d[i]+kn_d2*0.5, p_d[i]+kp_d2*0.5);
double kh_d3 = DT*dh_d(h_d[i]+kh_d2*0.5, V_d[i]+kV_d2*0.5);
double kn_d3 = DT*dn_d(n_d[i]+kn_d2*0.5, V_d[i]+kV_d2*0.5);
double kp_d3 = DT*dp_d(p_d[i]+kp_d2*0.5, V_d[i]+kV_d2*0.5);
double kV_s4 = DT*dV_s(V_s[i]+kV_s3, inp[i], V_d[i]+kV_d2 ,n_s[i]+kn_s2);
double kn_s4 = DT*dn_s(n_s[i]+kn_s3, V_s[i]+kV_s3);
double kV_d4 = DT*dV_d(V_d[i]+kV_d3, V_s[i]+kV_s3 ,h_d[i]+kh_d3, n_d[i]+kn_d3, p_d[i]+kp_d3);
double kh_d4 = DT*dh_d(h_d[i]+kh_d3, V_d[i]+kV_d3);
double kn_d4 = DT*dn_d(n_d[i]+kn_d3, V_d[i]+kV_d3);
double kp_d4 = DT*dp_d(p_d[i]+kp_d3, V_d[i]+kV_d3);
V_s[i] += (kV_s1 + 2.0*kV_s2 + 2.0*kV_s3 + kV_s4)/6.0;
n_s[i] += (kn_s1 + 2.0*kn_s2 + 2.0*kn_s3 + kn_s4)/6.0;
V_d[i] += (kV_d1 + 2.0*kV_d2 + 2.0*kV_d3 + kV_d4)/6.0;
h_d[i] += (kh_d1 + 2.0*kh_d2 + 2.0*kh_d3 + kh_d4)/6.0;
n_d[i] += (kn_d1 + 2.0*kn_d2 + 2.0*kn_d3 + kn_d4)/6.0;
p_d[i] += (kp_d1 + 2.0*kp_d2 + 2.0*kp_d3 + kp_d4)/6.0;
if(V_s[i] > 20 and count_s[i] == 0){
spike_s[i] = spike_s[i]+1;
spikecnt_s[i]=spike_s[i];
//THl[i]=THl[i]+THup;
count_s[i]=1;
//fprintf(fp1,"%d\t %d\t %d\n \n",i,int(t),spikecnt_s[i] );
}
if(V_d[i] > 20 and count_d[i] == 0){
spike_d[i] = spike_d[i]+1;
spikecnt_d[i]=spike_d[i];
count_d[i]=1;
//THl[i]=THl[i]+THup;
//fprintf(fp2,"%d\t %d\t %d\n \n",i,int(t),spikecnt_d[i] );
}
if(int(t[0])%10==0){
spike_s[i]=0;
spike_d[i]=0;
}
if(count_d[i]==1 and V_d[i]<=-55){
count_d[i]=0;
}
if(count_s[i]==1 and V_s[i]<=-55){
count_s[i]=0;
}
//fprintf(fp3,"%lf \t %lf \n",t,V_s[0]);
//fprintf(fp4,"%lf \t %lf \n",t,V_d[0]);
}
void Simulation::sim()
{
int count = 0;
int size_d = sizeof(double)*NUM;
int size_i = sizeof(int)*NUM;
double *V_s, *n_s,*V_d,*h_d,*n_d,*p_d;
double *d_V_s, *d_n_s,*d_V_d,*d_h_d,*d_n_d,*d_p_d;
double *inp,*d_inp;
V_s = (double *)malloc(size_d);
n_s = (double *)malloc(size_d);
V_d = (double *)malloc(size_d);
h_d = (double *)malloc(size_d);
n_d = (double *)malloc(size_d);
p_d = (double *)malloc(size_d);
inp = (double *)malloc(size_d);
cudaMalloc((void **)&d_V_s, size_d);
cudaMalloc((void **)&d_n_s, size_d);
cudaMalloc((void **)&d_V_d, size_d);
cudaMalloc((void **)&d_h_d, size_d);
cudaMalloc((void **)&d_n_d, size_d);
cudaMalloc((void **)&d_p_d, size_d);
cudaMalloc((void **)&d_inp, size_d);
int *count_s,*d_count_s;
int *count_d,*d_count_d;
count_s = (int *)malloc(size_i);
count_d = (int *)malloc(size_i);
cudaMalloc((void **)&d_count_s, size_i);
cudaMalloc((void **)&d_count_d, size_i);
double t = 0.0;
double *d_t;
cudaMalloc((void **)&d_t, sizeof(double));
int *spike_s,*d_spike_s;
int *spikecnt_s,*d_spikecnt_s;
int *spike_d,*d_spike_d;
int *spikecnt_d,*d_spikecnt_d;
double *THl,*d_THl;
spike_s = (int *)malloc(size_i);
spikecnt_s = (int *)malloc(size_i);
spike_d = (int *)malloc(size_i);
spikecnt_d = (int *)malloc(size_i);
THl = (double *)malloc(size_d);
cudaMalloc((void **)&d_spike_s, size_i);
cudaMalloc((void **)&d_spikecnt_s, size_i);
cudaMalloc((void **)&d_spike_d, size_i);
cudaMalloc((void **)&d_spikecnt_d, size_i);
cudaMalloc((void **)&d_THl, size_d);
FILE *fp1,*fp2,*fp3,*fp4;
fp1=fopen("Vs_moved.txt","w");
fp2=fopen("Vd_moved.txt","w");
fp3=fopen("cuda_double_Vs0_volt.txt","w");
fp4=fopen("cuda_double_Vd0_volt.txt","w");
init<<<NUM/ Threads, Threads>>>(d_V_s,d_n_s, d_V_d, d_h_d, d_n_d, d_p_d, d_spike_s,d_spike_d, d_inp, d_THl,d_spikecnt_s,d_spikecnt_d,d_count_s,d_count_d);
//fprintf(fp2,"%lf \t %lf \n",t,V_s[0]);
for(;;){
//cudaMemcpy(d_t, &t, sizeof(double), cudaMemcpyHostToDevice);
calv<<<NUM/ Threads,Threads>>>(d_V_s, d_n_s, d_V_d, d_h_d, d_n_d, d_p_d, d_spike_s,d_spike_d,d_inp, d_t, d_spikecnt_s,d_spikecnt_d,d_count_s,d_count_d,d_THl ,sq,sigma);
cudaMemcpy(V_s,d_V_s, size_d, cudaMemcpyDeviceToHost);
cudaMemcpy(n_s,d_n_s, size_d, cudaMemcpyDeviceToHost);
cudaMemcpy(V_d,d_V_d, size_d, cudaMemcpyDeviceToHost);
cudaMemcpy(h_d,d_h_d, size_d, cudaMemcpyDeviceToHost);
cudaMemcpy(n_d,d_n_d, size_d, cudaMemcpyDeviceToHost);
cudaMemcpy(p_d,d_p_d, size_d, cudaMemcpyDeviceToHost);
cudaMemcpy(inp,d_inp, size_d, cudaMemcpyDeviceToHost);
cudaMemcpy(count_s,d_count_s, size_i, cudaMemcpyDeviceToHost);
cudaMemcpy(spike_s,d_spike_s, size_i, cudaMemcpyDeviceToHost);
cudaMemcpy(spikecnt_s,d_spikecnt_s, size_i, cudaMemcpyDeviceToHost);
cudaMemcpy(count_d,d_count_d, size_i, cudaMemcpyDeviceToHost);
cudaMemcpy(spike_d,d_spike_d, size_i, cudaMemcpyDeviceToHost);
cudaMemcpy(spikecnt_d,d_spikecnt_d, size_i, cudaMemcpyDeviceToHost);
for(int i=0;i<NUM;i++){
if(V_s[0] > 20 and spikecnt_s[i] > 0){
// fprintf(fp1,"%d\t %d\t %d\n \n",i,int(t),spikecnt_s[i] );
}
}
// fprintf(fp3,"%lf \t %lf \n",t,V_s[0]);
//fprintf(fp4,"%lf \t %lf \n",t,V_d[0]);
count++;
t = count * DT;
if( t > TEND){
break;
}
/*
cudaFree(d_V_s);
cudaFree(d_n_s);
cudaFree(d_V_d);
cudaFree(d_h_d);
cudaFree(d_n_d);
cudaFree(d_p_d);
cudaFree(d_count_s);
cudaFree(d_count_d);
cudaFree(spike_s);
cudaFree(spike_d);
cudaFree(spikecnt_s);
cudaFree(spikecnt_d);
*/
}
cudaMemcpy(V_s,d_V_s, size_d, cudaMemcpyDeviceToHost);
cudaMemcpy(n_s,d_n_s, size_d, cudaMemcpyDeviceToHost);
cudaMemcpy(V_d,d_V_d, size_d, cudaMemcpyDeviceToHost);
cudaMemcpy(h_d,d_h_d, size_d, cudaMemcpyDeviceToHost);
cudaMemcpy(n_d,d_n_d, size_d, cudaMemcpyDeviceToHost);
cudaMemcpy(p_d,d_p_d, size_d, cudaMemcpyDeviceToHost);
cudaMemcpy(inp,d_inp, size_d, cudaMemcpyDeviceToHost);
cudaMemcpy(count_s,d_count_s, size_i, cudaMemcpyDeviceToHost);
cudaMemcpy(spike_s,d_spike_s, size_i, cudaMemcpyDeviceToHost);
cudaMemcpy(spikecnt_s,d_spikecnt_s, size_i, cudaMemcpyDeviceToHost);
cudaMemcpy(count_d,d_count_d, size_i, cudaMemcpyDeviceToHost);
cudaMemcpy(spike_d,d_spike_d, size_i, cudaMemcpyDeviceToHost);
cudaMemcpy(spikecnt_d,d_spikecnt_d, size_i, cudaMemcpyDeviceToHost);
free(V_s);
free(n_s);
free(V_d);
free(h_d);
free(n_d);
free(p_d);
cudaFree(d_V_s);
cudaFree(d_n_s);
cudaFree(d_V_d);
cudaFree(d_h_d);
cudaFree(d_n_d);
cudaFree(d_p_d);
free(count_s);
free(count_d);
cudaFree(d_count_s);
cudaFree(d_count_d);
free(spike_s);
free(spike_d);
free(spikecnt_s);
free(spikecnt_d);
// free the remaining host buffers with free() and release the device buffers that are still outstanding
free(inp);
free(THl);
cudaFree(d_inp);
cudaFree(d_t);
cudaFree(d_spike_s);
cudaFree(d_spike_d);
cudaFree(d_spikecnt_s);
cudaFree(d_spikecnt_d);
cudaFree(d_THl);
fclose(fp1);
fclose(fp2);
fclose(fp3);
fclose(fp4);
}
/*
int main(int argc, char* argv[]){
Simulation sim;
sim.sim();
return(0);
}
*/
int main(int argc, char* argv[]){
Simulation sim;
sim.sim();
return(0);
}
|
0c85e8291f22d88f9d90f1b37821ab2870805884.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <pthread.h>
#define BLOCK_SIZE 64
#define REDUCTION_BLOCK_SIZE 1024
#define PIx2 6.2831853071795864769252867665590058f
#include <sys/time.h>
struct kValues {
float Kx;
float Ky;
float Kz;
float PhiMag;
};
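// One k-space sample: its coordinates plus the precomputed magnitude |phi|^2 that weights its contribution to Q.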
//size needed: numK * 1
__global__ void ComputePhiMagGPU(struct kValues* kValsD, float* phiRD, float* phiID) {
int indexK = blockIdx.x * BLOCK_SIZE + threadIdx.x;
//Shared memory is not needed since this is a coalesced access.
//kVals.KxKyKz are initialized on the host, since filling them is a pure memory operation that gains nothing from running in parallel on the GPU.
kValsD[indexK].PhiMag = phiRD[indexK] * phiRD[indexK] + phiID[indexK] * phiID[indexK];
}
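// Tree reduction in shared memory: each block sums up to REDUCTION_BLOCK_SIZE partial results and writes its block total back to the first element it loaded.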
__global__ void ImprovedReductionKernel(float* globalData, int interval, int dataSize) {
int loc = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float data[REDUCTION_BLOCK_SIZE];
if (loc * interval < dataSize) {
//load to shared mem
data[threadIdx.x] = globalData[loc];
int stride = REDUCTION_BLOCK_SIZE / 2;
do {
__syncthreads();
/*if (threadIdx.x == 0)
printf("datasize=%d\n", dataSize);*/
if (threadIdx.x < stride && threadIdx.x + stride < dataSize) {
data[threadIdx.x] += data[threadIdx.x + stride];
/*printf("%f,", data[threadIdx.x]);*/
}
stride >>= 1;
} while (stride >= 1);
if (threadIdx.x == 0) {
globalData[loc] = data[0];
}
}
}
//size needed: numK * 1
__global__ void ComputeQGPU(float* globalqr, float* globalqi, struct kValues* globalkVals, float globalx, float globaly, float globalz) {
//using constant memory here would limit scalability
__shared__ float x, y, z;
__shared__ struct kValues kVals[BLOCK_SIZE];
__shared__ float Qracc[BLOCK_SIZE];
__shared__ float Qiacc[BLOCK_SIZE];
int indexK = blockIdx.x * BLOCK_SIZE + threadIdx.x;
//load shared mem
kVals[threadIdx.x] = globalkVals[indexK];
if (threadIdx.x == 0) {
x = globalx;
y = globaly;
z = globalz;
}
__syncthreads();
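// Phase angle 2*pi*(k . x) for this sample/voxel pair; sincosf evaluates sine and cosine in one call.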
float expArg = PIx2 * (kVals[threadIdx.x].Kx * x + kVals[threadIdx.x].Ky * y + kVals[threadIdx.x].Kz * z);
float cosArg, sinArg;
sincosf(expArg, &sinArg, &cosArg);
//the following should be zero for padding
Qracc[threadIdx.x] = kVals[threadIdx.x].PhiMag * cosArg;
Qiacc[threadIdx.x] = kVals[threadIdx.x].PhiMag * sinArg;
//improved reduction
int stride = BLOCK_SIZE / 2;
do {
__syncthreads();
if (threadIdx.x < stride) {
Qracc[threadIdx.x] += Qracc[threadIdx.x + stride];
Qiacc[threadIdx.x] += Qiacc[threadIdx.x + stride];
}
stride >>= 1;
} while (stride >= 1);
if (threadIdx.x == 0) {
*(globalqr + blockIdx.x) = Qracc[0];
*(globalqi + blockIdx.x) = Qiacc[0];
}
}
//hipMalloc inside
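// Host driver: pads the k-space arrays to a multiple of BLOCK_SIZE, computes PhiMag once on the device, then evaluates Qr/Qi voxel by voxel with a per-voxel block reduction.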
void launchKernel(int numK, int numX, float* kxH, float* kyH, float* kzH,
float* xH, float* yH, float* zH, float* phiRH, float* phiIH, float* QrH, float* QiH,
float** phiRD, float** phiID, struct kValues** kValsD) {
struct timeval time0;
struct timeval time1;
struct timezone tz;
// long kernelTime = 0;
// long memoryTime = 0;
//calculate dimension
dim3 dim_grid, dim_block;
dim_grid.x = numK / BLOCK_SIZE + (numK % BLOCK_SIZE == 0 ? 0 : 1);
dim_grid.y = 1;
dim_grid.z = 1;
dim_block.x = BLOCK_SIZE;
dim_block.y = 1;
dim_block.z = 1;
fflush(stdout);
//prepare for calculating PhiMag
hipMalloc(kValsD, dim_grid.x * BLOCK_SIZE * sizeof(struct kValues));
struct kValues* kVals = (struct kValues*)calloc(numK, sizeof(struct kValues));
for (int k = 0; k < numK; k++) {
kVals[k].Kx = kxH[k];
kVals[k].Ky = kyH[k];
kVals[k].Kz = kzH[k];
}
// gettimeofday(&time0, &tz);
hipMemset(*kValsD, 0, dim_grid.x * BLOCK_SIZE * sizeof(struct kValues)); // zero the padded tail as well, so padded samples contribute nothing
hipMemcpy(*kValsD, kVals, numK * sizeof(struct kValues), hipMemcpyHostToDevice);
hipMalloc(phiRD, dim_grid.x * BLOCK_SIZE * sizeof(float));
hipMemset(*phiRD, 0, dim_grid.x * BLOCK_SIZE * sizeof(float)); //0 * n = 0
hipMemcpy(*phiRD, phiRH, numK * sizeof(float), hipMemcpyHostToDevice);
hipMalloc(phiID, dim_grid.x * BLOCK_SIZE * sizeof(float));
hipMemset(*phiID, 0, dim_grid.x * BLOCK_SIZE * sizeof(float));
hipMemcpy(*phiID, phiIH, numK * sizeof(float), hipMemcpyHostToDevice);
// gettimeofday(&time1, &tz);
// memoryTime += (time1.tv_sec - time0.tv_sec) * 1000000 + time1.tv_usec - time0.tv_usec;
//calculate phiMag
// gettimeofday(&time0, &tz);
hipLaunchKernelGGL(( ComputePhiMagGPU), dim3(dim_grid), dim3(dim_block), 0, 0, *kValsD, *phiRD, *phiID);
hipDeviceSynchronize();
// gettimeofday(&time1, &tz);
// kernelTime += (time1.tv_sec - time0.tv_sec) * 1000000 + time1.tv_usec - time0.tv_usec;
//launch kernel
//Host multithreading could be used here, but it is not necessary: even a 32*32*32 input (numK=3072) already occupies all GPU threads (2560 on an RTX 2070) simultaneously
//and takes around 2 s of CPU time. Multithreading would only help for small inputs, which might as well be handled directly on the CPU.
//In practice, host multithreading roughly halves the 32x32x32 performance.
for (int indexX = 0; indexX < numX; indexX++) {
//allocate result space. per indexX
float* globalqrD;
float* globalqiD;
// gettimeofday(&time0, &tz);
hipMalloc(&globalqrD, dim_grid.x * sizeof(float));
hipMalloc(&globalqiD, dim_grid.x * sizeof(float));
// gettimeofday(&time1, &tz);
// memoryTime += (time1.tv_sec - time0.tv_sec) * 1000000 + time1.tv_usec - time0.tv_usec;
// gettimeofday(&time0, &tz);
hipLaunchKernelGGL(( ComputeQGPU), dim3(dim_grid), dim3(dim_block), 0, 0, globalqrD, globalqiD, *kValsD, xH[indexX], yH[indexX], zH[indexX]);
hipDeviceSynchronize();
// gettimeofday(&time1, &tz);
// kernelTime += (time1.tv_sec - time0.tv_sec) * 1000000 + time1.tv_usec - time0.tv_usec;
//reduction
int currentDataNum = dim_grid.x;
int interval = 1;
dim3 dim_grid_reduction, dim_block_reduction;
while (currentDataNum != 1) {
dim_grid_reduction.x = currentDataNum / REDUCTION_BLOCK_SIZE + (currentDataNum % REDUCTION_BLOCK_SIZE == 0 ? 0 : 1);
dim_grid_reduction.y = 1;
dim_grid_reduction.z = 1;
dim_block_reduction.x = REDUCTION_BLOCK_SIZE;
dim_block_reduction.y = 1;
dim_block_reduction.z = 1;
// gettimeofday(&time0, &tz);
hipLaunchKernelGGL(( ImprovedReductionKernel), dim3(dim_grid_reduction), dim3(dim_block_reduction), 0, 0, globalqrD, interval, currentDataNum);
hipLaunchKernelGGL(( ImprovedReductionKernel), dim3(dim_grid_reduction), dim3(dim_block_reduction), 0, 0, globalqiD, interval, currentDataNum);
hipDeviceSynchronize();
// gettimeofday(&time1, &tz);
// kernelTime += (time1.tv_sec - time0.tv_sec) * 1000000 + time1.tv_usec - time0.tv_usec;
interval *= REDUCTION_BLOCK_SIZE;
currentDataNum = currentDataNum / REDUCTION_BLOCK_SIZE + (currentDataNum % REDUCTION_BLOCK_SIZE == 0 ? 0 : 1);
}
// gettimeofday(&time0, &tz);
hipMemcpy(&(QrH[indexX]), globalqrD, sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&(QiH[indexX]), globalqiD, sizeof(float), hipMemcpyDeviceToHost);
// gettimeofday(&time1, &tz);
// memoryTime += (time1.tv_sec - time0.tv_sec) * 1000000 + time1.tv_usec - time0.tv_usec;
}
// printf("kernel: %ld us\n", kernelTime);
// printf("IO: %ld us\n", memoryTime);
}
| 0c85e8291f22d88f9d90f1b37821ab2870805884.cu |
#include <stdio.h>
#include <pthread.h>
#define BLOCK_SIZE 64
#define REDUCTION_BLOCK_SIZE 1024
#define PIx2 6.2831853071795864769252867665590058f
#include <sys/time.h>
struct kValues {
float Kx;
float Ky;
float Kz;
float PhiMag;
};
//size needed: numK * 1
__global__ void ComputePhiMagGPU(struct kValues* kValsD, float* phiRD, float* phiID) {
int indexK = blockIdx.x * BLOCK_SIZE + threadIdx.x;
//Shared memory is not needed since this is a coalesced access.
//kVals.KxKyKz are initialized on the host, since filling them is a pure memory operation that gains nothing from running in parallel on the GPU.
kValsD[indexK].PhiMag = phiRD[indexK] * phiRD[indexK] + phiID[indexK] * phiID[indexK];
}
__global__ void ImprovedReductionKernel(float* globalData, int interval, int dataSize) {
int loc = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float data[REDUCTION_BLOCK_SIZE];
if (loc * interval < dataSize) {
//load to shared mem
data[threadIdx.x] = globalData[loc];
int stride = REDUCTION_BLOCK_SIZE / 2;
do {
__syncthreads();
/*if (threadIdx.x == 0)
printf("datasize=%d\n", dataSize);*/
if (threadIdx.x < stride && threadIdx.x + stride < dataSize) {
data[threadIdx.x] += data[threadIdx.x + stride];
/*printf("%f,", data[threadIdx.x]);*/
}
stride >>= 1;
} while (stride >= 1);
if (threadIdx.x == 0) {
globalData[loc] = data[0];
}
}
}
//size needed: numK * 1
__global__ void ComputeQGPU(float* globalqr, float* globalqi, struct kValues* globalkVals, float globalx, float globaly, float globalz) {
//using constant memory here would limit scalability
__shared__ float x, y, z;
__shared__ struct kValues kVals[BLOCK_SIZE];
__shared__ float Qracc[BLOCK_SIZE];
__shared__ float Qiacc[BLOCK_SIZE];
int indexK = blockIdx.x * BLOCK_SIZE + threadIdx.x;
//load shared mem
kVals[threadIdx.x] = globalkVals[indexK];
if (threadIdx.x == 0) {
x = globalx;
y = globaly;
z = globalz;
}
__syncthreads();
float expArg = PIx2 * (kVals[threadIdx.x].Kx * x + kVals[threadIdx.x].Ky * y + kVals[threadIdx.x].Kz * z);
float cosArg, sinArg;
sincosf(expArg, &sinArg, &cosArg);
//the following should be zero for padding
Qracc[threadIdx.x] = kVals[threadIdx.x].PhiMag * cosArg;
Qiacc[threadIdx.x] = kVals[threadIdx.x].PhiMag * sinArg;
//improved reduction
int stride = BLOCK_SIZE / 2;
do {
__syncthreads();
if (threadIdx.x < stride) {
Qracc[threadIdx.x] += Qracc[threadIdx.x + stride];
Qiacc[threadIdx.x] += Qiacc[threadIdx.x + stride];
}
stride >>= 1;
} while (stride >= 1);
if (threadIdx.x == 0) {
*(globalqr + blockIdx.x) = Qracc[0];
*(globalqi + blockIdx.x) = Qiacc[0];
}
}
//cudaMalloc inside
void launchKernel(int numK, int numX, float* kxH, float* kyH, float* kzH,
float* xH, float* yH, float* zH, float* phiRH, float* phiIH, float* QrH, float* QiH,
float** phiRD, float** phiID, struct kValues** kValsD) {
struct timeval time0;
struct timeval time1;
struct timezone tz;
// long kernelTime = 0;
// long memoryTime = 0;
//calculate dimension
dim3 dim_grid, dim_block;
dim_grid.x = numK / BLOCK_SIZE + (numK % BLOCK_SIZE == 0 ? 0 : 1);
dim_grid.y = 1;
dim_grid.z = 1;
dim_block.x = BLOCK_SIZE;
dim_block.y = 1;
dim_block.z = 1;
fflush(stdout);
//prepare for calculating PhiMag
cudaMalloc(kValsD, dim_grid.x * BLOCK_SIZE * sizeof(struct kValues));
struct kValues* kVals = (struct kValues*)calloc(numK, sizeof(struct kValues));
for (int k = 0; k < numK; k++) {
kVals[k].Kx = kxH[k];
kVals[k].Ky = kyH[k];
kVals[k].Kz = kzH[k];
}
// gettimeofday(&time0, &tz);
cudaMemset(*kValsD, 0, dim_grid.x * BLOCK_SIZE * sizeof(struct kValues)); // zero the padded tail as well, so padded samples contribute nothing
cudaMemcpy(*kValsD, kVals, numK * sizeof(struct kValues), cudaMemcpyHostToDevice);
cudaMalloc(phiRD, dim_grid.x * BLOCK_SIZE * sizeof(float));
cudaMemset(*phiRD, 0, dim_grid.x * BLOCK_SIZE * sizeof(float)); //0 * n = 0
cudaMemcpy(*phiRD, phiRH, numK * sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc(phiID, dim_grid.x * BLOCK_SIZE * sizeof(float));
cudaMemset(*phiID, 0, dim_grid.x * BLOCK_SIZE * sizeof(float));
cudaMemcpy(*phiID, phiIH, numK * sizeof(float), cudaMemcpyHostToDevice);
// gettimeofday(&time1, &tz);
// memoryTime += (time1.tv_sec - time0.tv_sec) * 1000000 + time1.tv_usec - time0.tv_usec;
//calculate phiMag
// gettimeofday(&time0, &tz);
ComputePhiMagGPU<<<dim_grid, dim_block>>> (*kValsD, *phiRD, *phiID);
cudaDeviceSynchronize();
// gettimeofday(&time1, &tz);
// kernelTime += (time1.tv_sec - time0.tv_sec) * 1000000 + time1.tv_usec - time0.tv_usec;
//launch kernel
//Host multithreading could be used here, but it is not necessary: even a 32*32*32 input (numK=3072) already occupies all GPU threads (2560 on an RTX 2070) simultaneously
//and takes around 2 s of CPU time. Multithreading would only help for small inputs, which might as well be handled directly on the CPU.
//In practice, host multithreading roughly halves the 32x32x32 performance.
for (int indexX = 0; indexX < numX; indexX++) {
//allocate result space. per indexX
float* globalqrD;
float* globalqiD;
// gettimeofday(&time0, &tz);
cudaMalloc(&globalqrD, dim_grid.x * sizeof(float));
cudaMalloc(&globalqiD, dim_grid.x * sizeof(float));
// gettimeofday(&time1, &tz);
// memoryTime += (time1.tv_sec - time0.tv_sec) * 1000000 + time1.tv_usec - time0.tv_usec;
// gettimeofday(&time0, &tz);
ComputeQGPU<<<dim_grid, dim_block>>>(globalqrD, globalqiD, *kValsD, xH[indexX], yH[indexX], zH[indexX]);
cudaDeviceSynchronize();
// gettimeofday(&time1, &tz);
// kernelTime += (time1.tv_sec - time0.tv_sec) * 1000000 + time1.tv_usec - time0.tv_usec;
//reduction
int currentDataNum = dim_grid.x;
int interval = 1;
dim3 dim_grid_reduction, dim_block_reduction;
while (currentDataNum != 1) {
dim_grid_reduction.x = currentDataNum / REDUCTION_BLOCK_SIZE + (currentDataNum % REDUCTION_BLOCK_SIZE == 0 ? 0 : 1);
dim_grid_reduction.y = 1;
dim_grid_reduction.z = 1;
dim_block_reduction.x = REDUCTION_BLOCK_SIZE;
dim_block_reduction.y = 1;
dim_block_reduction.z = 1;
// gettimeofday(&time0, &tz);
ImprovedReductionKernel<<<dim_grid_reduction, dim_block_reduction>>>(globalqrD, interval, currentDataNum);
ImprovedReductionKernel<<<dim_grid_reduction, dim_block_reduction>>>(globalqiD, interval, currentDataNum);
cudaDeviceSynchronize();
// gettimeofday(&time1, &tz);
// kernelTime += (time1.tv_sec - time0.tv_sec) * 1000000 + time1.tv_usec - time0.tv_usec;
interval *= REDUCTION_BLOCK_SIZE;
currentDataNum = currentDataNum / REDUCTION_BLOCK_SIZE + (currentDataNum % REDUCTION_BLOCK_SIZE == 0 ? 0 : 1);
}
// gettimeofday(&time0, &tz);
cudaMemcpy(&(QrH[indexX]), globalqrD, sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&(QiH[indexX]), globalqiD, sizeof(float), cudaMemcpyDeviceToHost);
// gettimeofday(&time1, &tz);
// memoryTime += (time1.tv_sec - time0.tv_sec) * 1000000 + time1.tv_usec - time0.tv_usec;
}
// printf("kernel: %ld us\n", kernelTime);
// printf("IO: %ld us\n", memoryTime);
}
|
30bbefb25e57a9f62571ba4fb7d19df39889bf08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2017, September 11 - October 9
// ###
// Exercise 10
// Written by: Jiho Yang (M.Sc student in Computational Science & Engineering)
// Matriculation number: 03675799
#include "helper.h"
#include <stdio.h>
#include <iostream>
#include <string>
#include <unistd.h>
using namespace std;
const float pi = 3.141592653589793238462f;
// uncomment to use the camera
//#define CAMERA
// Compute gradient
__global__ void compute_gradient(float *d_gradx, float *d_grady, float *d_imgIn, int w, int h, int nc){
// Get x y z pixel coordinates in 3D kernel
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int z = threadIdx.z + blockIdx.z*blockDim.z;
// Get high indices
size_t x_high = x + 1 + (size_t)w*y + (size_t)h*w*z;
size_t y_high = x + (size_t)w*(y+1) + (size_t)h*w*z;
size_t idx = x + (size_t)w*y + (size_t)h*w*z;
// Ensure no threads are out of problem domain
if (x < w && y < h && z < nc){
// Compute gradient
if (x < w-1){
d_gradx[idx] = d_imgIn[x_high] - d_imgIn[idx];
} else
d_gradx[idx] = 0;
if (y < h-1){
d_grady[idx] = d_imgIn[y_high] - d_imgIn[idx];
} else
d_grady[idx] = 0;
}
}
// Compute L2 norm
__device__ void compute_norm(float *d_norm, float *d_vec1, float *d_vec2, int w, int h, int nc){
// Temporary variable for norm
float sqrd1 = 0;
float sqrd2 = 0;
float val1, val2;
// Get coordinates
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
// Get index
int idx = x + (size_t)w*y;
// Compute norm
if (x < w && y < h){
for (size_t c = 0; c < nc; c++){
// Get index
size_t idx_3d = idx + (size_t)w*h*c;
// Compute L2 norm
val1 = d_vec1[idx_3d];
val2 = d_vec2[idx_3d];
sqrd1 += val1*val1;
sqrd2 += val2*val2;
}
d_norm[idx] = sqrtf(sqrd1*sqrd1 + sqrd2*sqrd2);
}
}
// Compute divergence
__global__ void compute_divergence(float *d_div, float *d_gradx, float *d_grady, int w, int h, int nc){
// Get x y z pixel coordinates in 3D kernel
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int z = threadIdx.z + blockIdx.z*blockDim.z;
// Get low indices
size_t idx = x + (size_t)w*y + (size_t)h*w*z;
size_t x_low = x-1 + (size_t)w*y + (size_t)h*w*z;
size_t y_low = x + (size_t)w*(y-1) + (size_t)h*w*z;
// Temporary values
float v_x, v_y;
// Ensure no threads are out of problem domain
if (x < w && y < h && z < nc){
// Compute divergence
if (x > 1){
v_x = d_gradx[idx] - d_gradx[x_low];
} else
v_x = 0;
if (y > 1){
v_y = d_grady[idx] - d_grady[y_low];
} else
v_y = 0;
// Sum gradients
d_div[idx] = v_x + v_y;
}
/*
if (idx == 0){
printf("Divergence = %f\n", d_div[idx]);
}
*/
}
// Convolution on global memory
__global__ void convolution_global(float *d_imgIn, float *d_imgOut, float *d_kernel, int w, int h, int nc, int w_kernel, int h_kernel){
// Get coordinates
int x = threadIdx.x + blockDim.x*blockIdx.x;
int y = threadIdx.y + blockDim.y*blockIdx.y;
//int z = threadIdx.z + blockDim.z*blockIdx.z;
// Get indices
size_t idx = x + (size_t)w*y;
// Initialise d_imgOut
// Set origin
int mid = (w_kernel-1)/2;
// Convolution - Note x_kernel is the global x coordinate of kernel in the problem domain
for (size_t c = 0; c < nc; c++){
size_t idx_3d = idx + (size_t)w*h*c;
d_imgOut[idx_3d] = 0.0f;
if (x < w && y < h){
for (size_t j = 0; j < h_kernel; j++){
for (size_t i = 0; i < w_kernel; i++){
// Boundary condition
int x_kernel_global = x - mid + i;
int y_kernel_global = y - mid + j;
// clamping
if (x_kernel_global < 0){
x_kernel_global = 0;
}
if (x_kernel_global > w-1){
x_kernel_global = w - 1;
}
if (y_kernel_global < 0){
y_kernel_global = 0;
}
if (y_kernel_global > h - 1){
y_kernel_global = h - 1;
}
// Get indices
int idx_kernel_local = i + w_kernel*j;
int idx_kernel_global = x_kernel_global + w*y_kernel_global + w*h*c;
// Multiply and sum
d_imgOut[idx_3d] += d_kernel[idx_kernel_local] * d_imgIn[idx_kernel_global];
}
}
}
}
}
// Set up kernel
void get_kernel(float *kernel, int w_kernel, int h_kernel, const float pi, float sigma){
//Set up parameters
int origin = w_kernel/2;
float total = 0.0f;
// Define 2D Gaussian kernel
for (size_t y_kernel = 0; y_kernel < h_kernel; y_kernel++){
for (size_t x_kernel = 0; x_kernel < w_kernel; x_kernel++){
int a = x_kernel - origin;
int b = y_kernel - origin;
int idx = x_kernel + w_kernel*y_kernel;
kernel[idx] = (1.0f / (2.0f*pi*sigma*sigma))*exp(-1*((a*a+b*b) / (2*sigma*sigma)));
total += kernel[idx];
}
}
// Normalise kernel
for (size_t y_kernel = 0; y_kernel < h_kernel; y_kernel++){
for (size_t x_kernel = 0; x_kernel < w_kernel; x_kernel++){
int idx = x_kernel + w_kernel*y_kernel;
kernel[idx] /= total;
}
}
}
// Compute eigenvalue of a 2 by 2 matrix
__device__ void compute_eigenvalue(float &eigen_value_0, float &eigen_value_1, float &eigen_vector_0, float &eigen_vector_1, float &eigen_vector_2, float &eigen_vector_3, float d_t1_val, float d_t2_val, float d_t3_val, int w, int h){
// Define matrix
float A[4] = {d_t1_val, d_t2_val, d_t2_val, d_t3_val};
// Define elements
float a = A[0];
float b = A[1];
float c = A[2];
float d = A[3];
// Trace and determinant
float T = a + d;
float D = a*d - b*c;
// Compute eigenvalue
eigen_value_0 = T/2 + sqrtf(T*T/4-D);
eigen_value_1 = T/2 - sqrtf(T*T/4-D);
// Sort eigenvalue array (val_1 > val_2)
if (eigen_value_0 < eigen_value_1){
float swap = eigen_value_0;
eigen_value_0 = eigen_value_1;
eigen_value_1 = swap;
}
// Compute eigenvectors
/*
if (c != 0){
eigen_vector_0 = eigen_value_0 - d;
eigen_vector_1 = c;
eigen_vector_2 = eigen_value_1 - d;
eigen_vector_3 = c;
}
*/
if (b != 0){
eigen_vector_0 = b;
eigen_vector_1 = eigen_value_0 - a;
eigen_vector_2 = b;
eigen_vector_3 = eigen_value_1 - a;
}
else if (b == 0 && c == 0){
eigen_vector_0 = 1;
eigen_vector_1 = 0;
eigen_vector_2 = 0;
eigen_vector_3 = 1;
}
// Scale eigenvector
eigen_vector_0 = 1.f*eigen_vector_0;
eigen_vector_1 = 1.f*eigen_vector_1;
eigen_vector_2 = 1.f*eigen_vector_2;
eigen_vector_3 = 1.f*eigen_vector_3;
// Get coordinates
int x = threadIdx.x + blockDim.x*blockIdx.x;
int y = threadIdx.y + blockDim.y*blockIdx.y;
int z = threadIdx.z + blockDim.z*blockIdx.z;
// Get index
size_t idx = x + (size_t)w*y + (size_t)w*h*z;
/*
if (idx == 0){
printf("a = %f\n", a);
printf("b = %f\n", b);
printf("c = %f\n", c);
printf("d = %f\n", d);
}
*/
}
// Apply anisotropic diffusion
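// Builds the diffusion tensor G = mu1*e1*e1^T + mu2*e2*e2^T from the eigen-decomposition of the structure tensor (d_t1, d_t2, d_t3) and multiplies it onto the image gradient.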
__global__ void apply_diffusion(float *d_gradx, float *d_grady, float *d_imgIn, float alpha, float C, float *d_t1, float *d_t2, float *d_t3, int w, int h, int nc){
// Define eigenvalue and eigenvector
float eigen_value_0, eigen_value_1;
float eigen_vector_0, eigen_vector_1, eigen_vector_2, eigen_vector_3;
// Get coordinates
int x = threadIdx.x + blockDim.x*blockIdx.x;
int y = threadIdx.y + blockDim.y*blockIdx.y;
int z = threadIdx.z + blockDim.z*blockIdx.z;
// Get index
size_t idx = x + (size_t)w*y + (size_t)w*h*z;
size_t idx_2d = x + (size_t)w*y;
// Compute eigenvalues and eigenvector
if (x < w && y < h && z < nc){
compute_eigenvalue(eigen_value_0, eigen_value_1, eigen_vector_0, eigen_vector_1, eigen_vector_2, eigen_vector_3, d_t1[idx_2d], d_t2[idx_2d], d_t3[idx_2d], w, h);
}
__syncthreads();
// Get Mu
float mu_1 = alpha;
float mu_2;
if (eigen_value_0 == eigen_value_1){
mu_2 = alpha;
} else{
float eigdif = eigen_value_0 - eigen_value_1;
float inside = -C/(eigdif*eigdif);
mu_2 = alpha + (1 - alpha)*exp(inside);
}
// Get diffusion tensor
float G[4];
G[0] = mu_1*eigen_vector_0*eigen_vector_0 + mu_2*eigen_vector_2*eigen_vector_2;
G[1] = mu_1*eigen_vector_0*eigen_vector_1 + mu_2*eigen_vector_2*eigen_vector_3;
G[2] = mu_1*eigen_vector_1*eigen_vector_0 + mu_2*eigen_vector_3*eigen_vector_2;
G[3] = mu_1*eigen_vector_1*eigen_vector_1 + mu_2*eigen_vector_3*eigen_vector_3;
//G[0] = 1;
//G[1] = 0;
//G[2] = 0;
//G[3] = 1;
__syncthreads();
/*
if (idx == 0){
printf("Before diffusion\n");
printf("d_gradx = %f\n", d_gradx[idx]);
printf("d_grady = %f\n", d_grady[idx]);
}
*/
// Update gradient
if (x < w && y < h && z < nc){
// read the original gradient once so that both rows of the matrix-vector product use the same input
float gx = d_gradx[idx];
float gy = d_grady[idx];
d_gradx[idx] = G[0]*gx + G[1]*gy;
d_grady[idx] = G[2]*gx + G[3]*gy;
}
/*
if (idx == 0){
printf("After diffusion\n");
printf("d_gradx = %f\n", d_gradx[idx]);
printf("d_grady = %f\n", d_grady[idx]);
printf("G[0] = %f\n", G[0]);
printf("G[1] = %f\n", G[1]);
printf("G[2] = %f\n", G[2]);
printf("G[3] = %f\n", G[3]);
printf("eigenvalue_0 = %f\n", eigen_value_0);
printf("eigenvalue_1 = %f\n", eigen_value_1);
printf("eigenvector_0 = %f\n", eigen_vector_0);
printf("eigenvector_1 = %f\n", eigen_vector_1);
printf("eigenvector_2 = %f\n", eigen_vector_2);
printf("eigenvector_3 = %f\n", eigen_vector_3);
printf("Mu_1 = %f\n", mu_1);
printf("Mu_2 = %f\n", mu_2);
}
*/
}
// Update image
__global__ void update_image(float *d_imgIn, float *d_div, float tau, int w, int h, int nc){
// Get coordinates
int x = threadIdx.x + blockDim.x*blockIdx.x;
int y = threadIdx.y + blockDim.y*blockIdx.y;
int z = threadIdx.z + blockDim.z*blockIdx.z;
// Get index
size_t idx = x + (size_t)w*y + (size_t)w*h*z;
/*
if (idx == 0){
printf("Before update\n");
printf("d_imgIn = %f\n", d_imgIn[idx]);
}
*/
if (x < w && y < h && z < nc){
// Update image
d_imgIn[idx] += tau * d_div[idx];
}
/*
if (idx == 0){
printf("After update\n");
printf("d_imgIn = %f\n", d_imgIn[idx]);
}
*/
}
// Compute M
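// Per-pixel structure-tensor entries, summed over channels: m1 = sum gx*gx, m2 = sum gx*gy, m3 = sum gy*gy.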
__global__ void compute_M(float *d_m1, float *d_m2, float *d_m3, float *d_gradx, float *d_grady, int w, int h, int nc){
// Get coordinates
int x = threadIdx.x + blockDim.x*blockIdx.x;
int y = threadIdx.y + blockDim.y*blockIdx.y;
// Get index in matrices m
size_t idx_2d = x + (size_t)w*y;
// Initialise sums
float sum1 = 0;
float sum2 = 0;
float sum3 = 0;
if (x < w && y < h){
// Loop through channels
for (size_t c = 0 ; c < nc; c++){
// Get index
size_t idx = x + (size_t)w*y + (size_t)w*h*c;
sum1 += d_gradx[idx] * d_gradx[idx];
sum2 += d_gradx[idx] * d_grady[idx];
sum3 += d_grady[idx] * d_grady[idx];
}
// Fill matrices
d_m1[idx_2d] = sum1;
d_m2[idx_2d] = sum2;
d_m3[idx_2d] = sum3;
}
}
// Rotationally robust gradient
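// Scharr-type 3/10/3 derivative stencil, normalised by 32, with clamped image boundaries.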
__global__ void rotational_gradient(float *d_imgIn, float *d_gradx, float *d_grady, int w, int h, int nc){
// Get coordinates
int x = threadIdx.x + blockDim.x*blockIdx.x;
int y = threadIdx.y + blockDim.y*blockIdx.y;
int z = threadIdx.z + blockDim.z*blockIdx.z;
// Get indices
size_t idx = x + (size_t)w*y + (size_t)w*h*z;
// Compute gradient
if (x < w && y < h && z < nc){
// Get neighbouring indices
int x_high = x + 1;
int y_high = y + 1;
int x_low = x - 1;
int y_low = y - 1;
// Clamping
if (x_high > w - 1){
x_high = w - 1;
}
if (y_high > h - 1){
y_high = h - 1;
}
if (x_low < 0){
x_low = 0;
}
if (y_low < 0){
y_low = 0;
}
// Get indices of neighbouring indices
size_t idx_x_high_y_high = x_high + (size_t)w*y_high + (size_t)w*h*z;
size_t idx_x_high_y_low = x_high + (size_t)w*y_low + (size_t)w*h*z;
size_t idx_x_low_y_high = x_low + (size_t)w*y_high + (size_t)w*h*z;
size_t idx_x_low_y_low = x_low + (size_t)w*y_low + (size_t)w*h*z;
size_t idx_x_high_y_mid = x_high + (size_t)w*y + (size_t)w*h*z;
size_t idx_x_low_y_mid = x_low + (size_t)w*y + (size_t)w*h*z;
size_t idx_x_mid_y_high = x + (size_t)w*y_high + (size_t)w*h*z;
size_t idx_x_mid_y_low = x + (size_t)w*y_low + (size_t)w*h*z;
// Compute gradient
d_gradx[idx] = (3*d_imgIn[idx_x_high_y_high] + 10*d_imgIn[idx_x_high_y_mid]
+ 3*d_imgIn[idx_x_high_y_low] - 3*d_imgIn[idx_x_low_y_high]
- 10*d_imgIn[idx_x_low_y_mid] - 3*d_imgIn[idx_x_low_y_low])/32;
d_grady[idx] = (3*d_imgIn[idx_x_high_y_high] + 10*d_imgIn[idx_x_mid_y_high]
+ 3*d_imgIn[idx_x_low_y_high] - 3*d_imgIn[idx_x_high_y_low]
- 10*d_imgIn[idx_x_mid_y_low] - 3*d_imgIn[idx_x_low_y_low])/32;
}
}
// main
int main(int argc, char **argv)
{
// Before the GPU can process your kernels, a so called "CUDA context" must be initialized
// This happens on the very first call to a CUDA function, and takes some time (around half a second)
// We will do it right here, so that the run time measurements are accurate
hipDeviceSynchronize(); CUDA_CHECK;
// Reading command line parameters:
// getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
// If "-param" is not specified, the value of "var" remains unchanged
//
// return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
#ifdef CAMERA
#else
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// load the input image as grayscale if "-gray" is specifed
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// Diffusion
float tau = 0.01f;
int N = 20000;
// Convolution kernel
float sigma = 0.5f;
float phi = 3.0f;
getParam("sigma", sigma, argc, argv);
cout << "sigma: " << sigma << endl;
// ### Define your own parameters here as needed
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
// Define kernel dimensions
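// A radius of ceil(3*sigma) captures about 99.7% of the Gaussian mass.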
int r = ceil(3*sigma);
int w_kernel = r * 2 + 1; //windowing
int h_kernel = w_kernel; //Square kernel
int r_phi = ceil(3*phi);
int w_kernel_phi = r_phi * 2 + 1;
int h_kernel_phi = w_kernel_phi;
// Kernel information
cout << "image: " << w << " x " << h << endl;
// Set the output image format
// ###
// ###
// ### TODO: Change the output image format as needed
// ###
// ###
cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
//cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers
//cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer
// ### Define your own output images here as needed
cv::Mat mgradx(h, w, mIn.type());
cv::Mat mgrady(h, w, mIn.type());
cv::Mat mdiv(h, w, mIn.type());
cv::Mat mt1(h, w, CV_32FC1);
cv::Mat mt2(h, w, CV_32FC1);
cv::Mat mt3(h, w, CV_32FC1);
// Allocate arrays
// input/output image width: w
// input/output image height: h
// input image number of channels: nc
// output image number of channels: mOut.channels(), as defined above (nc, 3, or 1)
// Get array memory
int nbytes = w * h * nc * sizeof(float);
int nbytes_kernel = w_kernel * h_kernel * sizeof(float);
int nbytes_kernel_phi = w_kernel_phi * h_kernel_phi * sizeof(float);
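// Note: the nbytes* values are byte counts (they already include sizeof(float)); they are
// meant for hipMalloc/hipMemcpy below, whereas new[] takes a number of elements.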
// allocate raw input image array
float *imgIn = new float[(size_t)w*h*nc];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgOut = new float[(size_t)w*h*mOut.channels()];
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
// Init raw input image array
// opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...)
// But for CUDA it's better to work with layered images: rrr... ggg... bbb...
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations
convert_mat_to_layered (imgIn, mIn);
// ###
// ###
// ### TODO: Main computation
// ###
// ###
////////////////////////////////////////////////////////////////////// Block setting ///////////////////////////////////////////////////////////////////////
dim3 block = dim3(128, 1, 1);
dim3 grid = dim3((w + block.x - 1) / block.x, (h + block.y - 1) / block.y, (nc + block.z - 1) / block.z);
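// (n + block - 1) / block is ceiling division, so the grid covers every pixel and every
// channel even when w, h or nc is not a multiple of the block size; threads that fall
// outside the image are skipped by the bounds checks inside each kernel.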
float alpha = 0.9f;
float C = 0.0000005f;
Timer timer; timer.start();
////////////////////////////////////////////////////////////////////// Arrays (Device) /////////////////////////////////////////////////////////////////////
// Kernel
float *d_kernel;
float *d_kernel_phi;
// Images
float *d_imgIn;
float *d_imgOut;
// Gradients
float *d_gradx;
float *d_grady;
// Gradients for structure tensor
float *d_gradx_tensor;
float *d_grady_tensor;
float *d_m1;
float *d_m2;
float *d_m3;
float *d_t1;
float *d_t2;
float *d_t3;
// Norm
float *d_norm;
// Divergence
float *d_div;
////////////////////////////////////////////////////////////////////// Arrays (Host) /////////////////////////////////////////////////////////////////////
// Kernel
float *kernel = new float[w_kernel * h_kernel];
float *kernel_phi = new float[w_kernel_phi * h_kernel_phi];
// Structure tensor
float *m1 = new float[w*h];
float *m2 = new float[w*h];
float *m3 = new float[w*h];
float *t1 = new float[w*h];
float *t2 = new float[w*h];
float *t3 = new float[w*h];
// Gradient
float *gradx = new float[(size_t)w*h*nc];
float *grady = new float[(size_t)w*h*nc];
// Divergence
float *div = new float[(size_t)w*h*nc];
////////Create kernel
get_kernel(kernel, w_kernel, h_kernel, pi, sigma);
get_kernel(kernel_phi, w_kernel_phi, h_kernel_phi, pi, phi);
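// Both kernels use radius r = ceil(3*sigma) (resp. ceil(3*phi)), i.e. a (2r+1)x(2r+1)
// window: a 3-sigma cutoff keeps ~99.7% of the Gaussian mass, and get_kernel normalises
// the discrete weights so they sum to 1.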
// Processor type
string processor;
////////////////////////////////////////////////////////////////////////// CUDA //////////////////////////////////////////////////////////////////////////
// CUDA malloc
// Kernel
hipMalloc(&d_kernel, nbytes_kernel); CUDA_CHECK;
hipMalloc(&d_kernel_phi, nbytes_kernel_phi); CUDA_CHECK;
// Images
hipMalloc(&d_imgIn, nbytes); CUDA_CHECK;
hipMalloc(&d_imgOut, nbytes); CUDA_CHECK;
// Gradients
hipMalloc(&d_gradx, nbytes); CUDA_CHECK;
hipMalloc(&d_grady, nbytes); CUDA_CHECK;
// Gradients for structure tensor
hipMalloc(&d_gradx_tensor, nbytes); CUDA_CHECK;
hipMalloc(&d_grady_tensor, nbytes); CUDA_CHECK;
hipMalloc(&d_m1, w*h*sizeof(float)); CUDA_CHECK;
hipMalloc(&d_m2, w*h*sizeof(float)); CUDA_CHECK;
hipMalloc(&d_m3, w*h*sizeof(float)); CUDA_CHECK;
hipMalloc(&d_t1, w*h*sizeof(float)); CUDA_CHECK;
hipMalloc(&d_t2, w*h*sizeof(float)); CUDA_CHECK;
hipMalloc(&d_t3, w*h*sizeof(float)); CUDA_CHECK;
// Norm
hipMalloc(&d_norm, w*h*sizeof(float)); CUDA_CHECK;
// Divergence
hipMalloc(&d_div, nbytes); CUDA_CHECK;
// CUDA copy
hipMemcpy(d_kernel, kernel, nbytes_kernel, hipMemcpyHostToDevice); CUDA_CHECK;
hipMemcpy(d_kernel_phi, kernel_phi, nbytes_kernel_phi, hipMemcpyHostToDevice); CUDA_CHECK;
hipMemcpy(d_imgIn, imgIn, nbytes, hipMemcpyHostToDevice); CUDA_CHECK;
// Iterations
for (size_t i = 0; i < N; i++){
// Initial convolution - structure tensor
hipLaunchKernelGGL(( convolution_global) , dim3(grid), dim3(block) , 0, 0, d_imgIn, d_imgOut, d_kernel, w, h, nc, w_kernel, h_kernel); CUDA_CHECK;
// Compute gradient of the convolved image - structure tensor
hipLaunchKernelGGL(( rotational_gradient) , dim3(grid), dim3(block) , 0, 0, d_imgOut, d_gradx_tensor, d_grady_tensor, w, h, nc); CUDA_CHECK;
// Compute m1, m2, and m3 - structure tensor
hipLaunchKernelGGL(( compute_M) , dim3(grid), dim3(block) , 0, 0, d_m1, d_m2, d_m3, d_gradx_tensor, d_grady_tensor, w, h, nc); CUDA_CHECK;
// Convolution on m1 - structure tensor
hipLaunchKernelGGL(( convolution_global) , dim3(grid), dim3(block) , 0, 0, d_m1, d_t1, d_kernel_phi, w, h, 1, w_kernel_phi, h_kernel_phi); CUDA_CHECK;
// Convolution on m2 - structure tensor
hipLaunchKernelGGL(( convolution_global) , dim3(grid), dim3(block) , 0, 0, d_m2, d_t2, d_kernel_phi, w, h, 1, w_kernel_phi, h_kernel_phi); CUDA_CHECK;
// Convolution on m3 - structure tensor
hipLaunchKernelGGL(( convolution_global) , dim3(grid), dim3(block) , 0, 0, d_m3, d_t3, d_kernel_phi, w, h, 1, w_kernel_phi, h_kernel_phi); CUDA_CHECK;
// Compute gradient
hipLaunchKernelGGL(( compute_gradient) , dim3(grid), dim3(block) , 0, 0, d_gradx, d_grady, d_imgIn, w, h, nc); CUDA_CHECK;
// Apply diffusion tensor
hipLaunchKernelGGL(( apply_diffusion) , dim3(grid), dim3(block) , 0, 0, d_gradx, d_grady, d_imgIn, alpha, C, d_t1, d_t2, d_t3, w, h, nc); CUDA_CHECK;
// Compute divergence
hipLaunchKernelGGL(( compute_divergence) , dim3(grid), dim3(block) , 0, 0, d_div, d_gradx, d_grady, w, h, nc); CUDA_CHECK;
// Update image
hipLaunchKernelGGL(( update_image) , dim3(grid), dim3(block) , 0, 0, d_imgIn, d_div, tau, w, h, nc); CUDA_CHECK;
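// Together, compute_gradient / apply_diffusion / compute_divergence / update_image perform
// one explicit Euler step of the anisotropic diffusion PDE: u <- u + tau * div(G * grad(u)).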
cout << "Iteration = " << i << endl;
}
// Copy the results to host
hipMemcpy(imgOut, d_imgIn, nbytes, hipMemcpyDeviceToHost); CUDA_CHECK;
hipMemcpy(gradx, d_gradx_tensor, nbytes, hipMemcpyDeviceToHost); CUDA_CHECK;
hipMemcpy(grady, d_grady_tensor, nbytes, hipMemcpyDeviceToHost); CUDA_CHECK;
hipMemcpy(div, d_div, nbytes, hipMemcpyDeviceToHost); CUDA_CHECK;
hipMemcpy(t1, d_t1, w*h*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(t2, d_t2, w*h*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(t3, d_t3, w*h*sizeof(float), hipMemcpyDeviceToHost);
// Free memory
hipFree(d_imgIn); CUDA_CHECK;
hipFree(d_imgOut); CUDA_CHECK;
hipFree(d_kernel); CUDA_CHECK;
hipFree(d_kernel_phi); CUDA_CHECK;
hipFree(d_div); CUDA_CHECK;
hipFree(d_gradx); CUDA_CHECK;
hipFree(d_grady); CUDA_CHECK;
hipFree(d_gradx_tensor); CUDA_CHECK;
hipFree(d_grady_tensor); CUDA_CHECK;
hipFree(d_m1); CUDA_CHECK;
hipFree(d_m2); CUDA_CHECK;
hipFree(d_m3); CUDA_CHECK;
hipFree(d_norm); CUDA_CHECK;
hipFree(d_t1); CUDA_CHECK;
hipFree(d_t2); CUDA_CHECK;
hipFree(d_t3); CUDA_CHECK;
// Type of processor
processor = "GPU - global memory";
cout << processor << endl;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
timer.end(); float t = timer.get();
cout << "time: " << t*1000 << " ms" << endl;
// show input image
showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100)
// show output image: first convert to interleaved opencv format from the layered raw array
convert_layered_to_mat(mOut, imgOut);
showImage("Diffusion", mOut, 100+w+40, 100);
// ### Display your own output images here as needed
convert_layered_to_mat(mgradx, gradx);
convert_layered_to_mat(mgrady, grady);
convert_layered_to_mat(mdiv, div);
convert_layered_to_mat(mt1, t1);
convert_layered_to_mat(mt2, t2);
convert_layered_to_mat(mt3, t3);
//showImage("t1", 10.f*mt1, 50, 250);
//showImage("t2", 10.f*mt2, 50 + w, 250);
//showImage("t3", 10.f*mt3, 50 + 2 * w, 250);
//showImage("grad_x", mgradx, 100+w+50, 150);
//showImage("grad_y", mgrady, 100+w+60, 150);
//showImage("div", mdiv, 100+w+80, 200);
/*
showImage("m1", 10.f*mM1, 50, 200);
showImage("m2", 10.f*mM2, 50 + w, 200);
showImage("m3", 10.f*mM3, 50 + 2 * w, 200);
showImage("t1", 10.f*mT1, 50, 250);
showImage("t2", 10.f*mT2, 50 + w, 250);
showImage("t3", 10.f*mT3, 50 + 2 * w, 250);
*/
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_result.png",mOut*255.f);
// free allocated arrays
#ifdef CAMERA
delete[] imgIn;
delete[] imgOut;
#else
delete[] imgIn;
delete[] imgOut;
delete[] kernel;
delete[] kernel_phi;
delete[] m1;
delete[] m2;
delete[] m3;
delete[] t1;
delete[] t2;
delete[] t3;
delete[] gradx;
delete[] grady;
delete[] div;
#endif
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
| 30bbefb25e57a9f62571ba4fb7d19df39889bf08.cu | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2017, September 11 - October 9
// ###
// Exercise 10
// Written by: Jiho Yang (M.Sc student in Computational Science & Engineering)
// Matriculation number: 03675799
#include "helper.h"
#include <stdio.h>
#include <iostream>
#include <string>
#include <unistd.h>
using namespace std;
const float pi = 3.141592653589793238462f;
// uncomment to use the camera
//#define CAMERA
// Compute gradient
__global__ void compute_gradient(float *d_gradx, float *d_grady, float *d_imgIn, int w, int h, int nc){
// Get x y z pixel coordinates in 3D kernel
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int z = threadIdx.z + blockIdx.z*blockDim.z;
// Get high indices
size_t x_high = x + 1 + (size_t)w*y + (size_t)h*w*z;
size_t y_high = x + (size_t)w*(y+1) + (size_t)h*w*z;
size_t idx = x + (size_t)w*y + (size_t)h*w*z;
// Ensure no threads are out of problem domain
if (x < w && y < h && z < nc){
// Compute gradient
if (x < w-1){
d_gradx[idx] = d_imgIn[x_high] - d_imgIn[idx];
} else
d_gradx[idx] = 0;
if (y < h-1){
d_grady[idx] = d_imgIn[y_high] - d_imgIn[idx];
} else
d_grady[idx] = 0;
}
}
// Compute L2 norm
__device__ void compute_norm(float *d_norm, float *d_vec1, float *d_vec2, int w, int h, int nc){
// Temporary variable for norm
float sqrd1 = 0;
float sqrd2 = 0;
float val1, val2;
// Get coordinates
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
// Get index
int idx = x + (size_t)w*y;
// Compute norm
if (x < w && y < h){
for (size_t c = 0; c < nc; c++){
// Get index
size_t idx_3d = idx + (size_t)w*h*c;
// Compute L2 norm
val1 = d_vec1[idx_3d];
val2 = d_vec2[idx_3d];
sqrd1 += val1*val1;
sqrd2 += val2*val2;
}
d_norm[idx] = sqrtf(sqrd1*sqrd1 + sqrd2*sqrd2);
}
}
// Compute divergence
__global__ void compute_divergence(float *d_div, float *d_gradx, float *d_grady, int w, int h, int nc){
// Get x y z pixel coordinates in 3D kernel
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int z = threadIdx.z + blockIdx.z*blockDim.z;
// Get low indices
size_t idx = x + (size_t)w*y + (size_t)h*w*z;
size_t x_low = x-1 + (size_t)w*y + (size_t)h*w*z;
size_t y_low = x + (size_t)w*(y-1) + (size_t)h*w*z;
// Temporary values
float v_x, v_y;
// Ensure no threads are out of problem domain
if (x < w && y < h && z < nc){
// Compute divergence
if (x > 1){
v_x = d_gradx[idx] - d_gradx[x_low];
} else
v_x = 0;
if (y > 1){
v_y = d_grady[idx] - d_grady[y_low];
} else
v_y = 0;
// Sum gradients
d_div[idx] = v_x + v_y;
}
/*
if (idx == 0){
printf("Divergence = %f\n", d_div[idx]);
}
*/
}
// Convolution on global memory
__global__ void convolution_global(float *d_imgIn, float *d_imgOut, float *d_kernel, int w, int h, int nc, int w_kernel, int h_kernel){
// Get coordinates
int x = threadIdx.x + blockDim.x*blockIdx.x;
int y = threadIdx.y + blockDim.y*blockIdx.y;
//int z = threadIdx.z + blockDim.z*blockIdx.z;
// Get indices
size_t idx = x + (size_t)w*y;
// Initialise d_imgOut
// Set origin
int mid = (w_kernel-1)/2;
// Convolution - Note x_kernel is the global x coordinate of kernel in the problem domain
for (size_t c = 0; c < nc; c++){
size_t idx_3d = idx + (size_t)w*h*c;
d_imgOut[idx_3d] = 0.0f;
if (x < w && y < h){
for (size_t j = 0; j < h_kernel; j++){
for (size_t i = 0; i < w_kernel; i++){
// Boundary condition
int x_kernel_global = x - mid + i;
int y_kernel_global = y - mid + j;
// clamping
if (x_kernel_global < 0){
x_kernel_global = 0;
}
if (x_kernel_global > w-1){
x_kernel_global = w - 1;
}
if (y_kernel_global < 0){
y_kernel_global = 0;
}
if (y_kernel_global > h - 1){
y_kernel_global = h - 1;
}
// Get indices
int idx_kernel_local = i + w_kernel*j;
int idx_kernel_global = x_kernel_global + w*y_kernel_global + w*h*c;
// Multiply and sum
d_imgOut[idx_3d] += d_kernel[idx_kernel_local] * d_imgIn[idx_kernel_global];
}
}
}
}
}
// Set up kernel
void get_kernel(float *kernel, int w_kernel, int h_kernel, const float pi, float sigma){
//Set up parameters
int origin = w_kernel/2;
float total = 0.0f;
// Define 2D Gaussian kernel
for (size_t y_kernel = 0; y_kernel < h_kernel; y_kernel++){
for (size_t x_kernel = 0; x_kernel < w_kernel; x_kernel++){
int a = x_kernel - origin;
int b = y_kernel - origin;
int idx = x_kernel + w_kernel*y_kernel;
kernel[idx] = (1.0f / (2.0f*pi*sigma*sigma))*exp(-1*((a*a+b*b) / (2*sigma*sigma)));
total += kernel[idx];
}
}
// Normalise kernel
for (size_t y_kernel = 0; y_kernel < h_kernel; y_kernel++){
for (size_t x_kernel = 0; x_kernel < w_kernel; x_kernel++){
int idx = x_kernel + w_kernel*y_kernel;
kernel[idx] /= total;
}
}
}
// Compute eigenvalue of a 2 by 2 matrix
__device__ void compute_eigenvalue(float &eigen_value_0, float &eigen_value_1, float &eigen_vector_0, float &eigen_vector_1, float &eigen_vector_2, float &eigen_vector_3, float d_t1_val, float d_t2_val, float d_t3_val, int w, int h){
// Define matrix
float A[4] = {d_t1_val, d_t2_val, d_t2_val, d_t3_val};
// Define elements
float a = A[0];
float b = A[1];
float c = A[2];
float d = A[3];
// Trace and determinant
float T = a + d;
float D = a*d - b*c;
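// For a symmetric 2x2 matrix the eigenvalues follow from the characteristic polynomial:
// lambda_{1,2} = T/2 +- sqrt(T^2/4 - D), with T the trace and D the determinant.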
// Compute eigenvalue
eigen_value_0 = T/2 + sqrtf(T*T/4-D);
eigen_value_1 = T/2 - sqrtf(T*T/4-D);
// Sort eigenvalue array (val_1 > val_2)
if (eigen_value_0 < eigen_value_1){
float swap = eigen_value_0;
eigen_value_0 = eigen_value_1;
eigen_value_1 = swap;
}
// Compute eigenvectors
/*
if (c != 0){
eigen_vector_0 = eigen_value_0 - d;
eigen_vector_1 = c;
eigen_vector_2 = eigen_value_1 - d;
eigen_vector_3 = c;
}
*/
if (b != 0){
eigen_vector_0 = b;
eigen_vector_1 = eigen_value_0 - a;
eigen_vector_2 = b;
eigen_vector_3 = eigen_value_1 - a;
}
else if (b == 0 && c == 0){
eigen_vector_0 = 1;
eigen_vector_1 = 0;
eigen_vector_2 = 0;
eigen_vector_3 = 1;
}
// Scale eigenvector
eigen_vector_0 = 1.f*eigen_vector_0;
eigen_vector_1 = 1.f*eigen_vector_1;
eigen_vector_2 = 1.f*eigen_vector_2;
eigen_vector_3 = 1.f*eigen_vector_3;
// Get coordinates
int x = threadIdx.x + blockDim.x*blockIdx.x;
int y = threadIdx.y + blockDim.y*blockIdx.y;
int z = threadIdx.z + blockDim.z*blockIdx.z;
// Get index
size_t idx = x + (size_t)w*y + (size_t)w*h*z;
/*
if (idx == 0){
printf("a = %f\n", a);
printf("b = %f\n", b);
printf("c = %f\n", c);
printf("d = %f\n", d);
}
*/
}
// Apply anisotropic diffusion
__global__ void apply_diffusion(float *d_gradx, float *d_grady, float *d_imgIn, float alpha, float C, float *d_t1, float *d_t2, float *d_t3, int w, int h, int nc){
// Define eigenvalue and eigenvector
float eigen_value_0, eigen_value_1;
float eigen_vector_0, eigen_vector_1, eigen_vector_2, eigen_vector_3;
// Get coordinates
int x = threadIdx.x + blockDim.x*blockIdx.x;
int y = threadIdx.y + blockDim.y*blockIdx.y;
int z = threadIdx.z + blockDim.z*blockIdx.z;
// Get index
size_t idx = x + (size_t)w*y + (size_t)w*h*z;
size_t idx_2d = x + (size_t)w*y;
// Compute eigenvalues and eigenvector
if (x < w && y < h && z < nc){
compute_eigenvalue(eigen_value_0, eigen_value_1, eigen_vector_0, eigen_vector_1, eigen_vector_2, eigen_vector_3, d_t1[idx_2d], d_t2[idx_2d], d_t3[idx_2d], w, h);
}
__syncthreads();
// Get Mu
float mu_1 = alpha;
float mu_2;
if (eigen_value_0 == eigen_value_1){
mu_2 = alpha;
} else{
float eigdif = eigen_value_0 - eigen_value_1;
float inside = -C/(eigdif*eigdif);
mu_2 = alpha + (1 - alpha)*exp(inside);
}
// Get diffusion tensor
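// G = mu_1 * v1*v1^T + mu_2 * v2*v2^T, assembled from the two structure-tensor
// eigenvectors computed above (note they are not normalised, so their length scales G).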
float G[4];
G[0] = mu_1*eigen_vector_0*eigen_vector_0 + mu_2*eigen_vector_2*eigen_vector_2;
G[1] = mu_1*eigen_vector_0*eigen_vector_1 + mu_2*eigen_vector_2*eigen_vector_3;
G[2] = mu_1*eigen_vector_1*eigen_vector_0 + mu_2*eigen_vector_3*eigen_vector_2;
G[3] = mu_1*eigen_vector_1*eigen_vector_1 + mu_2*eigen_vector_3*eigen_vector_3;
//G[0] = 1;
//G[1] = 0;
//G[2] = 0;
//G[3] = 1;
__syncthreads();
/*
if (idx == 0){
printf("Before diffusion\n");
printf("d_gradx = %f\n", d_gradx[idx]);
printf("d_grady = %f\n", d_grady[idx]);
}
*/
// Update gradient
if (x < w && y < h && z < nc){
d_gradx[idx] = G[0]*d_gradx[idx] + G[1]*d_grady[idx];
d_grady[idx] = G[2]*d_gradx[idx] + G[3]*d_grady[idx];
}
/*
if (idx == 0){
printf("After diffusion\n");
printf("d_gradx = %f\n", d_gradx[idx]);
printf("d_grady = %f\n", d_grady[idx]);
printf("G[0] = %f\n", G[0]);
printf("G[1] = %f\n", G[1]);
printf("G[2] = %f\n", G[2]);
printf("G[3] = %f\n", G[3]);
printf("eigenvalue_0 = %f\n", eigen_value_0);
printf("eigenvalue_1 = %f\n", eigen_value_1);
printf("eigenvector_0 = %f\n", eigen_vector_0);
printf("eigenvector_1 = %f\n", eigen_vector_1);
printf("eigenvector_2 = %f\n", eigen_vector_2);
printf("eigenvector_3 = %f\n", eigen_vector_3);
printf("Mu_1 = %f\n", mu_1);
printf("Mu_2 = %f\n", mu_2);
}
*/
}
// Update image
__global__ void update_image(float *d_imgIn, float *d_div, float tau, int w, int h, int nc){
// Get coordinates
int x = threadIdx.x + blockDim.x*blockIdx.x;
int y = threadIdx.y + blockDim.y*blockIdx.y;
int z = threadIdx.z + blockDim.z*blockIdx.z;
// Get index
size_t idx = x + (size_t)w*y + (size_t)w*h*z;
/*
if (idx == 0){
printf("Before update\n");
printf("d_imgIn = %f\n", d_imgIn[idx]);
}
*/
if (x < w && y < h && z < nc){
// Update image
d_imgIn[idx] += tau * d_div[idx];
}
/*
if (idx == 0){
printf("After update\n");
printf("d_imgIn = %f\n", d_imgIn[idx]);
}
*/
}
// Compute M
__global__ void compute_M(float *d_m1, float *d_m2, float *d_m3, float *d_gradx, float *d_grady, int w, int h, int nc){
// Get coordinates
int x = threadIdx.x + blockDim.x*blockIdx.x;
int y = threadIdx.y + blockDim.y*blockIdx.y;
// Get index in matrices m
size_t idx_2d = x + (size_t)w*y;
// Initialise sums
float sum1 = 0;
float sum2 = 0;
float sum3 = 0;
if (x < w && y < h){
// Loop through channels
for (size_t c = 0 ; c < nc; c++){
// Get index
size_t idx = x + (size_t)w*y + (size_t)w*h*c;
sum1 += d_gradx[idx] * d_gradx[idx];
sum2 += d_gradx[idx] * d_grady[idx];
sum3 += d_grady[idx] * d_grady[idx];
}
// Fill matrices
d_m1[idx_2d] = sum1;
d_m2[idx_2d] = sum2;
d_m3[idx_2d] = sum3;
}
}
// Rotationally robust gradient
__global__ void rotational_gradient(float *d_imgIn, float *d_gradx, float *d_grady, int w, int h, int nc){
// Get coordinates
int x = threadIdx.x + blockDim.x*blockIdx.x;
int y = threadIdx.y + blockDim.y*blockIdx.y;
int z = threadIdx.z + blockDim.z*blockIdx.z;
// Get indices
size_t idx = x + (size_t)w*y + (size_t)w*h*z;
// Compute gradient
if (x < w && y < h && z < nc){
// Get neighbouring indices
int x_high = x + 1;
int y_high = y + 1;
int x_low = x - 1;
int y_low = y - 1;
// Clamping
if (x_high > w - 1){
x_high = w - 1;
}
if (y_high > h - 1){
y_high = h - 1;
}
if (x_low < 0){
x_low = 0;
}
if (y_low < 0){
y_low = 0;
}
// Get indices of neighbouring indices
size_t idx_x_high_y_high = x_high + (size_t)w*y_high + (size_t)w*h*z;
size_t idx_x_high_y_low = x_high + (size_t)w*y_low + (size_t)w*h*z;
size_t idx_x_low_y_high = x_low + (size_t)w*y_high + (size_t)w*h*z;
size_t idx_x_low_y_low = x_low + (size_t)w*y_low + (size_t)w*h*z;
size_t idx_x_high_y_mid = x_high + (size_t)w*y + (size_t)w*h*z;
size_t idx_x_low_y_mid = x_low + (size_t)w*y + (size_t)w*h*z;
size_t idx_x_mid_y_high = x + (size_t)w*y_high + (size_t)w*h*z;
size_t idx_x_mid_y_low = x + (size_t)w*y_low + (size_t)w*h*z;
// Compute gradient
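// The 3/10/3 weights with the 1/32 normalisation form the Scharr-type derivative
// stencil, which is rotationally more accurate than plain central differences.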
d_gradx[idx] = (3*d_imgIn[idx_x_high_y_high] + 10*d_imgIn[idx_x_high_y_mid]
+ 3*d_imgIn[idx_x_high_y_low] - 3*d_imgIn[idx_x_low_y_high]
- 10*d_imgIn[idx_x_low_y_mid] - 3*d_imgIn[idx_x_low_y_low])/32;
d_grady[idx] = (3*d_imgIn[idx_x_high_y_high] + 10*d_imgIn[idx_x_mid_y_high]
+ 3*d_imgIn[idx_x_low_y_high] - 3*d_imgIn[idx_x_high_y_low]
- 10*d_imgIn[idx_x_mid_y_low] - 3*d_imgIn[idx_x_low_y_low])/32;
}
}
// main
int main(int argc, char **argv)
{
// Before the GPU can process your kernels, a so-called "CUDA context" must be initialized
// This happens on the very first call to a CUDA function, and takes some time (around half a second)
// We will do it right here, so that the run time measurements are accurate
cudaDeviceSynchronize(); CUDA_CHECK;
// Reading command line parameters:
// getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
// If "-param" is not specified, the value of "var" remains unchanged
//
// return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
#ifdef CAMERA
#else
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// load the input image as grayscale if "-gray" is specified
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// Diffusion
float tau = 0.01f;
int N = 20000;
// Convolution kernel
float sigma = 0.5f;
float phi = 3.0f;
getParam("sigma", sigma, argc, argv);
cout << "sigma: " << sigma << endl;
// ### Define your own parameters here as needed
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
// Define kernel dimensions
int r = ceil(3*sigma);
int w_kernel = r * 2 + 1; //windowing
int h_kernel = w_kernel; //Square kernel
int r_phi = ceil(3*phi);
int w_kernel_phi = r_phi * 2 + 1;
int h_kernel_phi = w_kernel_phi;
// Image information
cout << "image: " << w << " x " << h << endl;
// Set the output image format
// ###
// ###
// ### TODO: Change the output image format as needed
// ###
// ###
cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
//cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers
//cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer
// ### Define your own output images here as needed
cv::Mat mgradx(h, w, mIn.type());
cv::Mat mgrady(h, w, mIn.type());
cv::Mat mdiv(h, w, mIn.type());
cv::Mat mt1(h, w, CV_32FC1);
cv::Mat mt2(h, w, CV_32FC1);
cv::Mat mt3(h, w, CV_32FC1);
// Allocate arrays
// input/output image width: w
// input/output image height: h
// input image number of channels: nc
// output image number of channels: mOut.channels(), as defined above (nc, 3, or 1)
// Get array memory
int nbytes = w * h * nc * sizeof(float);
int nbytes_kernel = w_kernel * h_kernel * sizeof(float);
int nbytes_kernel_phi = w_kernel_phi * h_kernel_phi * sizeof(float);
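// Note: the nbytes* values are byte counts (they already include sizeof(float)); they are
// meant for cudaMalloc/cudaMemcpy below, whereas new[] takes a number of elements.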
// allocate raw input image array
float *imgIn = new float[(size_t)w*h*nc];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgOut = new float[(size_t)w*h*mOut.channels()];
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
// Init raw input image array
// opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...)
// But for CUDA it's better to work with layered images: rrr... ggg... bbb...
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations
convert_mat_to_layered (imgIn, mIn);
// ###
// ###
// ### TODO: Main computation
// ###
// ###
////////////////////////////////////////////////////////////////////// Block setting ///////////////////////////////////////////////////////////////////////
dim3 block = dim3(128, 1, 1);
dim3 grid = dim3((w + block.x - 1) / block.x, (h + block.y - 1) / block.y, (nc + block.z - 1) / block.z);
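// (n + block - 1) / block is ceiling division, so the grid covers every pixel and every
// channel even when w, h or nc is not a multiple of the block size; threads that fall
// outside the image are skipped by the bounds checks inside each kernel.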
float alpha = 0.9f;
float C = 0.0000005f;
Timer timer; timer.start();
////////////////////////////////////////////////////////////////////// Arrays (Device) /////////////////////////////////////////////////////////////////////
// Kernel
float *d_kernel;
float *d_kernel_phi;
// Images
float *d_imgIn;
float *d_imgOut;
// Gradients
float *d_gradx;
float *d_grady;
// Gradients for structure tensor
float *d_gradx_tensor;
float *d_grady_tensor;
float *d_m1;
float *d_m2;
float *d_m3;
float *d_t1;
float *d_t2;
float *d_t3;
// Norm
float *d_norm;
// Divergence
float *d_div;
////////////////////////////////////////////////////////////////////// Arrays (Host) /////////////////////////////////////////////////////////////////////
// Kernel
float *kernel = new float[w_kernel * h_kernel];
float *kernel_phi = new float[w_kernel_phi * h_kernel_phi];
// Structure tensor
float *m1 = new float[w*h];
float *m2 = new float[w*h];
float *m3 = new float[w*h];
float *t1 = new float[w*h];
float *t2 = new float[w*h];
float *t3 = new float[w*h];
// Gradient
float *gradx = new float[(size_t)w*h*nc];
float *grady = new float[(size_t)w*h*nc];
// Divergence
float *div = new float[(size_t)w*h*nc];
////////Create kernel
get_kernel(kernel, w_kernel, h_kernel, pi, sigma);
get_kernel(kernel_phi, w_kernel_phi, h_kernel_phi, pi, phi);
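// Both kernels use radius r = ceil(3*sigma) (resp. ceil(3*phi)), i.e. a (2r+1)x(2r+1)
// window: a 3-sigma cutoff keeps ~99.7% of the Gaussian mass, and get_kernel normalises
// the discrete weights so they sum to 1.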
// Processor type
string processor;
////////////////////////////////////////////////////////////////////////// CUDA //////////////////////////////////////////////////////////////////////////
// CUDA malloc
// Kernel
cudaMalloc(&d_kernel, nbytes_kernel); CUDA_CHECK;
cudaMalloc(&d_kernel_phi, nbytes_kernel_phi); CUDA_CHECK;
// Images
cudaMalloc(&d_imgIn, nbytes); CUDA_CHECK;
cudaMalloc(&d_imgOut, nbytes); CUDA_CHECK;
// Gradients
cudaMalloc(&d_gradx, nbytes); CUDA_CHECK;
cudaMalloc(&d_grady, nbytes); CUDA_CHECK;
// Gradients for structure tensor
cudaMalloc(&d_gradx_tensor, nbytes); CUDA_CHECK;
cudaMalloc(&d_grady_tensor, nbytes); CUDA_CHECK;
cudaMalloc(&d_m1, w*h*sizeof(float)); CUDA_CHECK;
cudaMalloc(&d_m2, w*h*sizeof(float)); CUDA_CHECK;
cudaMalloc(&d_m3, w*h*sizeof(float)); CUDA_CHECK;
cudaMalloc(&d_t1, w*h*sizeof(float)); CUDA_CHECK;
cudaMalloc(&d_t2, w*h*sizeof(float)); CUDA_CHECK;
cudaMalloc(&d_t3, w*h*sizeof(float)); CUDA_CHECK;
// Norm
cudaMalloc(&d_norm, w*h*sizeof(float)); CUDA_CHECK;
// Divergence
cudaMalloc(&d_div, nbytes); CUDA_CHECK;
// CUDA copy
cudaMemcpy(d_kernel, kernel, nbytes_kernel, cudaMemcpyHostToDevice); CUDA_CHECK;
cudaMemcpy(d_kernel_phi, kernel_phi, nbytes_kernel_phi, cudaMemcpyHostToDevice); CUDA_CHECK;
cudaMemcpy(d_imgIn, imgIn, nbytes, cudaMemcpyHostToDevice); CUDA_CHECK;
// Iterations
for (size_t i = 0; i < N; i++){
// Initial convolution - structure tensor
convolution_global <<< grid, block >>> (d_imgIn, d_imgOut, d_kernel, w, h, nc, w_kernel, h_kernel); CUDA_CHECK;
// Compute gradient of the convolved image - structure tensor
rotational_gradient <<< grid, block >>> (d_imgOut, d_gradx_tensor, d_grady_tensor, w, h, nc); CUDA_CHECK;
// Compute m1, m2, and m3 - structure tensor
compute_M <<< grid, block >>> (d_m1, d_m2, d_m3, d_gradx_tensor, d_grady_tensor, w, h, nc); CUDA_CHECK;
// Convolution on m1 - structure tensor
convolution_global <<< grid, block >>> (d_m1, d_t1, d_kernel_phi, w, h, 1, w_kernel_phi, h_kernel_phi); CUDA_CHECK;
// Convolution on m2 - structure tensor
convolution_global <<< grid, block >>> (d_m2, d_t2, d_kernel_phi, w, h, 1, w_kernel_phi, h_kernel_phi); CUDA_CHECK;
// Convolution on m3 - structure tensor
convolution_global <<< grid, block >>> (d_m3, d_t3, d_kernel_phi, w, h, 1, w_kernel_phi, h_kernel_phi); CUDA_CHECK;
// Compute gradient
compute_gradient <<< grid, block >>> (d_gradx, d_grady, d_imgIn, w, h, nc); CUDA_CHECK;
// Apply diffusion tensor
apply_diffusion <<< grid, block >>> (d_gradx, d_grady, d_imgIn, alpha, C, d_t1, d_t2, d_t3, w, h, nc); CUDA_CHECK;
// Compute divergence
compute_divergence <<< grid, block >>> (d_div, d_gradx, d_grady, w, h, nc); CUDA_CHECK;
// Update image
update_image <<< grid, block >>> (d_imgIn, d_div, tau, w, h, nc); CUDA_CHECK;
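// Together, compute_gradient / apply_diffusion / compute_divergence / update_image perform
// one explicit Euler step of the anisotropic diffusion PDE: u <- u + tau * div(G * grad(u)).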
cout << "Iteration = " << i << endl;
}
// Copy the results to host
cudaMemcpy(imgOut, d_imgIn, nbytes, cudaMemcpyDeviceToHost); CUDA_CHECK;
cudaMemcpy(gradx, d_gradx_tensor, nbytes, cudaMemcpyDeviceToHost); CUDA_CHECK;
cudaMemcpy(grady, d_grady_tensor, nbytes, cudaMemcpyDeviceToHost); CUDA_CHECK;
cudaMemcpy(div, d_div, nbytes, cudaMemcpyDeviceToHost); CUDA_CHECK;
cudaMemcpy(t1, d_t1, w*h*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(t2, d_t2, w*h*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(t3, d_t3, w*h*sizeof(float), cudaMemcpyDeviceToHost);
// Free memory
cudaFree(d_imgIn); CUDA_CHECK;
cudaFree(d_imgOut); CUDA_CHECK;
cudaFree(d_kernel); CUDA_CHECK;
cudaFree(d_kernel_phi); CUDA_CHECK;
cudaFree(d_div); CUDA_CHECK;
cudaFree(d_gradx); CUDA_CHECK;
cudaFree(d_grady); CUDA_CHECK;
cudaFree(d_gradx_tensor); CUDA_CHECK;
cudaFree(d_grady_tensor); CUDA_CHECK;
cudaFree(d_m1); CUDA_CHECK;
cudaFree(d_m2); CUDA_CHECK;
cudaFree(d_m3); CUDA_CHECK;
cudaFree(d_norm); CUDA_CHECK;
cudaFree(d_t1); CUDA_CHECK;
cudaFree(d_t2); CUDA_CHECK;
cudaFree(d_t3); CUDA_CHECK;
// Type of processor
processor = "GPU - global memory";
cout << processor << endl;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
timer.end(); float t = timer.get();
cout << "time: " << t*1000 << " ms" << endl;
// show input image
showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100)
// show output image: first convert to interleaved opencv format from the layered raw array
convert_layered_to_mat(mOut, imgOut);
showImage("Diffusion", mOut, 100+w+40, 100);
// ### Display your own output images here as needed
convert_layered_to_mat(mgradx, gradx);
convert_layered_to_mat(mgrady, grady);
convert_layered_to_mat(mdiv, div);
convert_layered_to_mat(mt1, t1);
convert_layered_to_mat(mt2, t2);
convert_layered_to_mat(mt3, t3);
//showImage("t1", 10.f*mt1, 50, 250);
//showImage("t2", 10.f*mt2, 50 + w, 250);
//showImage("t3", 10.f*mt3, 50 + 2 * w, 250);
//showImage("grad_x", mgradx, 100+w+50, 150);
//showImage("grad_y", mgrady, 100+w+60, 150);
//showImage("div", mdiv, 100+w+80, 200);
/*
showImage("m1", 10.f*mM1, 50, 200);
showImage("m2", 10.f*mM2, 50 + w, 200);
showImage("m3", 10.f*mM3, 50 + 2 * w, 200);
showImage("t1", 10.f*mT1, 50, 250);
showImage("t2", 10.f*mT2, 50 + w, 250);
showImage("t3", 10.f*mT3, 50 + 2 * w, 250);
*/
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_result.png",mOut*255.f);
// free allocated arrays
#ifdef CAMERA
delete[] imgIn;
delete[] imgOut;
#else
delete[] imgIn;
delete[] imgOut;
delete[] kernel;
delete[] kernel_phi;
delete[] m1;
delete[] m2;
delete[] m3;
delete[] t1;
delete[] t2;
delete[] t3;
delete[] gradx;
delete[] grady;
delete[] div;
#endif
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
|
281d218e759c8e329edde49054b6f2338c8f1668.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//////////////////////////////////////////////////////////////////////////
////This is the code implementation for Hanon finger exercise -- threads
////Dartmouth COSC89.25/189.03, GPU Programming and High-Performance Computing
//////////////////////////////////////////////////////////////////////////
#include <cstdio>
#include <vector>
#include <fstream>
using namespace std;
//////////////////////////////////////////////////////////////////////////
////TODO 0: Please replace the following strings with your team name and author names
////Note: Please do not use space in the string, use "_" instead
//////////////////////////////////////////////////////////////////////////
namespace name
{
std::string team="Slim_Shaders";
std::string author_1="Andrw_Yang";
std::string author_2="Matthew_Kenney";
};
//////////////////////////////////////////////////////////////////////////
////Hanon finger exercise for threads, Section 1, 1D array with 1D threads
////Learn how to use the built-in variables threadIdx, blockIdx, gridDim, and blockDim
////Exercise 1 is a sample kernel function, you don't need to implement this function
////Kernel dimension: <<<1,64>>>
////Expected output: 0,1,2,3,4,...,61,62,63
__global__ void Hanon_Exercise_0(int* array)
{
array[threadIdx.x]=threadIdx.x;
}
////Kernel dimension: <<<1,64>>>
////Expected array values: 0,1,0,1,...,0,1
__global__ void Hanon_Exercise_1(int* array)
{
/*TODO: Your implementation*/
array[threadIdx.x]=threadIdx.x%2;
}
////Kernel dimension: <<<1,64>>>
////Expected array values: 1,2,3,4,1,2,3,4,...,1,2,3,4
__global__ void Hanon_Exercise_2(int* array)
{
/*TODO: Your implementation*/
array[threadIdx.x]=1+threadIdx.x%4;
}
////Kernel dimension: <<<1,64>>>
////Expected array values: 0,1,2,3,4,3,2,1,0,1,2,3,4,3,2,1,...,0,1,2,3,4,3,2,1
__global__ void Hanon_Exercise_3(int* array)
{
/*TODO: Your implementation*/
// Triangle wave with period 8: 0,1,2,3,4,3,2,1,...
int m=threadIdx.x%8;
array[threadIdx.x]=(m<5)?m:(8-m);
}
//////////////////////////////////////////////////////////////////////////
////Hanon finger exercise for threads, Section 2, 1D array with specified-dimension threads
////Kernel dimension: <<<dim3(2,2,1),dim3(4,4,1)>>>
////Expected output: 0,1,2,3,4,...,61,62,63
__global__ void Hanon_Exercise_4(int* array)
{
/*TODO: Your implementation*/
// block 0-1
// thread 0-3
int block_num = blockIdx.y * gridDim.x + blockIdx.x; // block_num = blockIdx.y * 2 + blockIdx.x
int thread_num = threadIdx.y * blockDim.x + threadIdx.x; // thread_num = threadIdx.y * 4 + threadIdx.x
int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
int thread_id = block_num * threads_per_block + thread_num;
array[thread_id] = thread_id;
}
////Kernel dimension: <<<dim3(2,2,1),dim3(4,4,1)>>>
////Expected array values: 0,1,0,1,...,0,1
__global__ void Hanon_Exercise_5(int* array)
{
/*TODO: Your implementation*/
int block_num = blockIdx.y * gridDim.x + blockIdx.x; // block_num = blockIdx.y * 2 + blockIdx.x
int thread_num = threadIdx.y * blockDim.x + threadIdx.x; // thread_num = threadIdx.y * 4 + threadIdx.x
int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
int thread_id = block_num * threads_per_block + thread_num;
array[thread_id] = thread_id % 2;
}
////Kernel dimension: <<<8,dim3(2,4)>>>
////Expected array values: 1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,...,16,16,16,16
__global__ void Hanon_Exercise_6(int* array)
{
/*TODO: Your implementation*/
int block_num = blockIdx.x;
int thread_num = threadIdx.y * blockDim.x + threadIdx.x;
int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
int thread_id = block_num * threads_per_block + thread_num;
array[thread_id] = thread_id / 4 + 1;
}
////Kernel dimension: <<<8,dim3(2,2,2)>>>
////Expected array values: 1,2,3,4,5,6,7,8,1,2,3,4,5,6,7,8,...,1,2,3,4,5,6,7,8
__global__ void Hanon_Exercise_7(int* array)
{
/*TODO: Your implementation*/
int block_num = blockIdx.x;
int thread_num = (threadIdx.z * blockDim.y * blockDim.x) + (threadIdx.y * blockDim.x) + threadIdx.x;
int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
int thread_id = block_num * threads_per_block + thread_num;
array[thread_id] = thread_id % 8 + 1;
}
//////////////////////////////////////////////////////////////////////////
////Hanon finger exercise for threads, Section 3, 2D array
////Here we declare a 2D array on device with the size of 8x8
__device__ int b_on_dev[8][8];
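////Note: b_on_dev is a __device__ symbol in GPU global memory; host code cannot take its
////address directly and instead uses hipGetSymbolAddress / hipMemcpyFromSymbol, as in the test driver below.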
////Kernel dimension: <<<1,64>>>
////Expected 2D array values:
////0 1 2 3 4 5 6 7
////8 9 10 11 12 13 14 15
////16 17 18 19 20 21 22 23
////24 25 26 27 28 29 30 31
////32 33 34 35 36 37 38 39
////40 41 42 43 44 45 46 47
////48 49 50 51 52 53 54 55
////56 57 58 59 60 61 62 63
__global__ void Hanon_Exercise_8()
{
/*TODO: Your implementation*/
////Hint: assign values to b_on_dev, e.g., b_on_dev[threadIdx.x][threadIdx.y]=1
b_on_dev[threadIdx.x/8][threadIdx.x%8]= threadIdx.x;
}
////Kernel dimension: <<<1,dim3(8,8)>>>
////Expected 2D array values: the same as Exercise 8
__global__ void Hanon_Exercise_9()
{
/*TODO: Your implementation*/
int block_num = blockIdx.x;
int thread_num = (threadIdx.y * blockDim.x) + threadIdx.x;
int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
int thread_id = block_num * threads_per_block + thread_num;
b_on_dev[threadIdx.y][threadIdx.x]= thread_id;
}
////Kernel dimension: <<<1,dim3(8,8)>>>
////Expected 2D array values: the transpose of Exercise 8, i.e.:
////0 8 16 24 32 40 48 56
////1 9 17 25 33 41 49 57
////2 10 18 26 34 42 50 58
////3 11 19 27 35 43 51 59
////4 12 20 28 36 44 52 60
////5 13 21 29 37 45 53 61
////6 14 22 30 38 46 54 62
////7 15 23 31 39 47 55 63
__global__ void Hanon_Exercise_10()
{
/*TODO: Your implementation*/
int block_num = blockIdx.x;
int thread_num = (threadIdx.y * blockDim.x) + threadIdx.x;
int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
int thread_id = block_num * threads_per_block + thread_num;
b_on_dev[threadIdx.x][threadIdx.y]= thread_id;
}
////Kernel dimension: <<<dim3(2,2),dim3(4,4)>>>
////Expected 2D array values: 4 repeated blocks
////0 1 2 3 0 1 2 3
////4 5 6 7 4 5 6 7
////8 9 10 11 8 9 10 11
////12 13 14 15 12 13 14 15
////0 1 2 3 0 1 2 3
////4 5 6 7 4 5 6 7
////8 9 10 11 8 9 10 11
////12 13 14 15 12 13 14 15
__global__ void Hanon_Exercise_11()
{
/*TODO: Your implementation*/
int j= blockIdx.y * blockDim.y + threadIdx.y;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int thread_num = threadIdx.x * blockDim.y + threadIdx.y;
b_on_dev[i][j]= thread_num;
}
////Your tasks are all done here!
//////////////////////////////////////////////////////////////////////////
ofstream out;
////Helper function: copy the device array to host and print
__host__ void Print_Array_On_Device(const int test_id,const int* array_on_device,const int s)
{
std::vector<int> array_on_host(s);
hipMemcpy(&array_on_host[0],array_on_device,s*sizeof(int),hipMemcpyDeviceToHost);
printf("\nHanon exercise %d:\n",test_id);
out<<"\nHanon exercise "<<test_id<<endl;
for(int i=0;i<s;i++)printf("%d ",array_on_host[i]);printf("\n");
for(int i=0;i<s;i++)out<<array_on_host[i]<<" ";out<<endl;
}
////Helper function: copy the device array to host and print
__host__ void Print_b_On_Device(const int test_id)
{
int b_on_host[8][8];
hipMemcpyFromSymbol((void*)b_on_host,b_on_dev,64*sizeof(int));
printf("\nHanon exercise %d:\n",test_id);
out<<"\nHanon exercise "<<test_id<<endl;
for(int i=0;i<8;i++){
for(int j=0;j<8;j++){
printf("%d\t",b_on_host[i][j]);
out<<b_on_host[i][j]<<"\t";
}
printf("\n");
out<<endl;
}
printf("\n");
out<<endl;
}
////Test your implementation for exercises
////Note: Please do not change this function!
__host__ void Hanon_Exercise_Test()
{
////allocate array on device
const int s=64;
int* array_on_device=0;
hipMalloc((void**)&array_on_device,s*sizeof(int));
hipMemset(array_on_device,0,s*sizeof(int));
hipLaunchKernelGGL(( Hanon_Exercise_0), dim3(1),dim3(64), 0, 0, array_on_device);
Print_Array_On_Device(0,array_on_device,s);
hipMemset(array_on_device,0,s*sizeof(int));
hipLaunchKernelGGL(( Hanon_Exercise_1), dim3(1),dim3(64), 0, 0, array_on_device);
Print_Array_On_Device(1,array_on_device,s);
hipMemset(array_on_device,0,s*sizeof(int));
hipLaunchKernelGGL(( Hanon_Exercise_2), dim3(1),dim3(64), 0, 0, array_on_device);
Print_Array_On_Device(2,array_on_device,s);
hipMemset(array_on_device,0,s*sizeof(int));
hipLaunchKernelGGL(( Hanon_Exercise_3), dim3(1),dim3(64), 0, 0, array_on_device);
Print_Array_On_Device(3,array_on_device,s);
hipMemset(array_on_device,0,s*sizeof(int));
hipLaunchKernelGGL(( Hanon_Exercise_4), dim3(dim3(2,2,1)),dim3(dim3(4,4,1)), 0, 0, array_on_device);
Print_Array_On_Device(4,array_on_device,s);
hipMemset(array_on_device,0,s*sizeof(int));
hipLaunchKernelGGL(( Hanon_Exercise_5), dim3(dim3(2,2,1)),dim3(dim3(4,4,1)), 0, 0, array_on_device);
Print_Array_On_Device(5,array_on_device,s);
hipMemset(array_on_device,0,s*sizeof(int));
hipLaunchKernelGGL(( Hanon_Exercise_6), dim3(8),dim3(dim3(2,4)), 0, 0, array_on_device);
Print_Array_On_Device(6,array_on_device,s);
hipMemset(array_on_device,0,s*sizeof(int));
hipLaunchKernelGGL(( Hanon_Exercise_7), dim3(8),dim3(dim3(2,2,2)), 0, 0, array_on_device);
Print_Array_On_Device(7,array_on_device,s);
int* b_on_dev_ptr=0;
hipGetSymbolAddress((void**)&b_on_dev_ptr,b_on_dev);
hipMemset(b_on_dev_ptr,0,64*sizeof(int));
hipLaunchKernelGGL(( Hanon_Exercise_8), dim3(1),dim3(64), 0, 0, );
printf("\nHanon exercise 8:\n");
Print_b_On_Device(8);
b_on_dev_ptr=0;
hipGetSymbolAddress((void**)&b_on_dev_ptr,b_on_dev);
hipMemset(b_on_dev_ptr,0,64*sizeof(int));
hipLaunchKernelGGL(( Hanon_Exercise_9), dim3(1),dim3(dim3(8,8)), 0, 0, );
printf("\nHanon exercise 9:\n");
Print_b_On_Device(9);
b_on_dev_ptr=0;
hipGetSymbolAddress((void**)&b_on_dev_ptr,b_on_dev);
hipMemset(b_on_dev_ptr,0,64*sizeof(int));
hipLaunchKernelGGL(( Hanon_Exercise_10), dim3(1),dim3(dim3(8,8)), 0, 0, );
printf("\nHanon exercise 10:\n");
Print_b_On_Device(10);
b_on_dev_ptr=0;
hipGetSymbolAddress((void**)&b_on_dev_ptr,b_on_dev);
hipMemset(b_on_dev_ptr,0,64*sizeof(int));
hipLaunchKernelGGL(( Hanon_Exercise_11), dim3(dim3(2,2)),dim3(dim3(4,4)), 0, 0, );
printf("\nHanon exercise 11:\n");
Print_b_On_Device(11);
}
int main()
{
if(name::team=="Team_X"){
printf("\nPlease specify your team name and team member names in name::team and name::author to start.\n");
return 0;
}
std::string file_name=name::team+"_exercise_thread.dat";
out.open(file_name.c_str());
if(out.fail()){
printf("\ncannot open file %s to record results\n",file_name.c_str());
return 0;
}
Hanon_Exercise_Test();
return 0;
} | 281d218e759c8e329edde49054b6f2338c8f1668.cu | //////////////////////////////////////////////////////////////////////////
////This is the code implementation for Hanon finger exercise -- threads
////Dartmouth COSC89.25/189.03, GPU Programming and High-Performance Computing
//////////////////////////////////////////////////////////////////////////
#include <cstdio>
#include <vector>
#include <fstream>
using namespace std;
//////////////////////////////////////////////////////////////////////////
////TODO 0: Please replace the following strings with your team name and author names
////Note: Please do not use space in the string, use "_" instead
//////////////////////////////////////////////////////////////////////////
namespace name
{
std::string team="Slim_Shaders";
std::string author_1="Andrw_Yang";
std::string author_2="Matthew_Kenney";
};
//////////////////////////////////////////////////////////////////////////
////Hanon finger exercise for threads, Section 1, 1D array with 1D threads
////Learn how to use the built-in variables threadIdx, blockIdx, gridDim, and blockDim
////Exercise 1 is a sample kernel function, you don't need to implement this function
////Kernel dimension: <<<1,64>>>
////Expected output: 0,1,2,3,4,...,61,62,63
__global__ void Hanon_Exercise_0(int* array)
{
array[threadIdx.x]=threadIdx.x;
}
////Kernel dimension: <<<1,64>>>
////Expected array values: 0,1,0,1,...,0,1
__global__ void Hanon_Exercise_1(int* array)
{
/*TODO: Your implementation*/
array[threadIdx.x]=threadIdx.x%2;
}
////Kernel dimension: <<<1,64>>>
////Expected array values: 1,2,3,4,1,2,3,4,...,1,2,3,4
__global__ void Hanon_Exercise_2(int* array)
{
/*TODO: Your implementation*/
array[threadIdx.x]=1+threadIdx.x%4;
}
////Kernel dimension: <<<1,64>>>
////Expected array values: 0,1,2,3,4,3,2,1,0,1,2,3,4,3,2,1,...,0,1,2,3,4,3,2,1
__global__ void Hanon_Exercise_3(int* array)
{
/*TODO: Your implementation*/
// Triangle wave with period 8: 0,1,2,3,4,3,2,1,...
int m=threadIdx.x%8;
array[threadIdx.x]=(m<5)?m:(8-m);
}
//////////////////////////////////////////////////////////////////////////
////Hanon finger exercise for threads, Section 2, 1D array with specified-dimension threads
////Kernel dimension: <<<dim3(2,2,1),dim3(4,4,1)>>>
////Expected output: 0,1,2,3,4,...,61,62,63
__global__ void Hanon_Exercise_4(int* array)
{
/*TODO: Your implementation*/
// block 0-1
// thread 0-3
int block_num = blockIdx.y * gridDim.x + blockIdx.x; // block_num = blockIdx.y * 2 + blockIdx.x
int thread_num = threadIdx.y * blockDim.x + threadIdx.x; // thread_num = threadIdx.y * 4 + threadIdx.x
int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
int thread_id = block_num * threads_per_block + thread_num;
array[thread_id] = thread_id;
}
////Kernel dimension: <<<dim3(2,2,1),dim3(4,4,1)>>>
////Expected array values: 0,1,0,1,...,0,1
__global__ void Hanon_Exercise_5(int* array)
{
/*TODO: Your implementation*/
int block_num = blockIdx.y * gridDim.x + blockIdx.x; // block_num = blockIdx.y * 2 + blockIdx.x
int thread_num = threadIdx.y * blockDim.x + threadIdx.x; // thread_num = threadIdx.y * 4 + threadIdx.x
int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
int thread_id = block_num * threads_per_block + thread_num;
array[thread_id] = thread_id % 2;
}
////Kernel dimension: <<<8,dim3(2,4)>>>
////Expected array values: 1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,...,16,16,16,16
__global__ void Hanon_Exercise_6(int* array)
{
/*TODO: Your implementation*/
int block_num = blockIdx.x;
int thread_num = threadIdx.y * blockDim.x + threadIdx.x;
int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
int thread_id = block_num * threads_per_block + thread_num;
array[thread_id] = thread_id / 4 + 1;
}
////Kernel dimension: <<<8,dim3(2,2,2)>>>
////Expected array values: 1,2,3,4,5,6,7,8,1,2,3,4,5,6,7,8,...,1,2,3,4,5,6,7,8
__global__ void Hanon_Exercise_7(int* array)
{
/*TODO: Your implementation*/
int block_num = blockIdx.x;
int thread_num = (threadIdx.z * blockDim.y * blockDim.x) + (threadIdx.y * blockDim.x) + threadIdx.x;
int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
int thread_id = block_num * threads_per_block + thread_num;
array[thread_id] = thread_id % 8 + 1;
}
//////////////////////////////////////////////////////////////////////////
////Hanon finger exercise for threads, Section 3, 2D array
////Here we declare a 2D array on device with the size of 8x8
__device__ int b_on_dev[8][8];
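////Note: b_on_dev is a __device__ symbol in GPU global memory; host code cannot take its
////address directly and instead uses cudaGetSymbolAddress / cudaMemcpyFromSymbol, as in the test driver below.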
////Kernel dimension: <<<1,64>>>
////Expected 2D array values:
////0 1 2 3 4 5 6 7
////8 9 10 11 12 13 14 15
////16 17 18 19 20 21 22 23
////24 25 26 27 28 29 30 31
////32 33 34 35 36 37 38 39
////40 41 42 43 44 45 46 47
////48 49 50 51 52 53 54 55
////56 57 58 59 60 61 62 63
__global__ void Hanon_Exercise_8()
{
/*TODO: Your implementation*/
////Hint: assign values to b_on_dev, e.g., b_on_dev[threadIdx.x][threadIdx.y]=1
b_on_dev[threadIdx.x/8][threadIdx.x%8]= threadIdx.x;
}
////Kernel dimension: <<<1,dim3(8,8)>>>
////Expected 2D array values: the same as Exercise 8
__global__ void Hanon_Exercise_9()
{
/*TODO: Your implementation*/
int block_num = blockIdx.x;
int thread_num = (threadIdx.y * blockDim.x) + threadIdx.x;
int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
int thread_id = block_num * threads_per_block + thread_num;
b_on_dev[threadIdx.y][threadIdx.x]= thread_id;
}
////Kernel dimension: <<<1,dim3(8,8)>>>
////Expected 2D array values: the transpose of Exercise 8, i.e.:
////0 8 16 24 32 40 48 56
////1 9 17 25 33 41 49 57
////2 10 18 26 34 42 50 58
////3 11 19 27 35 43 51 59
////4 12 20 28 36 44 52 60
////5 13 21 29 37 45 53 61
////6 14 22 30 38 46 54 62
////7 15 23 31 39 47 55 63
__global__ void Hanon_Exercise_10()
{
/*TODO: Your implementation*/
int block_num = blockIdx.x;
int thread_num = (threadIdx.y * blockDim.x) + threadIdx.x;
int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
int thread_id = block_num * threads_per_block + thread_num;
b_on_dev[threadIdx.x][threadIdx.y]= thread_id;
}
////Kernel dimension: <<<dim3(2,2),dim3(4,4)>>>
////Expected 2D array values: 4 repeated blocks
////0 1 2 3 0 1 2 3
////4 5 6 7 4 5 6 7
////8 9 10 11 8 9 10 11
////12 13 14 15 12 13 14 15
////0 1 2 3 0 1 2 3
////4 5 6 7 4 5 6 7
////8 9 10 11 8 9 10 11
////12 13 14 15 12 13 14 15
__global__ void Hanon_Exercise_11()
{
/*TODO: Your implementation*/
int j= blockIdx.y * blockDim.y + threadIdx.y;
int i = blockIdx.x * blockDim.x + threadIdx.x;
int thread_num = threadIdx.x * blockDim.y + threadIdx.y;
b_on_dev[i][j]= thread_num;
}
////Your tasks are all done here!
//////////////////////////////////////////////////////////////////////////
ofstream out;
////Helper function: copy the device array to host and print
__host__ void Print_Array_On_Device(const int test_id,const int* array_on_device,const int s)
{
std::vector<int> array_on_host(s);
cudaMemcpy(&array_on_host[0],array_on_device,s*sizeof(int),cudaMemcpyDeviceToHost);
printf("\nHanon exercise %d:\n",test_id);
out<<"\nHanon exercise "<<test_id<<endl;
for(int i=0;i<s;i++)printf("%d ",array_on_host[i]);printf("\n");
for(int i=0;i<s;i++)out<<array_on_host[i]<<" ";out<<endl;
}
////Helper function: copy the device array to host and print
__host__ void Print_b_On_Device(const int test_id)
{
int b_on_host[8][8];
cudaMemcpyFromSymbol((void*)b_on_host,b_on_dev,64*sizeof(int));
printf("\nHanon exercise %d:\n",test_id);
out<<"\nHanon exercise "<<test_id<<endl;
for(int i=0;i<8;i++){
for(int j=0;j<8;j++){
printf("%d\t",b_on_host[i][j]);
out<<b_on_host[i][j]<<"\t";
}
printf("\n");
out<<endl;
}
printf("\n");
out<<endl;
}
////Test your implementation for exercises
////Note: Please do not change this function!
__host__ void Hanon_Exercise_Test()
{
////allocate array on device
const int s=64;
int* array_on_device=0;
cudaMalloc((void**)&array_on_device,s*sizeof(int));
cudaMemset(array_on_device,0,s*sizeof(int));
Hanon_Exercise_0<<<1,64>>>(array_on_device);
Print_Array_On_Device(0,array_on_device,s);
cudaMemset(array_on_device,0,s*sizeof(int));
Hanon_Exercise_1<<<1,64>>>(array_on_device);
Print_Array_On_Device(1,array_on_device,s);
cudaMemset(array_on_device,0,s*sizeof(int));
Hanon_Exercise_2<<<1,64>>>(array_on_device);
Print_Array_On_Device(2,array_on_device,s);
cudaMemset(array_on_device,0,s*sizeof(int));
Hanon_Exercise_3<<<1,64>>>(array_on_device);
Print_Array_On_Device(3,array_on_device,s);
cudaMemset(array_on_device,0,s*sizeof(int));
Hanon_Exercise_4<<<dim3(2,2,1),dim3(4,4,1)>>>(array_on_device);
Print_Array_On_Device(4,array_on_device,s);
cudaMemset(array_on_device,0,s*sizeof(int));
Hanon_Exercise_5<<<dim3(2,2,1),dim3(4,4,1)>>>(array_on_device);
Print_Array_On_Device(5,array_on_device,s);
cudaMemset(array_on_device,0,s*sizeof(int));
Hanon_Exercise_6<<<8,dim3(2,4)>>>(array_on_device);
Print_Array_On_Device(6,array_on_device,s);
cudaMemset(array_on_device,0,s*sizeof(int));
Hanon_Exercise_7<<<8,dim3(2,2,2)>>>(array_on_device);
Print_Array_On_Device(7,array_on_device,s);
int* b_on_dev_ptr=0;
cudaGetSymbolAddress((void**)&b_on_dev_ptr,b_on_dev);
cudaMemset(b_on_dev_ptr,0,64*sizeof(int));
Hanon_Exercise_8<<<1,64>>>();
printf("\nHanon exercise 8:\n");
Print_b_On_Device(8);
b_on_dev_ptr=0;
cudaGetSymbolAddress((void**)&b_on_dev_ptr,b_on_dev);
cudaMemset(b_on_dev_ptr,0,64*sizeof(int));
Hanon_Exercise_9<<<1,dim3(8,8)>>>();
printf("\nHanon exercise 9:\n");
Print_b_On_Device(9);
b_on_dev_ptr=0;
cudaGetSymbolAddress((void**)&b_on_dev_ptr,b_on_dev);
cudaMemset(b_on_dev_ptr,0,64*sizeof(int));
Hanon_Exercise_10<<<1,dim3(8,8)>>>();
printf("\nHanon exercise 10:\n");
Print_b_On_Device(10);
b_on_dev_ptr=0;
cudaGetSymbolAddress((void**)&b_on_dev_ptr,b_on_dev);
cudaMemset(b_on_dev_ptr,0,64*sizeof(int));
Hanon_Exercise_11<<<dim3(2,2),dim3(4,4)>>>();
printf("\nHanon exercise 11:\n");
Print_b_On_Device(11);
}
int main()
{
if(name::team=="Team_X"){
printf("\nPlease specify your team name and team member names in name::team and name::author to start.\n");
return 0;
}
std::string file_name=name::team+"_exercise_thread.dat";
out.open(file_name.c_str());
if(out.fail()){
printf("\ncannot open file %s to record results\n",file_name.c_str());
return 0;
}
Hanon_Exercise_Test();
return 0;
} |
ab9a46d354c4721485c8b78dd54b607e9d819587.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
namespace cv { namespace gpu { namespace device
{
namespace split_merge
{
template <typename T, size_t elem_size = sizeof(T)>
struct TypeTraits
{
typedef T type;
typedef T type2;
typedef T type3;
typedef T type4;
};
template <typename T>
struct TypeTraits<T, 1>
{
typedef char type;
typedef char2 type2;
typedef char3 type3;
typedef char4 type4;
};
template <typename T>
struct TypeTraits<T, 2>
{
typedef short type;
typedef short2 type2;
typedef short3 type3;
typedef short4 type4;
};
template <typename T>
struct TypeTraits<T, 4>
{
typedef int type;
typedef int2 type2;
typedef int3 type3;
typedef int4 type4;
};
template <typename T>
struct TypeTraits<T, 8>
{
typedef double type;
typedef double2 type2;
//typedef double3 type3;
//typedef double4 type3;
};
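// TypeTraits maps an element size (1/2/4/8 bytes) onto matching built-in vector types so
// the merge/split kernels can issue one wide load/store per pixel; double3/double4 are left
// commented out, presumably because a 24/32-byte element would not map onto a single aligned
// vector transaction (the double specialisations below fall back to scalar and double2 writes).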
typedef void (*MergeFunction)(const PtrStepSzb* src, PtrStepSzb& dst, const hipStream_t& stream);
typedef void (*SplitFunction)(const PtrStepSzb& src, PtrStepSzb* dst, const hipStream_t& stream);
//------------------------------------------------------------
// Merge
template <typename T>
__global__ void mergeC2_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type2 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_y[x] = dst_elem;
}
}
template <typename T>
__global__ void mergeC3_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type3 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
const T* src2_y = (const T*)(src2 + y * src2_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_elem.z = src2_y[x];
dst_y[x] = dst_elem;
}
}
template <>
__global__ void mergeC3_<double>(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src0_y = (const double*)(src0 + y * src0_step);
const double* src1_y = (const double*)(src1 + y * src1_step);
const double* src2_y = (const double*)(src2 + y * src2_step);
double* dst_y = (double*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_y[3 * x] = src0_y[x];
dst_y[3 * x + 1] = src1_y[x];
dst_y[3 * x + 2] = src2_y[x];
}
}
template <typename T>
__global__ void mergeC4_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
const uchar* src3, size_t src3_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type4 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
const T* src2_y = (const T*)(src2 + y * src2_step);
const T* src3_y = (const T*)(src3 + y * src3_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_elem.z = src2_y[x];
dst_elem.w = src3_y[x];
dst_y[x] = dst_elem;
}
}
template <>
__global__ void mergeC4_<double>(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
const uchar* src3, size_t src3_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src0_y = (const double*)(src0 + y * src0_step);
const double* src1_y = (const double*)(src1 + y * src1_step);
const double* src2_y = (const double*)(src2 + y * src2_step);
const double* src3_y = (const double*)(src3 + y * src3_step);
double2* dst_y = (double2*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_y[2 * x] = make_double2(src0_y[x], src1_y[x]);
dst_y[2 * x + 1] = make_double2(src2_y[x], src3_y[x]);
}
}
template <typename T>
static void mergeC2_(const PtrStepSzb* src, PtrStepSzb& dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
hipLaunchKernelGGL(( mergeC2_<T>), dim3(grid), dim3(block), 0, stream,
src[0].data, src[0].step,
src[1].data, src[1].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
template <typename T>
static void mergeC3_(const PtrStepSzb* src, PtrStepSzb& dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
hipLaunchKernelGGL(( mergeC3_<T>), dim3(grid), dim3(block), 0, stream,
src[0].data, src[0].step,
src[1].data, src[1].step,
src[2].data, src[2].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
template <typename T>
static void mergeC4_(const PtrStepSzb* src, PtrStepSzb& dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
hipLaunchKernelGGL(( mergeC4_<T>), dim3(grid), dim3(block), 0, stream,
src[0].data, src[0].step,
src[1].data, src[1].step,
src[2].data, src[2].step,
src[3].data, src[3].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
void merge_caller(const PtrStepSzb* src, PtrStepSzb& dst,
int total_channels, size_t elem_size,
const hipStream_t& stream)
{
static MergeFunction merge_func_tbl[] =
{
mergeC2_<char>, mergeC2_<short>, mergeC2_<int>, 0, mergeC2_<double>,
mergeC3_<char>, mergeC3_<short>, mergeC3_<int>, 0, mergeC3_<double>,
mergeC4_<char>, mergeC4_<short>, mergeC4_<int>, 0, mergeC4_<double>,
};
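// Rows of the table cover 2-, 3- and 4-channel merges; the column index elem_size >> 1
// maps 1/2/4/8-byte elements to entries 0/1/2/4 (entry 3 is an unused placeholder).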
size_t merge_func_id = (total_channels - 2) * 5 + (elem_size >> 1);
MergeFunction merge_func = merge_func_tbl[merge_func_id];
if (merge_func == 0)
cv::gpu::error("Unsupported channel count or data type", __FILE__, __LINE__, "merge_caller");
merge_func(src, dst, stream);
}
//------------------------------------------------------------
// Split
template <typename T>
__global__ void splitC2_(const uchar* src, size_t src_step,
int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step)
{
typedef typename TypeTraits<T>::type2 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
}
}
template <typename T>
__global__ void splitC3_(const uchar* src, size_t src_step,
int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step)
{
typedef typename TypeTraits<T>::type3 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
T* dst2_y = (T*)(dst2 + y * dst2_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
dst2_y[x] = src_elem.z;
}
}
template <>
__global__ void splitC3_<double>(
const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src_y = (const double*)(src + y * src_step);
double* dst0_y = (double*)(dst0 + y * dst0_step);
double* dst1_y = (double*)(dst1 + y * dst1_step);
double* dst2_y = (double*)(dst2 + y * dst2_step);
if (x < cols && y < rows)
{
dst0_y[x] = src_y[3 * x];
dst1_y[x] = src_y[3 * x + 1];
dst2_y[x] = src_y[3 * x + 2];
}
}
template <typename T>
__global__ void splitC4_(const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step,
uchar* dst3, size_t dst3_step)
{
typedef typename TypeTraits<T>::type4 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
T* dst2_y = (T*)(dst2 + y * dst2_step);
T* dst3_y = (T*)(dst3 + y * dst3_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
dst2_y[x] = src_elem.z;
dst3_y[x] = src_elem.w;
}
}
template <>
__global__ void splitC4_<double>(
const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step,
uchar* dst3, size_t dst3_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double2* src_y = (const double2*)(src + y * src_step);
double* dst0_y = (double*)(dst0 + y * dst0_step);
double* dst1_y = (double*)(dst1 + y * dst1_step);
double* dst2_y = (double*)(dst2 + y * dst2_step);
double* dst3_y = (double*)(dst3 + y * dst3_step);
if (x < cols && y < rows)
{
double2 src_elem1 = src_y[2 * x];
double2 src_elem2 = src_y[2 * x + 1];
dst0_y[x] = src_elem1.x;
dst1_y[x] = src_elem1.y;
dst2_y[x] = src_elem2.x;
dst3_y[x] = src_elem2.y;
}
}
template <typename T>
static void splitC2_(const PtrStepSzb& src, PtrStepSzb* dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
hipLaunchKernelGGL(( splitC2_<T>), dim3(grid), dim3(block), 0, stream,
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
template <typename T>
static void splitC3_(const PtrStepSzb& src, PtrStepSzb* dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
hipLaunchKernelGGL(( splitC3_<T>), dim3(grid), dim3(block), 0, stream,
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step,
dst[2].data, dst[2].step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
template <typename T>
static void splitC4_(const PtrStepSzb& src, PtrStepSzb* dst, const hipStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
hipLaunchKernelGGL(( splitC4_<T>), dim3(grid), dim3(block), 0, stream,
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step,
dst[2].data, dst[2].step,
dst[3].data, dst[3].step);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall(hipDeviceSynchronize());
}
void split_caller(const PtrStepSzb& src, PtrStepSzb* dst, int num_channels, size_t elem_size1, const hipStream_t& stream)
{
static SplitFunction split_func_tbl[] =
{
splitC2_<char>, splitC2_<short>, splitC2_<int>, 0, splitC2_<double>,
splitC3_<char>, splitC3_<short>, splitC3_<int>, 0, splitC3_<double>,
splitC4_<char>, splitC4_<short>, splitC4_<int>, 0, splitC4_<double>,
};
size_t split_func_id = (num_channels - 2) * 5 + (elem_size1 >> 1);
SplitFunction split_func = split_func_tbl[split_func_id];
if (split_func == 0)
cv::gpu::error("Unsupported channel count or data type", __FILE__, __LINE__, "split_caller");
split_func(src, dst, stream);
}
} // namespace split_merge
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */
| ab9a46d354c4721485c8b78dd54b607e9d819587.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
namespace cv { namespace gpu { namespace device
{
namespace split_merge
{
template <typename T, size_t elem_size = sizeof(T)>
struct TypeTraits
{
typedef T type;
typedef T type2;
typedef T type3;
typedef T type4;
};
template <typename T>
struct TypeTraits<T, 1>
{
typedef char type;
typedef char2 type2;
typedef char3 type3;
typedef char4 type4;
};
template <typename T>
struct TypeTraits<T, 2>
{
typedef short type;
typedef short2 type2;
typedef short3 type3;
typedef short4 type4;
};
template <typename T>
struct TypeTraits<T, 4>
{
typedef int type;
typedef int2 type2;
typedef int3 type3;
typedef int4 type4;
};
template <typename T>
struct TypeTraits<T, 8>
{
typedef double type;
typedef double2 type2;
//typedef double3 type3;
//typedef double4 type3;
};
typedef void (*MergeFunction)(const PtrStepSzb* src, PtrStepSzb& dst, const cudaStream_t& stream);
typedef void (*SplitFunction)(const PtrStepSzb& src, PtrStepSzb* dst, const cudaStream_t& stream);
//------------------------------------------------------------
// Merge
template <typename T>
__global__ void mergeC2_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type2 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_y[x] = dst_elem;
}
}
template <typename T>
__global__ void mergeC3_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type3 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
const T* src2_y = (const T*)(src2 + y * src2_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_elem.z = src2_y[x];
dst_y[x] = dst_elem;
}
}
template <>
__global__ void mergeC3_<double>(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src0_y = (const double*)(src0 + y * src0_step);
const double* src1_y = (const double*)(src1 + y * src1_step);
const double* src2_y = (const double*)(src2 + y * src2_step);
double* dst_y = (double*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_y[3 * x] = src0_y[x];
dst_y[3 * x + 1] = src1_y[x];
dst_y[3 * x + 2] = src2_y[x];
}
}
template <typename T>
__global__ void mergeC4_(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
const uchar* src3, size_t src3_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
typedef typename TypeTraits<T>::type4 dst_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const T* src0_y = (const T*)(src0 + y * src0_step);
const T* src1_y = (const T*)(src1 + y * src1_step);
const T* src2_y = (const T*)(src2 + y * src2_step);
const T* src3_y = (const T*)(src3 + y * src3_step);
dst_type* dst_y = (dst_type*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_type dst_elem;
dst_elem.x = src0_y[x];
dst_elem.y = src1_y[x];
dst_elem.z = src2_y[x];
dst_elem.w = src3_y[x];
dst_y[x] = dst_elem;
}
}
template <>
__global__ void mergeC4_<double>(const uchar* src0, size_t src0_step,
const uchar* src1, size_t src1_step,
const uchar* src2, size_t src2_step,
const uchar* src3, size_t src3_step,
int rows, int cols, uchar* dst, size_t dst_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src0_y = (const double*)(src0 + y * src0_step);
const double* src1_y = (const double*)(src1 + y * src1_step);
const double* src2_y = (const double*)(src2 + y * src2_step);
const double* src3_y = (const double*)(src3 + y * src3_step);
double2* dst_y = (double2*)(dst + y * dst_step);
if (x < cols && y < rows)
{
dst_y[2 * x] = make_double2(src0_y[x], src1_y[x]);
dst_y[2 * x + 1] = make_double2(src2_y[x], src3_y[x]);
}
}
template <typename T>
static void mergeC2_(const PtrStepSzb* src, PtrStepSzb& dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
mergeC2_<T><<<grid, block, 0, stream>>>(
src[0].data, src[0].step,
src[1].data, src[1].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
template <typename T>
static void mergeC3_(const PtrStepSzb* src, PtrStepSzb& dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
mergeC3_<T><<<grid, block, 0, stream>>>(
src[0].data, src[0].step,
src[1].data, src[1].step,
src[2].data, src[2].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
template <typename T>
static void mergeC4_(const PtrStepSzb* src, PtrStepSzb& dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
mergeC4_<T><<<grid, block, 0, stream>>>(
src[0].data, src[0].step,
src[1].data, src[1].step,
src[2].data, src[2].step,
src[3].data, src[3].step,
dst.rows, dst.cols, dst.data, dst.step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
void merge_caller(const PtrStepSzb* src, PtrStepSzb& dst,
int total_channels, size_t elem_size,
const cudaStream_t& stream)
{
static MergeFunction merge_func_tbl[] =
{
mergeC2_<char>, mergeC2_<short>, mergeC2_<int>, 0, mergeC2_<double>,
mergeC3_<char>, mergeC3_<short>, mergeC3_<int>, 0, mergeC3_<double>,
mergeC4_<char>, mergeC4_<short>, mergeC4_<int>, 0, mergeC4_<double>,
};
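// Rows of the table cover 2-, 3- and 4-channel merges; the column index elem_size >> 1
// maps 1/2/4/8-byte elements to entries 0/1/2/4 (entry 3 is an unused placeholder).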
size_t merge_func_id = (total_channels - 2) * 5 + (elem_size >> 1);
MergeFunction merge_func = merge_func_tbl[merge_func_id];
if (merge_func == 0)
cv::gpu::error("Unsupported channel count or data type", __FILE__, __LINE__, "merge_caller");
merge_func(src, dst, stream);
}
//------------------------------------------------------------
// Split
template <typename T>
__global__ void splitC2_(const uchar* src, size_t src_step,
int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step)
{
typedef typename TypeTraits<T>::type2 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
}
}
template <typename T>
__global__ void splitC3_(const uchar* src, size_t src_step,
int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step)
{
typedef typename TypeTraits<T>::type3 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
T* dst2_y = (T*)(dst2 + y * dst2_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
dst2_y[x] = src_elem.z;
}
}
template <>
__global__ void splitC3_<double>(
const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double* src_y = (const double*)(src + y * src_step);
double* dst0_y = (double*)(dst0 + y * dst0_step);
double* dst1_y = (double*)(dst1 + y * dst1_step);
double* dst2_y = (double*)(dst2 + y * dst2_step);
if (x < cols && y < rows)
{
dst0_y[x] = src_y[3 * x];
dst1_y[x] = src_y[3 * x + 1];
dst2_y[x] = src_y[3 * x + 2];
}
}
template <typename T>
__global__ void splitC4_(const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step,
uchar* dst3, size_t dst3_step)
{
typedef typename TypeTraits<T>::type4 src_type;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const src_type* src_y = (const src_type*)(src + y * src_step);
T* dst0_y = (T*)(dst0 + y * dst0_step);
T* dst1_y = (T*)(dst1 + y * dst1_step);
T* dst2_y = (T*)(dst2 + y * dst2_step);
T* dst3_y = (T*)(dst3 + y * dst3_step);
if (x < cols && y < rows)
{
src_type src_elem = src_y[x];
dst0_y[x] = src_elem.x;
dst1_y[x] = src_elem.y;
dst2_y[x] = src_elem.z;
dst3_y[x] = src_elem.w;
}
}
template <>
__global__ void splitC4_<double>(
const uchar* src, size_t src_step, int rows, int cols,
uchar* dst0, size_t dst0_step,
uchar* dst1, size_t dst1_step,
uchar* dst2, size_t dst2_step,
uchar* dst3, size_t dst3_step)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const double2* src_y = (const double2*)(src + y * src_step);
double* dst0_y = (double*)(dst0 + y * dst0_step);
double* dst1_y = (double*)(dst1 + y * dst1_step);
double* dst2_y = (double*)(dst2 + y * dst2_step);
double* dst3_y = (double*)(dst3 + y * dst3_step);
if (x < cols && y < rows)
{
double2 src_elem1 = src_y[2 * x];
double2 src_elem2 = src_y[2 * x + 1];
dst0_y[x] = src_elem1.x;
dst1_y[x] = src_elem1.y;
dst2_y[x] = src_elem2.x;
dst3_y[x] = src_elem2.y;
}
}
template <typename T>
static void splitC2_(const PtrStepSzb& src, PtrStepSzb* dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
splitC2_<T><<<grid, block, 0, stream>>>(
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
template <typename T>
static void splitC3_(const PtrStepSzb& src, PtrStepSzb* dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
splitC3_<T><<<grid, block, 0, stream>>>(
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step,
dst[2].data, dst[2].step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
template <typename T>
static void splitC4_(const PtrStepSzb& src, PtrStepSzb* dst, const cudaStream_t& stream)
{
dim3 block(32, 8);
dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
splitC4_<T><<<grid, block, 0, stream>>>(
src.data, src.step, src.rows, src.cols,
dst[0].data, dst[0].step,
dst[1].data, dst[1].step,
dst[2].data, dst[2].step,
dst[3].data, dst[3].step);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
void split_caller(const PtrStepSzb& src, PtrStepSzb* dst, int num_channels, size_t elem_size1, const cudaStream_t& stream)
{
static SplitFunction split_func_tbl[] =
{
splitC2_<char>, splitC2_<short>, splitC2_<int>, 0, splitC2_<double>,
splitC3_<char>, splitC3_<short>, splitC3_<int>, 0, splitC3_<double>,
splitC4_<char>, splitC4_<short>, splitC4_<int>, 0, splitC4_<double>,
};
size_t split_func_id = (num_channels - 2) * 5 + (elem_size1 >> 1);
SplitFunction split_func = split_func_tbl[split_func_id];
if (split_func == 0)
cv::gpu::error("Unsupported channel count or data type", __FILE__, __LINE__, "split_caller");
split_func(src, dst, stream);
}
} // namespace split_merge
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */
|
1a54c1b0c9e2fbfe2bdb089f7e7463681271757c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
__global__ void fDerSigmoid( const float* arguments, float* results, const long size ) {
const int X = gridDim.x;
const int index = gridDim.y * X * threadIdx.x + X * blockIdx.y + blockIdx.x;
if(index < size) {
const float argument = arguments[index];
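// Sigmoid derivative written in terms of the stored activation s: d/dx sigmoid(x) = s * (1 - s) = s - s*s.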
results[index] = argument - argument * argument;
}
} | 1a54c1b0c9e2fbfe2bdb089f7e7463681271757c.cu | #include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
__global__ void fDerSigmoid( const float* arguments, float* results, const long size ) {
const int X = gridDim.x;
const int index = gridDim.y * X * threadIdx.x + X * blockIdx.y + blockIdx.x;
if(index < size) {
const float argument = arguments[index];
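// Sigmoid derivative written in terms of the stored activation s: d/dx sigmoid(x) = s * (1 - s) = s - s*s.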
results[index] = argument - argument * argument;
}
} |
1fbada0c601d5c2d71f28dde20bf09f9bdefc34f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
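// HeatEq applies one explicit (FTCS) update of the 1-D heat equation to the interior points;
// s plays the role of dt/dx^2 (times the diffusivity) and must stay <= 0.5 for stability.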
__global__ void HeatEq(float* d_a, float* d_b, double s)
{
int i = threadIdx.x;
d_b[i+1] = d_a[i+1]+s*(d_a[i+2]+d_a[i]-2*d_a[i+1]);
}
int main(int argc, char** argv)
{
const int n = 16;
const int BYTES = n * sizeof(float);
float h_a[n];
float h_b[n];
double s = 0.25;
for (int i=0; i < n; i++)
{
h_a[i]=0;
h_b[i]=0; // initialize h_b as well: the kernel never writes the boundary cells, so they must start at 0 (fixed boundaries)
}
h_a[5]=h_a[8]=0.1;
h_a[6]=h_a[7]=0.2;
//declare GPU memory pointers
float *d_a;
float *d_b;
//allocate memory on the device
hipMalloc((void**)&d_a,BYTES);
hipMalloc((void**)&d_b,BYTES);
//transfer the array to the GPU
//destination, source, size, method
hipMemcpy(d_b,h_b,BYTES,hipMemcpyHostToDevice);
hipMemcpy(d_a,h_a,BYTES,hipMemcpyHostToDevice);
//launch the kernel
for (int i=0; i<25; i++) {
hipLaunchKernelGGL(( HeatEq), dim3(1),dim3((n-2)), 0, 0, d_a,d_b,s);
hipMemcpy(d_a,d_b,BYTES,hipMemcpyDeviceToDevice);
hipDeviceSynchronize();
}
//copy the results back onto the device
//destination, source, size, method
hipMemcpy(h_b,d_b,BYTES,hipMemcpyDeviceToHost);
for (int i=0; i<n; i++) {
printf("%d \t %.5f",i,h_b[i]);
printf("\n");
}
printf("\n \n");
//free memory previously allocated on the device
hipFree(d_a);
hipFree(d_b);
}
| 1fbada0c601d5c2d71f28dde20bf09f9bdefc34f.cu | #include<stdio.h>
#include<cuda.h>
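// HeatEq applies one explicit (FTCS) update of the 1-D heat equation to the interior points;
// s plays the role of dt/dx^2 (times the diffusivity) and must stay <= 0.5 for stability.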
__global__ void HeatEq(float* d_a, float* d_b, double s)
{
int i = threadIdx.x;
d_b[i+1] = d_a[i+1]+s*(d_a[i+2]+d_a[i]-2*d_a[i+1]);
}
int main(int argc, char** argv)
{
const int n = 16;
const int BYTES = n * sizeof(float);
float h_a[n];
float h_b[n];
double s = 0.25;
for (int i=0; i < n; i++)
{
h_a[i]=0;
h_b[i]=0; // initialize h_b as well: the kernel never writes the boundary cells, so they must start at 0 (fixed boundaries)
}
h_a[5]=h_a[8]=0.1;
h_a[6]=h_a[7]=0.2;
//declare GPU memory pointers
float *d_a;
float *d_b;
//allocate memory on the device
cudaMalloc((void**)&d_a,BYTES);
cudaMalloc((void**)&d_b,BYTES);
//transfer the array to the GPU
//destination, source, size, method
cudaMemcpy(d_b,h_b,BYTES,cudaMemcpyHostToDevice);
cudaMemcpy(d_a,h_a,BYTES,cudaMemcpyHostToDevice);
//launch the kernel
for (int i=0; i<25; i++) {
HeatEq<<<1,(n-2)>>>(d_a,d_b,s);
cudaMemcpy(d_a,d_b,BYTES,cudaMemcpyDeviceToDevice);
cudaDeviceSynchronize();
}
//copy the results back onto the device
//destination, source, size, method
cudaMemcpy(h_b,d_b,BYTES,cudaMemcpyDeviceToHost);
for (int i=0; i<n; i++) {
printf("%d \t %.5f",i,h_b[i]);
printf("\n");
}
printf("\n \n");
//free memory previously allocated on the device
cudaFree(d_a);
cudaFree(d_b);
}
|
eaa693ffc596ac472194a5f3bb1485315a07ea96.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../../../../include/ttrack/track/localizer/levelsets/pwp3d_cuda.hpp"
__global__ void testCudaFunction(int *a, int *b, int *c){
*c = *a + *b;
//data[0] = 4;
}
bool ttrk::gpu::checkCudaFunctionality(){
int ct;
hipGetDeviceCount(&ct);
if (ct == 0){
return false;
}
hipError_t code = hipGetLastError();
for (int dev = 0; dev < ct; ++dev){
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, dev);
}
return true;
//
//int a, b, c;
//a = 4;
//b = 6;
//int *a_d, *b_d, *c_d;
//hipMalloc((void **)&a_d, sizeof(int));
//hipMalloc((void **)&b_d, sizeof(int));
//hipMalloc((void **)&c_d, sizeof(int));
//hipMemcpy(a_d, &a, sizeof(int), hipMemcpyHostToDevice);
//hipMemcpy(b_d, &b, sizeof(int), hipMemcpyHostToDevice);
//testCudaFunction<<<1,1>>>(a_d, b_d, c_d);
//hipMemcpy(&c, c_d, sizeof(int), hipMemcpyDeviceToHost);
//code = hipGetLastError();
//
//hipMemcpy(&c, c_d, sizeof(int), hipMemcpyDeviceToHost);
//hipFree(a_d);
//hipFree(b_d);
//hipFree(c_d);
////ci::app::console() << "P = " << p << std::endl;
//int j = 0;
//int x = j + 3;
} | eaa693ffc596ac472194a5f3bb1485315a07ea96.cu | #include "../../../../include/ttrack/track/localizer/levelsets/pwp3d_cuda.hpp"
__global__ void testCudaFunction(int *a, int *b, int *c){
*c = *a + *b;
//data[0] = 4;
}
bool ttrk::gpu::checkCudaFunctionality(){
int ct;
cudaGetDeviceCount(&ct);
if (ct == 0){
return false;
}
cudaError_t code = cudaGetLastError();
for (int dev = 0; dev < ct; ++dev){
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, dev);
}
return true;
//
//int a, b, c;
//a = 4;
//b = 6;
//int *a_d, *b_d, *c_d;
//cudaMalloc((void **)&a_d, sizeof(int));
//cudaMalloc((void **)&b_d, sizeof(int));
//cudaMalloc((void **)&c_d, sizeof(int));
//cudaMemcpy(a_d, &a, sizeof(int), cudaMemcpyHostToDevice);
//cudaMemcpy(b_d, &b, sizeof(int), cudaMemcpyHostToDevice);
//testCudaFunction<<<1,1>>>(a_d, b_d, c_d);
//cudaMemcpy(&c, c_d, sizeof(int), cudaMemcpyDeviceToHost);
//code = cudaGetLastError();
//
//cudaMemcpy(&c, c_d, sizeof(int), cudaMemcpyDeviceToHost);
//cudaFree(a_d);
//cudaFree(b_d);
//cudaFree(c_d);
////ci::app::console() << "P = " << p << std::endl;
//int j = 0;
//int x = j + 3;
} |
e7da69c7b8e72b5d8983113132e9540372db8a8b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*
* C code for creating the Q data structure for fast convolution-based
* Hessian multiplication for arbitrary k-space trajectories.
*
* Inputs:
* kx - VECTOR of kx values, same length as ky and kz
* ky - VECTOR of ky values, same length as kx and kz
* kz - VECTOR of kz values, same length as kx and ky
* x - VECTOR of x values, same length as y and z
* y - VECTOR of y values, same length as x and z
* z - VECTOR of z values, same length as x and y
* phi - VECTOR of the Fourier transform of the spatial basis
* function, evaluated at [kx, ky, kz]. Same length as kx, ky, and kz.
*
* recommended g++ options:
* -O3 -lm -ffast-math -funroll-all-loops
*/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <sys/time.h>
#include <malloc.h>
#include "parboil.h"
#include "file.h"
#include "computeQ.hip"
void computePhiMag_GPU(int numK, float* phiR_d, float* phiI_d, float* phiMag_d)
{
int phiMagBlocks = numK / KERNEL_PHI_MAG_THREADS_PER_BLOCK;
if (numK % KERNEL_PHI_MAG_THREADS_PER_BLOCK)
phiMagBlocks++;
dim3 DimPhiMagBlock(KERNEL_PHI_MAG_THREADS_PER_BLOCK, 1);
dim3 DimPhiMagGrid(phiMagBlocks, 1);
hipLaunchKernelGGL(( ComputePhiMag_GPU) , dim3(DimPhiMagGrid), dim3(DimPhiMagBlock) , 0, 0,
phiR_d, phiI_d, phiMag_d, numK);
}
void computeQ_GPU(int numK, int numX,
float* x_d, float* y_d, float* z_d,
kValues* kVals,
float* Qr_d, float* Qi_d)
{
int QGrids = numK / KERNEL_Q_K_ELEMS_PER_GRID;
if (numK % KERNEL_Q_K_ELEMS_PER_GRID)
QGrids++;
int QBlocks = numX / KERNEL_Q_THREADS_PER_BLOCK;
if (numX % KERNEL_Q_THREADS_PER_BLOCK)
QBlocks++;
dim3 DimQBlock(KERNEL_Q_THREADS_PER_BLOCK, 1);
dim3 DimQGrid(QBlocks, 1);
for (int QGrid = 0; QGrid < QGrids; QGrid++) {
// Put the tile of K values into constant mem
int QGridBase = QGrid * KERNEL_Q_K_ELEMS_PER_GRID;
kValues* kValsTile = kVals + QGridBase;
int numElems = MIN(KERNEL_Q_K_ELEMS_PER_GRID, numK - QGridBase);
hipMemcpyToSymbol(ck, kValsTile, numElems * sizeof(kValues), 0);
hipLaunchKernelGGL(( ComputeQ_GPU) , dim3(DimQGrid), dim3(DimQBlock) , 0, 0,
numK, QGridBase, x_d, y_d, z_d, Qr_d, Qi_d);
}
}
void createDataStructsCPU(int numK, int numX, float** phiMag,
float** Qr, float** Qi)
{
*phiMag = (float* ) memalign(16, numK * sizeof(float));
*Qr = (float*) memalign(16, numX * sizeof (float));
*Qi = (float*) memalign(16, numX * sizeof (float));
}
static void
setupMemoryGPU(int num, int size, float*& dev_ptr, float*& host_ptr)
{
hipMalloc ((void **) &dev_ptr, num * size);
CUDA_ERRCK;
hipMemcpy (dev_ptr, host_ptr, num * size, hipMemcpyHostToDevice);
CUDA_ERRCK;
}
static void
cleanupMemoryGPU(int num, int size, float *& dev_ptr, float * host_ptr)
{
hipMemcpy (host_ptr, dev_ptr, num * size, hipMemcpyDeviceToHost);
CUDA_ERRCK;
hipFree(dev_ptr);
CUDA_ERRCK;
}
int
main (int argc, char *argv[]) {
int numX, numK; /* Number of X and K values */
int original_numK; /* Number of K values in input file */
float *kx, *ky, *kz; /* K trajectory (3D vectors) */
float *x, *y, *z; /* X coordinates (3D vectors) */
float *phiR, *phiI; /* Phi values (complex) */
float *phiMag; /* Magnitude of Phi */
float *Qr, *Qi; /* Q signal (complex) */
struct kValues* kVals;
//struct pb_Parameters *params;
//struct pb_TimerSet timers;
//pb_InitializeTimerSet(&timers);
/* Read command line */
//params = pb_ReadParameters(&argc, argv);
/* Initialize the parameters structure */
struct pb_Parameters *params = (struct pb_Parameters *)malloc(sizeof(struct pb_Parameters));
params->outFile = NULL;
params->inpFiles = (char **)malloc(sizeof(char *));
params->inpFiles[0] = NULL;
// Read input from command line
#ifdef SIZE0
params->inpFiles[0] = "~/software/parboil-2.5/datasets/mri-q/small/input/32_32_32_dataset.bin";
#endif
#ifdef SIZE1
params->inpFiles[0] = "~/software/parboil-2.5/datasets/mri-q/large/input/64_64_64_dataset.bin";
#endif
/*
if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] != NULL))
{
fprintf(stderr, "Expecting one input filename\n");
exit(-1);
}
*/
/* Read in data */
//pb_SwitchToTimer(&timers, pb_TimerID_IO);
inputData(params->inpFiles[0],
&original_numK, &numX,
&kx, &ky, &kz,
&x, &y, &z,
&phiR, &phiI);
/* Reduce the number of k-space samples if a number is given
* on the command line */
if (argc < 2)
numK = original_numK;
else
{
int inputK;
char *end;
inputK = strtol(argv[1], &end, 10);
if (end == argv[1])
{
fprintf(stderr, "Expecting an integer parameter\n");
exit(-1);
}
numK = MIN(inputK, original_numK);
}
printf("%d pixels in output; %d samples in trajectory; using %d samples\n",
numX, original_numK, numK);
//pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
/* Create CPU data structures */
createDataStructsCPU(numK, numX, &phiMag, &Qr, &Qi);
/* GPU section 1 (precompute PhiMag) */
{
/* Mirror several data structures on the device */
float *phiR_d, *phiI_d;
float *phiMag_d;
//pb_SwitchToTimer(&timers, pb_TimerID_COPY);
setupMemoryGPU(numK, sizeof(float), phiR_d, phiR);
setupMemoryGPU(numK, sizeof(float), phiI_d, phiI);
hipMalloc((void **)&phiMag_d, numK * sizeof(float));
CUDA_ERRCK;
hipDeviceSynchronize();
//pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
computePhiMag_GPU(numK, phiR_d, phiI_d, phiMag_d);
hipDeviceSynchronize();
//pb_SwitchToTimer(&timers, pb_TimerID_COPY);
cleanupMemoryGPU(numK, sizeof(float), phiMag_d, phiMag);
hipFree(phiR_d);
hipFree(phiI_d);
}
//pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
kVals = (struct kValues*)calloc(numK, sizeof (struct kValues));
for (int k = 0; k < numK; k++) {
kVals[k].Kx = kx[k];
kVals[k].Ky = ky[k];
kVals[k].Kz = kz[k];
kVals[k].PhiMag = phiMag[k];
}
free(phiMag);
/* GPU section 2 */
{
float *x_d, *y_d, *z_d;
float *Qr_d, *Qi_d;
//pb_SwitchToTimer(&timers, pb_TimerID_COPY);
setupMemoryGPU(numX, sizeof(float), x_d, x);
setupMemoryGPU(numX, sizeof(float), y_d, y);
setupMemoryGPU(numX, sizeof(float), z_d, z);
hipMalloc((void **)&Qr_d, numX * sizeof(float));
CUDA_ERRCK;
hipMemset((void *)Qr_d, 0, numX * sizeof(float));
hipMalloc((void **)&Qi_d, numX * sizeof(float));
CUDA_ERRCK;
hipMemset((void *)Qi_d, 0, numX * sizeof(float));
hipDeviceSynchronize();
//pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
computeQ_GPU(numK, numX, x_d, y_d, z_d, kVals, Qr_d, Qi_d);
hipDeviceSynchronize();
//pb_SwitchToTimer(&timers, pb_TimerID_COPY);
hipFree(x_d);
hipFree(y_d);
hipFree(z_d);
cleanupMemoryGPU(numX, sizeof(float), Qr_d, Qr);
cleanupMemoryGPU(numX, sizeof(float), Qi_d, Qi);
}
//pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
if (params->outFile)
{
/* Write Q to file */
//pb_SwitchToTimer(&timers, pb_TimerID_IO);
outputData(params->outFile, Qr, Qi, numX);
//pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
}
free (kx);
free (ky);
free (kz);
free (x);
free (y);
free (z);
free (phiR);
free (phiI);
free (kVals);
free (Qr);
free (Qi);
//pb_SwitchToTimer(&timers, pb_TimerID_NONE);
//pb_PrintTimerSet(&timers);
//pb_FreeParameters(params);
return 0;
}
void inputData(char* fName, int* _numK, int* _numX,
float** kx, float** ky, float** kz,
float** x, float** y, float** z,
float** phiR, float** phiI)
{
int numK, numX;
FILE* fid = fopen(fName, "r");
size_t temp;
if (fid == NULL)
{
fprintf(stderr, "Cannot open input file\n");
exit(-1);
}
temp = fread (&numK, sizeof (int), 1, fid);
*_numK = numK;
temp = fread (&numX, sizeof (int), 1, fid);
*_numX = numX;
*kx = (float *) memalign(16, numK * sizeof (float));
temp = fread (*kx, sizeof (float), numK, fid);
*ky = (float *) memalign(16, numK * sizeof (float));
temp = fread (*ky, sizeof (float), numK, fid);
*kz = (float *) memalign(16, numK * sizeof (float));
temp = fread (*kz, sizeof (float), numK, fid);
*x = (float *) memalign(16, numX * sizeof (float));
temp = fread (*x, sizeof (float), numX, fid);
*y = (float *) memalign(16, numX * sizeof (float));
temp = fread (*y, sizeof (float), numX, fid);
*z = (float *) memalign(16, numX * sizeof (float));
temp = fread (*z, sizeof (float), numX, fid);
*phiR = (float *) memalign(16, numK * sizeof (float));
temp = fread (*phiR, sizeof (float), numK, fid);
*phiI = (float *) memalign(16, numK * sizeof (float));
temp = fread (*phiI, sizeof (float), numK, fid);
temp += numK;
fclose (fid);
}
void outputData(char* fName, float* outR, float* outI, int numX)
{
FILE* fid = fopen(fName, "w");
uint32_t tmp32;
if (fid == NULL)
{
fprintf(stderr, "Cannot open output file\n");
exit(-1);
}
/* Write the data size */
tmp32 = numX;
fwrite(&tmp32, sizeof(uint32_t), 1, fid);
/* Write the reconstructed data */
fwrite (outR, sizeof (float), numX, fid);
fwrite (outI, sizeof (float), numX, fid);
fclose (fid);
}
| e7da69c7b8e72b5d8983113132e9540372db8a8b.cu | /***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*
* C code for creating the Q data structure for fast convolution-based
* Hessian multiplication for arbitrary k-space trajectories.
*
* Inputs:
* kx - VECTOR of kx values, same length as ky and kz
* ky - VECTOR of ky values, same length as kx and kz
* kz - VECTOR of kz values, same length as kx and ky
* x - VECTOR of x values, same length as y and z
* y - VECTOR of y values, same length as x and z
* z - VECTOR of z values, same length as x and y
* phi - VECTOR of the Fourier transform of the spatial basis
* function, evaluated at [kx, ky, kz]. Same length as kx, ky, and kz.
*
* recommended g++ options:
* -O3 -lm -ffast-math -funroll-all-loops
*/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <sys/time.h>
#include <malloc.h>
#include "parboil.h"
#include "file.h"
#include "computeQ.cu"
void computePhiMag_GPU(int numK, float* phiR_d, float* phiI_d, float* phiMag_d)
{
int phiMagBlocks = numK / KERNEL_PHI_MAG_THREADS_PER_BLOCK;
if (numK % KERNEL_PHI_MAG_THREADS_PER_BLOCK)
phiMagBlocks++;
dim3 DimPhiMagBlock(KERNEL_PHI_MAG_THREADS_PER_BLOCK, 1);
dim3 DimPhiMagGrid(phiMagBlocks, 1);
ComputePhiMag_GPU <<< DimPhiMagGrid, DimPhiMagBlock >>>
(phiR_d, phiI_d, phiMag_d, numK);
}
void computeQ_GPU(int numK, int numX,
float* x_d, float* y_d, float* z_d,
kValues* kVals,
float* Qr_d, float* Qi_d)
{
int QGrids = numK / KERNEL_Q_K_ELEMS_PER_GRID;
if (numK % KERNEL_Q_K_ELEMS_PER_GRID)
QGrids++;
int QBlocks = numX / KERNEL_Q_THREADS_PER_BLOCK;
if (numX % KERNEL_Q_THREADS_PER_BLOCK)
QBlocks++;
dim3 DimQBlock(KERNEL_Q_THREADS_PER_BLOCK, 1);
dim3 DimQGrid(QBlocks, 1);
for (int QGrid = 0; QGrid < QGrids; QGrid++) {
// Put the tile of K values into constant mem
int QGridBase = QGrid * KERNEL_Q_K_ELEMS_PER_GRID;
kValues* kValsTile = kVals + QGridBase;
int numElems = MIN(KERNEL_Q_K_ELEMS_PER_GRID, numK - QGridBase);
cudaMemcpyToSymbol(ck, kValsTile, numElems * sizeof(kValues), 0);
ComputeQ_GPU <<< DimQGrid, DimQBlock >>>
(numK, QGridBase, x_d, y_d, z_d, Qr_d, Qi_d);
}
}
void createDataStructsCPU(int numK, int numX, float** phiMag,
float** Qr, float** Qi)
{
*phiMag = (float* ) memalign(16, numK * sizeof(float));
*Qr = (float*) memalign(16, numX * sizeof (float));
*Qi = (float*) memalign(16, numX * sizeof (float));
}
static void
setupMemoryGPU(int num, int size, float*& dev_ptr, float*& host_ptr)
{
cudaMalloc ((void **) &dev_ptr, num * size);
CUDA_ERRCK;
cudaMemcpy (dev_ptr, host_ptr, num * size, cudaMemcpyHostToDevice);
CUDA_ERRCK;
}
static void
cleanupMemoryGPU(int num, int size, float *& dev_ptr, float * host_ptr)
{
cudaMemcpy (host_ptr, dev_ptr, num * size, cudaMemcpyDeviceToHost);
CUDA_ERRCK;
cudaFree(dev_ptr);
CUDA_ERRCK;
}
int
main (int argc, char *argv[]) {
int numX, numK; /* Number of X and K values */
int original_numK; /* Number of K values in input file */
float *kx, *ky, *kz; /* K trajectory (3D vectors) */
float *x, *y, *z; /* X coordinates (3D vectors) */
float *phiR, *phiI; /* Phi values (complex) */
float *phiMag; /* Magnitude of Phi */
float *Qr, *Qi; /* Q signal (complex) */
struct kValues* kVals;
//struct pb_Parameters *params;
//struct pb_TimerSet timers;
//pb_InitializeTimerSet(&timers);
/* Read command line */
//params = pb_ReadParameters(&argc, argv);
/* Initialize the parameters structure */
struct pb_Parameters *params = (struct pb_Parameters *)malloc(sizeof(struct pb_Parameters));
params->outFile = NULL;
params->inpFiles = (char **)malloc(sizeof(char *));
params->inpFiles[0] = NULL;
// Read input from command line
#ifdef SIZE0
params->inpFiles[0] = "~/software/parboil-2.5/datasets/mri-q/small/input/32_32_32_dataset.bin";
#endif
#ifdef SIZE1
params->inpFiles[0] = "~/software/parboil-2.5/datasets/mri-q/large/input/64_64_64_dataset.bin";
#endif
/*
if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] != NULL))
{
fprintf(stderr, "Expecting one input filename\n");
exit(-1);
}
*/
/* Read in data */
//pb_SwitchToTimer(&timers, pb_TimerID_IO);
inputData(params->inpFiles[0],
&original_numK, &numX,
&kx, &ky, &kz,
&x, &y, &z,
&phiR, &phiI);
/* Reduce the number of k-space samples if a number is given
* on the command line */
if (argc < 2)
numK = original_numK;
else
{
int inputK;
char *end;
inputK = strtol(argv[1], &end, 10);
if (end == argv[1])
{
fprintf(stderr, "Expecting an integer parameter\n");
exit(-1);
}
numK = MIN(inputK, original_numK);
}
printf("%d pixels in output; %d samples in trajectory; using %d samples\n",
numX, original_numK, numK);
//pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
/* Create CPU data structures */
createDataStructsCPU(numK, numX, &phiMag, &Qr, &Qi);
/* GPU section 1 (precompute PhiMag) */
{
/* Mirror several data structures on the device */
float *phiR_d, *phiI_d;
float *phiMag_d;
//pb_SwitchToTimer(&timers, pb_TimerID_COPY);
setupMemoryGPU(numK, sizeof(float), phiR_d, phiR);
setupMemoryGPU(numK, sizeof(float), phiI_d, phiI);
cudaMalloc((void **)&phiMag_d, numK * sizeof(float));
CUDA_ERRCK;
cudaThreadSynchronize();
//pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
computePhiMag_GPU(numK, phiR_d, phiI_d, phiMag_d);
cudaThreadSynchronize();
//pb_SwitchToTimer(&timers, pb_TimerID_COPY);
cleanupMemoryGPU(numK, sizeof(float), phiMag_d, phiMag);
cudaFree(phiR_d);
cudaFree(phiI_d);
}
//pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
kVals = (struct kValues*)calloc(numK, sizeof (struct kValues));
for (int k = 0; k < numK; k++) {
kVals[k].Kx = kx[k];
kVals[k].Ky = ky[k];
kVals[k].Kz = kz[k];
kVals[k].PhiMag = phiMag[k];
}
free(phiMag);
/* GPU section 2 */
{
float *x_d, *y_d, *z_d;
float *Qr_d, *Qi_d;
//pb_SwitchToTimer(&timers, pb_TimerID_COPY);
setupMemoryGPU(numX, sizeof(float), x_d, x);
setupMemoryGPU(numX, sizeof(float), y_d, y);
setupMemoryGPU(numX, sizeof(float), z_d, z);
cudaMalloc((void **)&Qr_d, numX * sizeof(float));
CUDA_ERRCK;
cudaMemset((void *)Qr_d, 0, numX * sizeof(float));
cudaMalloc((void **)&Qi_d, numX * sizeof(float));
CUDA_ERRCK;
cudaMemset((void *)Qi_d, 0, numX * sizeof(float));
cudaThreadSynchronize();
//pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
computeQ_GPU(numK, numX, x_d, y_d, z_d, kVals, Qr_d, Qi_d);
cudaThreadSynchronize();
//pb_SwitchToTimer(&timers, pb_TimerID_COPY);
cudaFree(x_d);
cudaFree(y_d);
cudaFree(z_d);
cleanupMemoryGPU(numX, sizeof(float), Qr_d, Qr);
cleanupMemoryGPU(numX, sizeof(float), Qi_d, Qi);
}
//pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
if (params->outFile)
{
/* Write Q to file */
//pb_SwitchToTimer(&timers, pb_TimerID_IO);
outputData(params->outFile, Qr, Qi, numX);
//pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
}
free (kx);
free (ky);
free (kz);
free (x);
free (y);
free (z);
free (phiR);
free (phiI);
free (kVals);
free (Qr);
free (Qi);
//pb_SwitchToTimer(&timers, pb_TimerID_NONE);
//pb_PrintTimerSet(&timers);
//pb_FreeParameters(params);
return 0;
}
void inputData(char* fName, int* _numK, int* _numX,
float** kx, float** ky, float** kz,
float** x, float** y, float** z,
float** phiR, float** phiI)
{
int numK, numX;
FILE* fid = fopen(fName, "r");
size_t temp;
if (fid == NULL)
{
fprintf(stderr, "Cannot open input file\n");
exit(-1);
}
temp = fread (&numK, sizeof (int), 1, fid);
*_numK = numK;
temp = fread (&numX, sizeof (int), 1, fid);
*_numX = numX;
*kx = (float *) memalign(16, numK * sizeof (float));
temp = fread (*kx, sizeof (float), numK, fid);
*ky = (float *) memalign(16, numK * sizeof (float));
temp = fread (*ky, sizeof (float), numK, fid);
*kz = (float *) memalign(16, numK * sizeof (float));
temp = fread (*kz, sizeof (float), numK, fid);
*x = (float *) memalign(16, numX * sizeof (float));
temp = fread (*x, sizeof (float), numX, fid);
*y = (float *) memalign(16, numX * sizeof (float));
temp = fread (*y, sizeof (float), numX, fid);
*z = (float *) memalign(16, numX * sizeof (float));
temp = fread (*z, sizeof (float), numX, fid);
*phiR = (float *) memalign(16, numK * sizeof (float));
temp = fread (*phiR, sizeof (float), numK, fid);
*phiI = (float *) memalign(16, numK * sizeof (float));
temp = fread (*phiI, sizeof (float), numK, fid);
temp += numK;
fclose (fid);
}
void outputData(char* fName, float* outR, float* outI, int numX)
{
FILE* fid = fopen(fName, "w");
uint32_t tmp32;
if (fid == NULL)
{
fprintf(stderr, "Cannot open output file\n");
exit(-1);
}
/* Write the data size */
tmp32 = numX;
fwrite(&tmp32, sizeof(uint32_t), 1, fid);
/* Write the reconstructed data */
fwrite (outR, sizeof (float), numX, fid);
fwrite (outI, sizeof (float), numX, fid);
fclose (fid);
}
|
61c9e3c0bf6d829e8be6011796a7e89252c6d014.hip | // !!! This is a file automatically generated by hipify!!!
#include "distconv/tensor/halo_exchange_cuda.hpp"
#include "distconv/tensor/halo_cuda.hpp"
#include "distconv/util/util_mpi.hpp"
#include "distconv/tensor/halo_packing_cuda.hpp"
#include <limits>
namespace distconv {
namespace tensor {
template <>
void HaloExchange<float, HIPAllocator, Al::NCCLBackend>::
pack_or_unpack(int dim,
Side side,
int width,
h2::gpu::DeviceStream stream,
void* buf,
bool is_pack,
bool is_reverse,
HaloExchangeAccumOp op)
{
halo_exchange_cuda::pack_or_unpack<float>(
m_tensor, dim, side, width, stream, buf, is_pack, is_reverse, op);
}
template <>
void HaloExchange<double, HIPAllocator, Al::NCCLBackend>::
pack_or_unpack(int dim,
Side side,
int width,
h2::gpu::DeviceStream stream,
void* buf,
bool is_pack,
bool is_reverse,
HaloExchangeAccumOp op)
{
halo_exchange_cuda::pack_or_unpack<double>(
m_tensor, dim, side, width, stream, buf, is_pack, is_reverse, op);
}
} // namespace tensor
} // namespace distconv
| 61c9e3c0bf6d829e8be6011796a7e89252c6d014.cu | #include "distconv/tensor/halo_exchange_cuda.hpp"
#include "distconv/tensor/halo_cuda.hpp"
#include "distconv/util/util_mpi.hpp"
#include "distconv/tensor/halo_packing_cuda.hpp"
#include <limits>
namespace distconv {
namespace tensor {
template <>
void HaloExchange<float, CUDAAllocator, Al::NCCLBackend>::
pack_or_unpack(int dim,
Side side,
int width,
h2::gpu::DeviceStream stream,
void* buf,
bool is_pack,
bool is_reverse,
HaloExchangeAccumOp op)
{
halo_exchange_cuda::pack_or_unpack<float>(
m_tensor, dim, side, width, stream, buf, is_pack, is_reverse, op);
}
template <>
void HaloExchange<double, CUDAAllocator, Al::NCCLBackend>::
pack_or_unpack(int dim,
Side side,
int width,
h2::gpu::DeviceStream stream,
void* buf,
bool is_pack,
bool is_reverse,
HaloExchangeAccumOp op)
{
halo_exchange_cuda::pack_or_unpack<double>(
m_tensor, dim, side, width, stream, buf, is_pack, is_reverse, op);
}
} // namespace tensor
} // namespace distconv
|
6d91c64c759182030565a22b7d91cd0df246c5c5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <assert.h>
#define N 2//64
__device__ int f(int x) {
return x + 2;
}
__global__ void foo(int *y, int x) {
*y = f(x);
}
int main() {
int a=2;
int b=0;
int *dev_a;
hipMalloc((void**)&dev_a, sizeof(int));
hipMemcpy(dev_a, &a, sizeof(int), hipMemcpyHostToDevice);
//foo<<<1, N>>>(dev_a, a);
ESBMC_verify_kernel_intt(foo, 1, N, dev_a, a);
hipMemcpy(&b, dev_a, sizeof(int), hipMemcpyDeviceToHost);
assert (b != a+2);
hipFree(dev_a);
return 0;
}
| 6d91c64c759182030565a22b7d91cd0df246c5c5.cu | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <assert.h>
#define N 2//64
__device__ int f(int x) {
return x + 2;
}
__global__ void foo(int *y, int x) {
*y = f(x);
}
int main() {
int a=2;
int b=0;
int *dev_a;
cudaMalloc((void**)&dev_a, sizeof(int));
cudaMemcpy(dev_a, &a, sizeof(int), cudaMemcpyHostToDevice);
//foo<<<1, N>>>(dev_a, a);
ESBMC_verify_kernel_intt(foo, 1, N, dev_a, a);
cudaMemcpy(&b, dev_a, sizeof(int), cudaMemcpyDeviceToHost);
assert (b != a+2);
cudaFree(dev_a);
return 0;
}
|
639b524dc6964be60c9fc312733f9c35e5af9f26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 paddlepaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/unpooling.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T>
__global__ void KernelUnpool2dMax(const int nthreads, const T* input_data,
const int* indices_data,
const int input_height, const int input_width,
const int channels, T* output_data,
const int output_height,
const int output_width) {
CUDA_KERNEL_LOOP(linearIndex, nthreads) {
int c = (linearIndex / input_width / input_height) % channels;
int n = linearIndex / input_width / input_height / channels;
output_data += (n * channels + c) * output_height * output_width;
int maxind = indices_data[linearIndex];
output_data[maxind] = input_data[linearIndex];
}
}
template <typename T>
__global__ void KernelUnpool2dMaxGrad(
const int nthreads, const T* input_data, const int* indices_data,
const int input_height, const int input_width, const int channels,
const T* output_data, const T* output_grad, const int output_height,
const int output_width, T* input_grad) {
CUDA_KERNEL_LOOP(linearIndex, nthreads) {
int c = (linearIndex / input_width / input_height) % channels;
int n = linearIndex / input_width / input_height / channels;
output_grad += (n * channels + c) * output_height * output_width;
int maxind = indices_data[linearIndex];
input_grad[linearIndex] = output_grad[maxind];
}
}
/*
* All tensors are in NCHW format.
*/
template <typename T>
__global__ void KernelUnpool3dMax(const int nthreads, const T* input_data,
const int* indices_data,
const int input_depth, const int input_height,
const int input_width, const int channels,
T* output_data, const int output_depth,
const int output_height,
const int output_width) {
CUDA_KERNEL_LOOP(linearIndex, nthreads) {
int c = (linearIndex / input_depth / input_width / input_height) % channels;
int n = linearIndex / input_depth / input_width / input_height / channels;
output_data +=
(n * channels + c) * output_depth * output_height * output_width;
int maxind = indices_data[linearIndex];
output_data[maxind] = input_data[linearIndex];
}
}
template <typename T>
__global__ void KernelUnpool3dMaxGrad(
const int nthreads, const T* input_data, const int* indices_data,
const int input_depth, const int input_height, const int input_width,
const int channels, const T* output_data, const T* output_grad,
const int output_depth, const int output_height, const int output_width,
T* input_grad) {
CUDA_KERNEL_LOOP(linearIndex, nthreads) {
int c = (linearIndex / input_depth / input_width / input_height) % channels;
int n = linearIndex / input_depth / input_width / input_height / channels;
output_grad +=
(n * channels + c) * output_depth * output_height * output_width;
int maxind = indices_data[linearIndex];
input_grad[linearIndex] = output_grad[maxind];
}
}
/*
* All tensors are in NCDHW format.
*/
template <typename T>
class Unpool2dMaxFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& indices, framework::Tensor* output) {
const int batch_size = input.dims()[0];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output->dims()[1];
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
const T* input_data = input.data<T>();
const int* indices_data = indices.data<int>();
T* output_data = output->mutable_data<T>(context.GetPlace());
#ifdef __HIPCC__
int threads = 256;
#else
int threads = 1024;
#endif
int grid = (input.numel() + threads - 1) / threads;
hipLaunchKernelGGL(( KernelUnpool2dMax<T>), dim3(grid), dim3(threads), 0, context.stream(),
input.numel(), input_data, indices_data, input_height, input_width,
output_channels, output_data, output_height, output_width);
}
};
/*
* All tensors are in NCHW format.
*/
template <typename T>
class Unpool2dMaxGradFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& indices,
const framework::Tensor& output,
const framework::Tensor& output_grad,
framework::Tensor* input_grad) {
const int batch_size = input.dims()[0];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output.dims()[1];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const T* input_data = input.data<T>();
const int* indices_data = indices.data<int>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
#ifdef __HIPCC__
int threads = 256;
#else
int threads = 1024;
#endif
int grid = (input.numel() + threads - 1) / threads;
hipLaunchKernelGGL(( KernelUnpool2dMaxGrad<T>), dim3(grid), dim3(threads), 0, context.stream(),
input.numel(), input_data, indices_data, input_height, input_width,
output_channels, output_data, output_grad_data, output_height,
output_width, input_grad_data);
}
};
template <typename T>
class Unpool3dMaxFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& indices, framework::Tensor* output) {
const int batch_size = input.dims()[0];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output->dims()[1];
const int output_depth = output->dims()[2];
const int output_height = output->dims()[3];
const int output_width = output->dims()[4];
const T* input_data = input.data<T>();
const int* indices_data = indices.data<int>();
T* output_data = output->mutable_data<T>(context.GetPlace());
#ifdef __HIPCC__
int threads = 256;
#else
int threads = 1024;
#endif
int grid = (input.numel() + threads - 1) / threads;
hipLaunchKernelGGL(( KernelUnpool3dMax<T>), dim3(grid), dim3(threads), 0, context.stream(),
input.numel(), input_data, indices_data, input_depth, input_height,
input_width, output_channels, output_data, output_depth, output_height,
output_width);
}
};
/*
* All tensors are in NCDHW format.
*/
template <typename T>
class Unpool3dMaxGradFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& indices,
const framework::Tensor& output,
const framework::Tensor& output_grad,
framework::Tensor* input_grad) {
const int batch_size = input.dims()[0];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output.dims()[1];
const int output_depth = output.dims()[2];
const int output_height = output.dims()[3];
const int output_width = output.dims()[4];
const T* input_data = input.data<T>();
const int* indices_data = indices.data<int>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
#ifdef __HIPCC__
int threads = 256;
#else
int threads = 1024;
#endif
int grid = (input.numel() + threads - 1) / threads;
hipLaunchKernelGGL(( KernelUnpool3dMaxGrad<T>), dim3(grid), dim3(threads), 0, context.stream(),
input.numel(), input_data, indices_data, input_depth, input_height,
input_width, output_channels, output_data, output_grad_data,
output_depth, output_height, output_width, input_grad_data);
}
};
template class Unpool2dMaxGradFunctor<platform::CUDADeviceContext, float>;
template class Unpool2dMaxGradFunctor<platform::CUDADeviceContext, double>;
template class Unpool2dMaxFunctor<platform::CUDADeviceContext, float>;
template class Unpool2dMaxFunctor<platform::CUDADeviceContext, double>;
template class Unpool3dMaxGradFunctor<platform::CUDADeviceContext, float>;
template class Unpool3dMaxGradFunctor<platform::CUDADeviceContext, double>;
template class Unpool3dMaxFunctor<platform::CUDADeviceContext, float>;
template class Unpool3dMaxFunctor<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
| 639b524dc6964be60c9fc312733f9c35e5af9f26.cu | /* Copyright (c) 2022 paddlepaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/unpooling.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T>
__global__ void KernelUnpool2dMax(const int nthreads, const T* input_data,
const int* indices_data,
const int input_height, const int input_width,
const int channels, T* output_data,
const int output_height,
const int output_width) {
CUDA_KERNEL_LOOP(linearIndex, nthreads) {
int c = (linearIndex / input_width / input_height) % channels;
int n = linearIndex / input_width / input_height / channels;
output_data += (n * channels + c) * output_height * output_width;
int maxind = indices_data[linearIndex];
output_data[maxind] = input_data[linearIndex];
}
}
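// KernelUnpool2dMax above scatters each input value to the flat position
// stored in indices_data inside its (n, c) output slice; positions that were
// never a pooling maximum are left untouched, so the caller is presumably
// expected to zero-initialize output_data beforehand.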
template <typename T>
__global__ void KernelUnpool2dMaxGrad(
const int nthreads, const T* input_data, const int* indices_data,
const int input_height, const int input_width, const int channels,
const T* output_data, const T* output_grad, const int output_height,
const int output_width, T* input_grad) {
CUDA_KERNEL_LOOP(linearIndex, nthreads) {
int c = (linearIndex / input_width / input_height) % channels;
int n = linearIndex / input_width / input_height / channels;
output_grad += (n * channels + c) * output_height * output_width;
int maxind = indices_data[linearIndex];
input_grad[linearIndex] = output_grad[maxind];
}
}
/*
* All tensors are in NCHW format.
*/
template <typename T>
__global__ void KernelUnpool3dMax(const int nthreads, const T* input_data,
const int* indices_data,
const int input_depth, const int input_height,
const int input_width, const int channels,
T* output_data, const int output_depth,
const int output_height,
const int output_width) {
CUDA_KERNEL_LOOP(linearIndex, nthreads) {
int c = (linearIndex / input_depth / input_width / input_height) % channels;
int n = linearIndex / input_depth / input_width / input_height / channels;
output_data +=
(n * channels + c) * output_depth * output_height * output_width;
int maxind = indices_data[linearIndex];
output_data[maxind] = input_data[linearIndex];
}
}
template <typename T>
__global__ void KernelUnpool3dMaxGrad(
const int nthreads, const T* input_data, const int* indices_data,
const int input_depth, const int input_height, const int input_width,
const int channels, const T* output_data, const T* output_grad,
const int output_depth, const int output_height, const int output_width,
T* input_grad) {
CUDA_KERNEL_LOOP(linearIndex, nthreads) {
int c = (linearIndex / input_depth / input_width / input_height) % channels;
int n = linearIndex / input_depth / input_width / input_height / channels;
output_grad +=
(n * channels + c) * output_depth * output_height * output_width;
int maxind = indices_data[linearIndex];
input_grad[linearIndex] = output_grad[maxind];
}
}
/*
* All tensors are in NCDHW format.
*/
template <typename T>
class Unpool2dMaxFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& indices, framework::Tensor* output) {
const int batch_size = input.dims()[0];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output->dims()[1];
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
const T* input_data = input.data<T>();
const int* indices_data = indices.data<int>();
T* output_data = output->mutable_data<T>(context.GetPlace());
#ifdef __HIPCC__
int threads = 256;
#else
int threads = 1024;
#endif
int grid = (input.numel() + threads - 1) / threads;
KernelUnpool2dMax<T><<<grid, threads, 0, context.stream()>>>(
input.numel(), input_data, indices_data, input_height, input_width,
output_channels, output_data, output_height, output_width);
}
};
/*
* All tensors are in NCHW format.
*/
template <typename T>
class Unpool2dMaxGradFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& indices,
const framework::Tensor& output,
const framework::Tensor& output_grad,
framework::Tensor* input_grad) {
const int batch_size = input.dims()[0];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output.dims()[1];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const T* input_data = input.data<T>();
const int* indices_data = indices.data<int>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
#ifdef __HIPCC__
int threads = 256;
#else
int threads = 1024;
#endif
int grid = (input.numel() + threads - 1) / threads;
KernelUnpool2dMaxGrad<T><<<grid, threads, 0, context.stream()>>>(
input.numel(), input_data, indices_data, input_height, input_width,
output_channels, output_data, output_grad_data, output_height,
output_width, input_grad_data);
}
};
template <typename T>
class Unpool3dMaxFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& indices, framework::Tensor* output) {
const int batch_size = input.dims()[0];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output->dims()[1];
const int output_depth = output->dims()[2];
const int output_height = output->dims()[3];
const int output_width = output->dims()[4];
const T* input_data = input.data<T>();
const int* indices_data = indices.data<int>();
T* output_data = output->mutable_data<T>(context.GetPlace());
#ifdef __HIPCC__
int threads = 256;
#else
int threads = 1024;
#endif
int grid = (input.numel() + threads - 1) / threads;
KernelUnpool3dMax<T><<<grid, threads, 0, context.stream()>>>(
input.numel(), input_data, indices_data, input_depth, input_height,
input_width, output_channels, output_data, output_depth, output_height,
output_width);
}
};
/*
* All tensors are in NCDHW format.
*/
template <typename T>
class Unpool3dMaxGradFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& indices,
const framework::Tensor& output,
const framework::Tensor& output_grad,
framework::Tensor* input_grad) {
const int batch_size = input.dims()[0];
const int input_depth = input.dims()[2];
const int input_height = input.dims()[3];
const int input_width = input.dims()[4];
const int output_channels = output.dims()[1];
const int output_depth = output.dims()[2];
const int output_height = output.dims()[3];
const int output_width = output.dims()[4];
const T* input_data = input.data<T>();
const int* indices_data = indices.data<int>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
#ifdef __HIPCC__
int threads = 256;
#else
int threads = 1024;
#endif
int grid = (input.numel() + threads - 1) / threads;
KernelUnpool3dMaxGrad<T><<<grid, threads, 0, context.stream()>>>(
input.numel(), input_data, indices_data, input_depth, input_height,
input_width, output_channels, output_data, output_grad_data,
output_depth, output_height, output_width, input_grad_data);
}
};
template class Unpool2dMaxGradFunctor<platform::CUDADeviceContext, float>;
template class Unpool2dMaxGradFunctor<platform::CUDADeviceContext, double>;
template class Unpool2dMaxFunctor<platform::CUDADeviceContext, float>;
template class Unpool2dMaxFunctor<platform::CUDADeviceContext, double>;
template class Unpool3dMaxGradFunctor<platform::CUDADeviceContext, float>;
template class Unpool3dMaxGradFunctor<platform::CUDADeviceContext, double>;
template class Unpool3dMaxFunctor<platform::CUDADeviceContext, float>;
template class Unpool3dMaxFunctor<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
27e8105b5e8c4b70c9527533140f795b57488e92.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "NIN.h"
#include "../common/cuBase.h"
#include "../common/Config.h"
#include "../layers/BranchLayer.h"
/*
 * dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(outputDim * outputDim);
*/
__global__ void g_NIN_feedforward(
float* inputs,
float** ws,
float** bs,
float* outputs,
int inputDim,
int outputDim,
int inputAmount,
int outputAmount,
int inputArea,
int outputArea);
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(THREADS, inputAmount);
*/
template <int INPUTAMOUNT, int THREADS>
__global__ void g_NIN_wgrad_1(float*_inputs,
float* _curDelta,
float** wgradTmp,
int inputDim,
int curDeltaDim,
int inputAmount,
int outputAmount,
int inputArea,
int curDeltaAea);
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(inputAmount);
*/
__global__ void g_NIN_wgrad(float*_inputs,
float* _curDelta,
float** wgradTmp,
int inputDim,
int curDeltaDim,
int inputAmount,
int outputAmount,
int inputArea,
int curDeltaAea);
/*
* dim3 block = dim3(batch, inputAmount);
* dim3 thread= min(inputDim * inputDim, 512);
*/
__global__ void g_NIN_backpropagation(
float* _curDelta,
float**ws,
float* _preDelta,
int curDim,
int preDim,
int preAmount,
int curAmount,
int curArea,
int preArea);
/*
* block = dim3(outputAmount, inputAmount);
* thread= dim3(batch);
*/
__global__ void g_NIN_wgradAdd(
float** _WgradTmp,
float** Wgrad,
float** w,
int batch,
float lambda);
/*
*blocks : dim3(kernelAmount2)
*threads : dim3(256)
*shared : sizeof(float) * 256
*/
__global__ void g_NIN_Bgrad(float* delta,
float** bgrad,
int deltaSize,
int kernelAmount2,
int batch,
int deltaArea);
void NIN::calCost()
{
cost->gpuClear();
hipLaunchKernelGGL(( g_getCost_3), dim3(dim3(w.size())), dim3(dim3(32)), sizeof(float) * 32, 0, cost->getDev(),
w.m_devPoint,
lambda,
w[0]->getLen());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("NIN:getCost");
}
void NIN::feedforward()
{
if((inputs == NULL))
{
printf("NIN init error\n");
exit(0);
}
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(min(outputDim * outputDim, 1024));
hipLaunchKernelGGL(( g_NIN_feedforward), dim3(block), dim3(thread), 0, 0,
inputs->getDev(),
w.m_devPoint,
b.m_devPoint,
outputs->getDev(),
inputDim,
outputDim,
inputAmount,
outputAmount,
inputs->getArea(),
outputs->getArea());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("NIN::g_NIN_feedforward");
#ifdef TEST_CUDA_CODE
static bool init = false;
if(init == false){
cuMatrix<float>* cpp_outputs = new cuMatrix<float>(outputs->rows, outputs->cols, outputs->channels);
for(int ok = 0; ok < outputAmount; ok++){
w[ok]->toCpu();
b[ok]->toCpu();
}
inputs->toCpu();
outputs->toCpu();
for(int bt = 0; bt < batch; bt++){
for(int ok = 0; ok < outputAmount; ok++){
for(int i = 0; i < outputDim; i++){
for(int j = 0; j < outputDim; j++){
float value = 0.0;
for(int ik = 0; ik < inputAmount; ik++){
value += inputs->get(bt, i * inputDim + j, ik) * w[ok]->get(0,0,ik);
}
value += b[ok]->get(0,0,0);
cpp_outputs->set(bt, i * outputDim + j, ok, value);
}
}
}
}
checkMatrixIsSame(outputs, cpp_outputs);
init = true;
}
#endif
if(NON_LINEARITY >= 0){
dim3 thread = dim3(min(256, outputs->getLen()));
dim3 block = dim3(min(256, (outputs->getLen() + thread.x - 1) / thread.x));
hipLaunchKernelGGL(( g_nonLinearity), dim3(block), dim3(thread), 0, 0,
outputs->getDev(),
outputs->getLen(),
NON_LINEARITY);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("NIN::g_nonLinearity");
}
}
void NIN::backpropagation()
{
if(NON_LINEARITY >= 0){
dim3 thread = dim3(min(256, outputs->getLen()));
dim3 block = dim3(min(256, (outputs->getLen() + thread.x - 1) / thread.x));
hipLaunchKernelGGL(( g_dnonLinearity), dim3(block), dim3(thread), 0, 0, curDelta->getDev(),
outputs->getDev(), curDelta->getLen(), NON_LINEARITY);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("NIN::g_dnonLinearity");
}
if(Config::instance()->getLayerByName(m_name)->m_input == std::string("data"))
return;
dim3 block = dim3(batch, inputAmount);
dim3 thread= dim3(min(inputDim * inputDim, 1024));
hipLaunchKernelGGL(( g_NIN_backpropagation), dim3(block), dim3(thread), sizeof(float) * outputAmount, 0,
curDelta->getDev(),
w.m_devPoint,
preDelta->getDev(),
outputDim,
inputDim,
inputAmount,
outputAmount,
curDelta->getArea(),
preDelta->getArea());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("NIN::g_NIN_backpropagation");
#ifdef TEST_CUDA_CODE
static bool init = false;
if(init == false){
curDelta->toCpu();
preDelta->toCpu();
for(int ok = 0; ok < outputAmount; ok++){
w[ok]->toCpu();
}
cuMatrix<float>*cpp_preDelta = new cuMatrix<float>(preDelta->rows, preDelta->cols, preDelta->channels);
for(int bt = 0; bt < batch; bt++){
for(int ik = 0; ik < inputAmount; ik++){
for(int i = 0; i < inputDim; i++){
for(int j = 0; j < inputDim; j++){
float value = 0.0;
for(int ok = 0; ok < outputAmount; ok++){
value += curDelta->get(bt, i * outputDim + j, ok) * w[ok]->get(0,0,ik);
}
cpp_preDelta->set(bt, i * inputDim + j, ik, value);
}
}
}
}
checkMatrixIsSame(preDelta, cpp_preDelta);
init = true;
}
#endif
}
/*
* block = dim3(outputAmount, inputAmount);
* thread= dim3(batch);
*/
__global__ void g_NIN_wgradAdd(
float** _WgradTmp,
float** Wgrad,
float** w,
int batch,
float lambda)
{
extern __shared__ float _sum[];
int ok = blockIdx.x;
int ik = blockIdx.y;
int tid = threadIdx.x;
_sum[tid] = 0;
int inputAmount = gridDim.y;
__syncthreads();
int tlen = batch;
float* wgradTmp = _WgradTmp[ok];
for(int i = 0; i < tlen; i += blockDim.x)
{
int b = i + threadIdx.x;
if(b < tlen)
{
_sum[threadIdx.x] += wgradTmp[ik + b * inputAmount];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < (len >> 1))
{
_sum[tid] += _sum[tid + skip];
}
else{
return;
}
len = (len + 1) >> 1;
}
if(tid == 0)
{
Wgrad[ok][ik] = _sum[0] / batch + w[ok][ik] * lambda;
}
}
void NIN::getGrad()
{
if(outputDim >= 8 && inputAmount == 32){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(32, inputAmount);
hipLaunchKernelGGL(( g_NIN_wgrad_1<32, 32>), dim3(block), dim3(thread), 0, 0,
inputs->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
outputDim,
inputAmount,
outputAmount,
inputs->getArea(),
curDelta->getArea());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_NIN_wgrad_1");
}else if(outputDim >= 8 && inputAmount == 64){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(16, inputAmount);
hipLaunchKernelGGL(( g_NIN_wgrad_1<64, 16>), dim3(block), dim3(thread), 0, 0,
inputs->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
outputDim,
inputAmount,
outputAmount,
inputs->getArea(),
curDelta->getArea());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_NIN_wgrad_1");
}else if(outputDim >=8 && inputAmount == 128){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(8, inputAmount);
hipLaunchKernelGGL(( g_NIN_wgrad_1<128, 8>), dim3(block), dim3(thread), 0, 0,
inputs->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
outputDim,
inputAmount,
outputAmount,
inputs->getArea(),
curDelta->getArea());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_NIN_wgrad_1");
}else{
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(inputAmount);
hipLaunchKernelGGL(( g_NIN_wgrad), dim3(block), dim3(thread), 0, 0,
inputs->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
outputDim,
inputAmount,
outputAmount,
inputs->getArea(),
curDelta->getArea());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_NIN_wgrad");
}
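// Note on the dispatch above: every g_NIN_wgrad_1 instantiation keeps
// INPUTAMOUNT * THREADS at 1024 (32x32, 64x16, 128x8), i.e. 1024 threads and
// a fixed 4 KB of statically allocated shared memory per block, while the
// generic g_NIN_wgrad fallback uses one thread per input map and no
// shared-memory reduction.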
#ifdef TEST_CUDA_CODE
static bool init = false;
if(init == false){
inputs->toCpu();
curDelta->toCpu();
for(size_t i = 0; i < wgradTmp.size(); i++){
wgradTmp[i]->toCpu();
}
cuMatrixVector<float>cpp_wgradTmp;
for(int ok = 0; ok < outputAmount; ok++){
cpp_wgradTmp.push_back(new cuMatrix<float>(wgradTmp[ok]->rows, wgradTmp[ok]->cols, wgradTmp[ok]->channels));
}
for(int bt = 0; bt < batch; bt++){
for(int ik = 0; ik < inputAmount; ik++){
for(int ok = 0; ok < outputAmount; ok++){
float value = 0.0;
for(int i = 0; i < inputDim; i++){
for(int j = 0; j < inputDim; j++){
value += inputs->get(bt, i * inputDim + j, ik) * curDelta->get(bt, i * inputDim + j, ok);
}
}
cpp_wgradTmp[ok]->set(bt, ik, 0, value);
}
}
}
for(size_t i = 0; i < wgradTmp.size(); i++){
checkMatrixIsSame(wgradTmp[i], cpp_wgradTmp[i]);
}
init = true;
}
#endif
dim3 block = dim3(outputAmount, inputAmount);
dim3 thread = dim3(batch);
hipLaunchKernelGGL(( g_NIN_wgradAdd), dim3(block), dim3(thread), sizeof(float) * batch, 0,
wgradTmp.m_devPoint,
wgrad.m_devPoint,
w.m_devPoint,
batch,
lambda);
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("g_NIN_wgradAdd");
block = dim3(outputAmount);
thread= dim3(256);
hipLaunchKernelGGL(( g_NIN_Bgrad), dim3(block), dim3(thread), sizeof(float) * thread.x, 0, curDelta->getDev(),
bgrad.m_devPoint,
outputDim,
outputAmount,
batch,
curDelta->getArea());
checkCudaErrors(hipStreamSynchronize(0));
getLastCudaError("NIN::getGrad::g_NIN_Bgrad");
}
void NIN::updateWeight()
{
dim3 block = outputAmount;
dim3 thread = min(256, w[0]->getLen());
hipLaunchKernelGGL(( g_vecAdd), dim3(block), dim3(thread), 0, 0, momentum_w.m_devPoint, wgrad.m_devPoint, w.m_devPoint,
momentum_b.m_devPoint, bgrad.m_devPoint, b.m_devPoint,
w[0]->getLen(), b[0]->getLen(),
Config::instance()->getMomentum(),
Config::instance()->getLrate(), Config::instance()->getLrate());
}
NIN::NIN(std::string name)
{
m_name = name;
ConfigNIN* config = (ConfigNIN*)Config::instance()->getLayerByName(m_name);
ConvLayerBase * preLayer = (ConvLayerBase*)Layers::instance()->get(config->m_input);
inputs = preLayer->getOutputs();
if(inputs == NULL){
/*inputs = NULL the type must be BranchLayers*/
Assert(Config::instance()->getLayerByName(config->m_input)->isBranchLayer());
Assert(config->m_subInput != std::string("NULL"));
BranchLayer* bl = static_cast<BranchLayer*>(preLayer);
inputs = bl->getSubOutput(config->m_subInput);
preDelta = bl->getSubCurDelta(config->m_subInput);
}else{
preDelta = preLayer->getCurDelta();
}
inputAmount = preLayer->outputAmount;
outputAmount = config->m_amount;
inputDim = preLayer->outputDim;
outputDim = inputDim;
batch = Config::instance()->getBatchSize();
lambda = config->m_weightDecay;
NON_LINEARITY = config->m_nonLinearity;
outputs = new cuMatrix<float>(batch, outputDim * outputDim, outputAmount);
curDelta = new cuMatrix<float>(batch, outputDim * outputDim, outputAmount);
for(int i = 0; i < outputAmount; i++){
w.push_back(new cuMatrix<float>(1, 1, inputAmount));
b.push_back(new cuMatrix<float>(1, 1, 1));
wgrad.push_back(new cuMatrix<float>(1, 1, inputAmount));
bgrad.push_back(new cuMatrix<float>(1, 1, 1));
wgradTmp.push_back(new cuMatrix<float>(batch, inputAmount, 1));
}
w.toGpu();
b.toGpu();
wgrad.toGpu();
bgrad.toGpu();
wgradTmp.toGpu();
for(int i = 0; i < outputAmount; i++){
momentum_w.push_back(new cuMatrix<float>(1, 1, inputAmount));
momentum_b.push_back(new cuMatrix<float>(1, 1, 1));
}
momentum_w.toGpu();
momentum_b.toGpu();
this->initRandom();
Layers::instance()->set(m_name, this);
}
void NIN::save(FILE* file)
{
for(int a = 0; a < (int)w.size(); a++){
w[a]->toCpu();
b[a]->toCpu();
for(int c = 0; c < w[a]->channels; c++){
for(int i = 0; i < w[a]->rows; i++){
for(int j = 0; j < w[a]->cols; j++){
fprintf(file, "%f ", w[a]->get(i, j, c));
}
}
}
for(int c = 0; c < b[a]->channels; c++){
for(int i = 0; i < b[a]->rows; i++){
for(int j = 0; j < b[a]->cols; j++){
fprintf(file, "%f ", b[a]->get(i, j, c));
}
}
}
}
}
void NIN::clearMomentum()
{
for(int i = 0; i < (int)momentum_b.size(); i++){
momentum_b[i]->gpuClear();
}
for(int i = 0; i < (int)momentum_w.size(); i++){
momentum_w[i]->gpuClear();
}
}
void NIN::initRandom()
{
//srand(clock());
float initW = Config::instance()->getLayerByName(m_name)->m_initW;
if(Config::instance()->getLayerByName(m_name)->isGaussian()){
for(int i = 0; i < (int)w.size(); i++){
float epsilon = initW;
for(int c = 0; c < w[i]->channels; c++)
{
float r1 = 0.5f + 4.0f * (rand()) / RAND_MAX;
float r2 = 0.5f + 4.0f * (rand()) / RAND_MAX;
createGaussian(w[i]->getHost() + c * w[i]->getArea(), r1,r2,
1, 1, w[i]->channels,
epsilon);
}
w[i]->toGpu();
}
}
else{
for(int i = 0; i < (int)w.size(); i++){
for(int j = 0; j < (int)w[i]->getLen(); j++){
w[i]->getHost()[j] = initW * (2.0f * rand() / RAND_MAX - 1.0f);
//printf("%f ", w[i]->hostData[j]);
}//printf("\n");
w[i]->toGpu();
}
}
}
void NIN::initFromCheckpoint(FILE* file)
{
float val = 0;
for(int a = 0; a < (int)w.size(); a++){
for(int c = 0; c < w[a]->channels; c++){
for(int i = 0; i < w[a]->rows; i++){
for(int j = 0; j < w[a]->cols; j++){
if(fscanf(file, "%f", &val) == EOF)
{
LOG("scanf fail", "result/log.txt");
}
w[a]->set(i, j, c, val);
}
}
}
for(int c = 0; c < b[a]->channels; c++){
for(int i = 0; i < b[a]->rows; i++){
for(int j = 0; j < b[a]->cols; j++){
if(fscanf(file, "%f", &val) == EOF)
{
LOG("scanf fail", "result/log.txt");
}
b[a]->set(i, j, c, val);
}
}
}
w[a]->toGpu();
b[a]->toGpu();
}
}
/*
 * dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(outputDim * outputDim);
*/
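// g_NIN_feedforward implements the 1x1 convolution of a network-in-network
// layer: for each batch sample sp and output map ok it computes, per pixel,
//   curOutput[idx] = b[ok] + sum_ik w[ok][ik] * input[sp][ik][idx]
// which is why outputDim always equals inputDim.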
__global__ void g_NIN_feedforward(
float* inputs,
float** ws,
float** bs,
float* outputs,
int inputDim,
int outputDim,
int inputAmount,
int outputAmount,
int inputArea,
int outputArea)
{
int sp = blockIdx.x;
int ok = blockIdx.y;
int outputSize2 = outputDim * outputDim;
int inputSize2 = inputDim* inputDim;
float b = bs[ok][0];
float *w = ws[ok];
float* curOutput = outputs + ok * outputArea + sp * outputSize2;
/*convolution*/
for(int tidx = 0; tidx < outputSize2; tidx += blockDim.x)
{
int idx = tidx + threadIdx.x;
if(idx < outputSize2)
{
float val = 0.0;
int skip_add = sp * inputSize2;
for(int ik = 0; ik < inputAmount; ik++){
float* curInput = inputs + skip_add;
val += curInput[idx] * w[ik];
skip_add += inputArea;
}
curOutput[idx] = val + b;
}
}
}
/*
* dim3 block = dim3(batch, inputAmount);
* dim3 thread= min(inputDim * inputDim, 512);
*/
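// g_NIN_backpropagation stages the ik-th weight of every output map in shared
// memory, then computes per pixel
//   preDelta[idx] = sum_ok wShared[ok] * curDelta[sp][ok][idx]
// i.e. the transpose of the 1x1 convolution applied in the forward pass.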
__global__ void g_NIN_backpropagation(
float* _curDelta,
float**ws,
float* _preDelta,
int curDim,
int preDim,
int preAmount,
int curAmount,
int curArea,
int preArea)
{
extern __shared__ float wShared[];
int sp = blockIdx.x;
int ik = blockIdx.y;
for(int id = 0; id < curAmount; id += blockDim.x){
int idx = id + threadIdx.x;
if(idx < curAmount){
wShared[idx] = ws[idx][ik];
}
}
__syncthreads();
int curSize2 = curDim * curDim;
int preSize2 = preDim * preDim;
float *preDelta = _preDelta + ik * preArea + sp * preSize2;
for (int tidx = 0; tidx < preSize2; tidx += blockDim.x) {
int idx = tidx + threadIdx.x;
if (idx < preSize2) {
float val = 0.0;
int skip_add = sp * curSize2;
for(int ok = 0; ok < curAmount; ok++){
float *curDelta = _curDelta + skip_add;
val += curDelta[idx] * wShared[ok];
skip_add += curArea;
}
preDelta[idx] = val;
}
}
}
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(inputAmount);
*/
__global__ void g_NIN_wgrad(float*_inputs,
float* _curDelta,
float** wgradTmp,
int inputDim,
int curDeltaDim,
int inputAmount,
int outputAmount,
int inputArea,
int curDeltaAea)
{
int ok = blockIdx.y;
int ik = threadIdx.x;
int b = blockIdx.x;
int inputSize2 = inputDim * inputDim;
int curDeltaSize2 = curDeltaDim * curDeltaDim;
float* input = _inputs + ik * inputArea + b * inputSize2;
float* curDelta = _curDelta + ok * curDeltaAea + b * curDeltaSize2;
float val = 0.0;
for(int x = 0; x < inputSize2; x++){
val += input[x] * curDelta[x];
}
wgradTmp[ok][ik + b * inputAmount] = val;
}
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(THREADS, inputAmount);
*/
template <int INPUTAMOUNT, int THREADS>
__global__ void g_NIN_wgrad_1(float*_inputs,
float* _curDelta,
float** wgradTmp,
int inputDim,
int curDeltaDim,
int inputAmount,
int outputAmount,
int inputArea,
int curDeltaAea)
{
__shared__ float __sum[INPUTAMOUNT][THREADS];
int ok = blockIdx.y;
int ik = threadIdx.y;
int b = blockIdx.x;
float* _sum = __sum[ik];
int inputSize2 = inputDim * inputDim;
int curDeltaSize2 = curDeltaDim * curDeltaDim;
float* input = _inputs + ik * inputArea + b * inputSize2;
float* curDelta = _curDelta + ok * curDeltaAea + b * curDeltaSize2;
float val = 0.0;
for(int x = 0; x < inputSize2; x += blockDim.x){
int idx = x + threadIdx.x;
if(idx < inputSize2){
val += input[idx] * curDelta[idx];
}
}
_sum[threadIdx.x] = val;
__syncthreads();
int len = THREADS;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
if(threadIdx.x == 0){
wgradTmp[ok][ik + b * inputAmount] = _sum[0];
}
}
/*
* block = dim3(outputAmount);
* thread= dim3(256);
* shared : sizeof(float) * 256
*/
__global__ void g_NIN_Bgrad(float* delta,
float** bgrad,
int deltaSize,
int kernelAmount2,
int batch,
int deltaArea)
{
extern __shared__ float _sum[];
int k2 = blockIdx.x;
_sum[threadIdx.x] = 0.0;
__syncthreads();
int deltaSize2 = deltaSize * deltaSize;
int tlen = deltaSize2 * batch;
for(int i = 0; i < tlen; i += blockDim.x)
{
int idx = i + threadIdx.x;
if(idx < tlen)
{
int s = idx / (deltaSize2);//s
int t2 = idx % (deltaSize2);//x,y
int id =
deltaArea * k2 + s * deltaSize2 + t2;
_sum[threadIdx.x] += delta[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
if(threadIdx.x == 0)
{
bgrad[k2][0] = _sum[0] / batch;
}
}
| 27e8105b5e8c4b70c9527533140f795b57488e92.cu | #include "NIN.h"
#include "../common/cuBase.h"
#include "../common/Config.h"
#include "../layers/BranchLayer.h"
/*
 * dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(outputDim * outputDim);
*/
__global__ void g_NIN_feedforward(
float* inputs,
float** ws,
float** bs,
float* outputs,
int inputDim,
int outputDim,
int inputAmount,
int outputAmount,
int inputArea,
int outputArea);
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(THREADS, inputAmount);
*/
template <int INPUTAMOUNT, int THREADS>
__global__ void g_NIN_wgrad_1(float*_inputs,
float* _curDelta,
float** wgradTmp,
int inputDim,
int curDeltaDim,
int inputAmount,
int outputAmount,
int inputArea,
int curDeltaAea);
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(inputAmount);
*/
__global__ void g_NIN_wgrad(float*_inputs,
float* _curDelta,
float** wgradTmp,
int inputDim,
int curDeltaDim,
int inputAmount,
int outputAmount,
int inputArea,
int curDeltaAea);
/*
* dim3 block = dim3(batch, inputAmount);
* dim3 thread= min(inputDim * inputDim, 512);
*/
__global__ void g_NIN_backpropagation(
float* _curDelta,
float**ws,
float* _preDelta,
int curDim,
int preDim,
int preAmount,
int curAmount,
int curArea,
int preArea);
/*
* block = dim3(outputAmount, inputAmount);
* thread= dim3(batch);
*/
__global__ void g_NIN_wgradAdd(
float** _WgradTmp,
float** Wgrad,
float** w,
int batch,
float lambda);
/*
*blocks : dim3(kernelAmount2)
*threads : dim3(256)
*shared : sizeof(float) * 256
*/
__global__ void g_NIN_Bgrad(float* delta,
float** bgrad,
int deltaSize,
int kernelAmount2,
int batch,
int deltaArea);
void NIN::calCost()
{
cost->gpuClear();
g_getCost_3<<<dim3(w.size()), dim3(32), sizeof(float) * 32>>>(cost->getDev(),
w.m_devPoint,
lambda,
w[0]->getLen());
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("NIN:getCost");
}
void NIN::feedforward()
{
if((inputs == NULL))
{
printf("NIN init error\n");
exit(0);
}
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(min(outputDim * outputDim, 1024));
g_NIN_feedforward<<<block, thread>>>(
inputs->getDev(),
w.m_devPoint,
b.m_devPoint,
outputs->getDev(),
inputDim,
outputDim,
inputAmount,
outputAmount,
inputs->getArea(),
outputs->getArea());
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("NIN::g_NIN_feedforward");
#ifdef TEST_CUDA_CODE
static bool init = false;
if(init == false){
cuMatrix<float>* cpp_outputs = new cuMatrix<float>(outputs->rows, outputs->cols, outputs->channels);
for(int ok = 0; ok < outputAmount; ok++){
w[ok]->toCpu();
b[ok]->toCpu();
}
inputs->toCpu();
outputs->toCpu();
for(int bt = 0; bt < batch; bt++){
for(int ok = 0; ok < outputAmount; ok++){
for(int i = 0; i < outputDim; i++){
for(int j = 0; j < outputDim; j++){
float value = 0.0;
for(int ik = 0; ik < inputAmount; ik++){
value += inputs->get(bt, i * inputDim + j, ik) * w[ok]->get(0,0,ik);
}
value += b[ok]->get(0,0,0);
cpp_outputs->set(bt, i * outputDim + j, ok, value);
}
}
}
}
checkMatrixIsSame(outputs, cpp_outputs);
init = true;
}
#endif
if(NON_LINEARITY >= 0){
dim3 thread = dim3(min(256, outputs->getLen()));
dim3 block = dim3(min(256, (outputs->getLen() + thread.x - 1) / thread.x));
g_nonLinearity<<<block, thread>>>(
outputs->getDev(),
outputs->getLen(),
NON_LINEARITY);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("NIN::g_nonLinearity");
}
}
void NIN::backpropagation()
{
if(NON_LINEARITY >= 0){
dim3 thread = dim3(min(256, outputs->getLen()));
dim3 block = dim3(min(256, (outputs->getLen() + thread.x - 1) / thread.x));
g_dnonLinearity<<<block, thread>>>(curDelta->getDev(),
outputs->getDev(), curDelta->getLen(), NON_LINEARITY);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("NIN::g_dnonLinearity");
}
if(Config::instance()->getLayerByName(m_name)->m_input == std::string("data"))
return;
dim3 block = dim3(batch, inputAmount);
dim3 thread= dim3(min(inputDim * inputDim, 1024));
g_NIN_backpropagation<<<block, thread, sizeof(float) * outputAmount>>>(
curDelta->getDev(),
w.m_devPoint,
preDelta->getDev(),
outputDim,
inputDim,
inputAmount,
outputAmount,
curDelta->getArea(),
preDelta->getArea());
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("NIN::g_NIN_backpropagation");
#ifdef TEST_CUDA_CODE
static bool init = false;
if(init == false){
curDelta->toCpu();
preDelta->toCpu();
for(int ok = 0; ok < outputAmount; ok++){
w[ok]->toCpu();
}
cuMatrix<float>*cpp_preDelta = new cuMatrix<float>(preDelta->rows, preDelta->cols, preDelta->channels);
for(int bt = 0; bt < batch; bt++){
for(int ik = 0; ik < inputAmount; ik++){
for(int i = 0; i < inputDim; i++){
for(int j = 0; j < inputDim; j++){
float value = 0.0;
for(int ok = 0; ok < outputAmount; ok++){
value += curDelta->get(bt, i * outputDim + j, ok) * w[ok]->get(0,0,ik);
}
cpp_preDelta->set(bt, i * inputDim + j, ik, value);
}
}
}
}
checkMatrixIsSame(preDelta, cpp_preDelta);
init = true;
}
#endif
}
/*
* block = dim3(outputAmount, inputAmount);
* thread= dim3(batch);
*/
__global__ void g_NIN_wgradAdd(
float** _WgradTmp,
float** Wgrad,
float** w,
int batch,
float lambda)
{
extern __shared__ float _sum[];
int ok = blockIdx.x;
int ik = blockIdx.y;
int tid = threadIdx.x;
_sum[tid] = 0;
int inputAmount = gridDim.y;
__syncthreads();
int tlen = batch;
float* wgradTmp = _WgradTmp[ok];
for(int i = 0; i < tlen; i += blockDim.x)
{
int b = i + threadIdx.x;
if(b < tlen)
{
_sum[threadIdx.x] += wgradTmp[ik + b * inputAmount];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < (len >> 1))
{
_sum[tid] += _sum[tid + skip];
}
else{
return;
}
len = (len + 1) >> 1;
}
if(tid == 0)
{
Wgrad[ok][ik] = _sum[0] / batch + w[ok][ik] * lambda;
}
}
void NIN::getGrad()
{
if(outputDim >= 8 && inputAmount == 32){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(32, inputAmount);
g_NIN_wgrad_1<32, 32><<<block, thread>>>(
inputs->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
outputDim,
inputAmount,
outputAmount,
inputs->getArea(),
curDelta->getArea());
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("g_NIN_wgrad_1");
}else if(outputDim >= 8 && inputAmount == 64){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(16, inputAmount);
g_NIN_wgrad_1<64, 16><<<block, thread>>>(
inputs->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
outputDim,
inputAmount,
outputAmount,
inputs->getArea(),
curDelta->getArea());
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("g_NIN_wgrad_1");
}else if(outputDim >=8 && inputAmount == 128){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(8, inputAmount);
g_NIN_wgrad_1<128, 8><<<block, thread>>>(
inputs->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
outputDim,
inputAmount,
outputAmount,
inputs->getArea(),
curDelta->getArea());
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("g_NIN_wgrad_1");
}else{
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(inputAmount);
g_NIN_wgrad<<<block, thread>>>(
inputs->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
outputDim,
inputAmount,
outputAmount,
inputs->getArea(),
curDelta->getArea());
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("g_NIN_wgrad");
}
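// Note on the dispatch above: every g_NIN_wgrad_1 instantiation keeps
// INPUTAMOUNT * THREADS at 1024 (32x32, 64x16, 128x8), i.e. 1024 threads and
// a fixed 4 KB of statically allocated shared memory per block, while the
// generic g_NIN_wgrad fallback uses one thread per input map and no
// shared-memory reduction.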
#ifdef TEST_CUDA_CODE
static bool init = false;
if(init == false){
inputs->toCpu();
curDelta->toCpu();
for(size_t i = 0; i < wgradTmp.size(); i++){
wgradTmp[i]->toCpu();
}
cuMatrixVector<float>cpp_wgradTmp;
for(int ok = 0; ok < outputAmount; ok++){
cpp_wgradTmp.push_back(new cuMatrix<float>(wgradTmp[ok]->rows, wgradTmp[ok]->cols, wgradTmp[ok]->channels));
}
for(int bt = 0; bt < batch; bt++){
for(int ik = 0; ik < inputAmount; ik++){
for(int ok = 0; ok < outputAmount; ok++){
float value = 0.0;
for(int i = 0; i < inputDim; i++){
for(int j = 0; j < inputDim; j++){
value += inputs->get(bt, i * inputDim + j, ik) * curDelta->get(bt, i * inputDim + j, ok);
}
}
cpp_wgradTmp[ok]->set(bt, ik, 0, value);
}
}
}
for(size_t i = 0; i < wgradTmp.size(); i++){
checkMatrixIsSame(wgradTmp[i], cpp_wgradTmp[i]);
}
init = true;
}
#endif
dim3 block = dim3(outputAmount, inputAmount);
dim3 thread = dim3(batch);
g_NIN_wgradAdd<<<block, thread, sizeof(float) * batch>>>(
wgradTmp.m_devPoint,
wgrad.m_devPoint,
w.m_devPoint,
batch,
lambda);
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("g_NIN_wgradAdd");
block = dim3(outputAmount);
thread= dim3(256);
g_NIN_Bgrad<<<block, thread, sizeof(float) * thread.x>>>(curDelta->getDev(),
bgrad.m_devPoint,
outputDim,
outputAmount,
batch,
curDelta->getArea());
checkCudaErrors(cudaStreamSynchronize(0));
getLastCudaError("NIN::getGrad::g_NIN_Bgrad");
}
void NIN::updateWeight()
{
dim3 block = outputAmount;
dim3 thread = min(256, w[0]->getLen());
g_vecAdd<<<block, thread>>>(momentum_w.m_devPoint, wgrad.m_devPoint, w.m_devPoint,
momentum_b.m_devPoint, bgrad.m_devPoint, b.m_devPoint,
w[0]->getLen(), b[0]->getLen(),
Config::instance()->getMomentum(),
Config::instance()->getLrate(), Config::instance()->getLrate());
}
NIN::NIN(std::string name)
{
m_name = name;
ConfigNIN* config = (ConfigNIN*)Config::instance()->getLayerByName(m_name);
ConvLayerBase * preLayer = (ConvLayerBase*)Layers::instance()->get(config->m_input);
inputs = preLayer->getOutputs();
if(inputs == NULL){
/*inputs = NULL the type must be BranchLayers*/
Assert(Config::instance()->getLayerByName(config->m_input)->isBranchLayer());
Assert(config->m_subInput != std::string("NULL"));
BranchLayer* bl = static_cast<BranchLayer*>(preLayer);
inputs = bl->getSubOutput(config->m_subInput);
preDelta = bl->getSubCurDelta(config->m_subInput);
}else{
preDelta = preLayer->getCurDelta();
}
inputAmount = preLayer->outputAmount;
outputAmount = config->m_amount;
inputDim = preLayer->outputDim;
outputDim = inputDim;
batch = Config::instance()->getBatchSize();
lambda = config->m_weightDecay;
NON_LINEARITY = config->m_nonLinearity;
outputs = new cuMatrix<float>(batch, outputDim * outputDim, outputAmount);
curDelta = new cuMatrix<float>(batch, outputDim * outputDim, outputAmount);
for(int i = 0; i < outputAmount; i++){
w.push_back(new cuMatrix<float>(1, 1, inputAmount));
b.push_back(new cuMatrix<float>(1, 1, 1));
wgrad.push_back(new cuMatrix<float>(1, 1, inputAmount));
bgrad.push_back(new cuMatrix<float>(1, 1, 1));
wgradTmp.push_back(new cuMatrix<float>(batch, inputAmount, 1));
}
w.toGpu();
b.toGpu();
wgrad.toGpu();
bgrad.toGpu();
wgradTmp.toGpu();
for(int i = 0; i < outputAmount; i++){
momentum_w.push_back(new cuMatrix<float>(1, 1, inputAmount));
momentum_b.push_back(new cuMatrix<float>(1, 1, 1));
}
momentum_w.toGpu();
momentum_b.toGpu();
this->initRandom();
Layers::instance()->set(m_name, this);
}
void NIN::save(FILE* file)
{
for(int a = 0; a < (int)w.size(); a++){
w[a]->toCpu();
b[a]->toCpu();
for(int c = 0; c < w[a]->channels; c++){
for(int i = 0; i < w[a]->rows; i++){
for(int j = 0; j < w[a]->cols; j++){
fprintf(file, "%f ", w[a]->get(i, j, c));
}
}
}
for(int c = 0; c < b[a]->channels; c++){
for(int i = 0; i < b[a]->rows; i++){
for(int j = 0; j < b[a]->cols; j++){
fprintf(file, "%f ", b[a]->get(i, j, c));
}
}
}
}
}
void NIN::clearMomentum()
{
for(int i = 0; i < (int)momentum_b.size(); i++){
momentum_b[i]->gpuClear();
}
for(int i = 0; i < (int)momentum_w.size(); i++){
momentum_w[i]->gpuClear();
}
}
void NIN::initRandom()
{
//srand(clock());
float initW = Config::instance()->getLayerByName(m_name)->m_initW;
if(Config::instance()->getLayerByName(m_name)->isGaussian()){
for(int i = 0; i < (int)w.size(); i++){
float epsilon = initW;
for(int c = 0; c < w[i]->channels; c++)
{
float r1 = 0.5f + 4.0f * (rand()) / RAND_MAX;
float r2 = 0.5f + 4.0f * (rand()) / RAND_MAX;
createGaussian(w[i]->getHost() + c * w[i]->getArea(), r1,r2,
1, 1, w[i]->channels,
epsilon);
}
w[i]->toGpu();
}
}
else{
for(int i = 0; i < (int)w.size(); i++){
for(int j = 0; j < (int)w[i]->getLen(); j++){
w[i]->getHost()[j] = initW * (2.0f * rand() / RAND_MAX - 1.0f);
//printf("%f ", w[i]->hostData[j]);
}//printf("\n");
w[i]->toGpu();
}
}
}
void NIN::initFromCheckpoint(FILE* file)
{
float val = 0;
for(int a = 0; a < (int)w.size(); a++){
for(int c = 0; c < w[a]->channels; c++){
for(int i = 0; i < w[a]->rows; i++){
for(int j = 0; j < w[a]->cols; j++){
if(fscanf(file, "%f", &val) == EOF)
{
LOG("scanf fail", "result/log.txt");
}
w[a]->set(i, j, c, val);
}
}
}
for(int c = 0; c < b[a]->channels; c++){
for(int i = 0; i < b[a]->rows; i++){
for(int j = 0; j < b[a]->cols; j++){
if(fscanf(file, "%f", &val) == EOF)
{
LOG("scanf fail", "result/log.txt");
}
b[a]->set(i, j, c, val);
}
}
}
w[a]->toGpu();
b[a]->toGpu();
}
}
/*
 * dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(outputDim * outputDim);
*/
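// g_NIN_feedforward implements the 1x1 convolution of a network-in-network
// layer: for each batch sample sp and output map ok it computes, per pixel,
//   curOutput[idx] = b[ok] + sum_ik w[ok][ik] * input[sp][ik][idx]
// which is why outputDim always equals inputDim.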
__global__ void g_NIN_feedforward(
float* inputs,
float** ws,
float** bs,
float* outputs,
int inputDim,
int outputDim,
int inputAmount,
int outputAmount,
int inputArea,
int outputArea)
{
int sp = blockIdx.x;
int ok = blockIdx.y;
int outputSize2 = outputDim * outputDim;
int inputSize2 = inputDim* inputDim;
float b = bs[ok][0];
float *w = ws[ok];
float* curOutput = outputs + ok * outputArea + sp * outputSize2;
/*convolution*/
for(int tidx = 0; tidx < outputSize2; tidx += blockDim.x)
{
int idx = tidx + threadIdx.x;
if(idx < outputSize2)
{
float val = 0.0;
int skip_add = sp * inputSize2;
for(int ik = 0; ik < inputAmount; ik++){
float* curInput = inputs + skip_add;
val += curInput[idx] * w[ik];
skip_add += inputArea;
}
curOutput[idx] = val + b;
}
}
}
/*
* dim3 block = dim3(batch, inputAmount);
* dim3 thread= min(inputDim * inputDim, 512);
*/
__global__ void g_NIN_backpropagation(
float* _curDelta,
float**ws,
float* _preDelta,
int curDim,
int preDim,
int preAmount,
int curAmount,
int curArea,
int preArea)
{
extern __shared__ float wShared[];
int sp = blockIdx.x;
int ik = blockIdx.y;
for(int id = 0; id < curAmount; id += blockDim.x){
int idx = id + threadIdx.x;
if(idx < curAmount){
wShared[idx] = ws[idx][ik];
}
}
__syncthreads();
int curSize2 = curDim * curDim;
int preSize2 = preDim * preDim;
float *preDelta = _preDelta + ik * preArea + sp * preSize2;
for (int tidx = 0; tidx < preSize2; tidx += blockDim.x) {
int idx = tidx + threadIdx.x;
if (idx < preSize2) {
float val = 0.0;
int skip_add = sp * curSize2;
for(int ok = 0; ok < curAmount; ok++){
float *curDelta = _curDelta + skip_add;
val += curDelta[idx] * wShared[ok];
skip_add += curArea;
}
preDelta[idx] = val;
}
}
}
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(inputAmount);
*/
__global__ void g_NIN_wgrad(float*_inputs,
float* _curDelta,
float** wgradTmp,
int inputDim,
int curDeltaDim,
int inputAmount,
int outputAmount,
int inputArea,
int curDeltaAea)
{
int ok = blockIdx.y;
int ik = threadIdx.x;
int b = blockIdx.x;
int inputSize2 = inputDim * inputDim;
int curDeltaSize2 = curDeltaDim * curDeltaDim;
float* input = _inputs + ik * inputArea + b * inputSize2;
float* curDelta = _curDelta + ok * curDeltaAea + b * curDeltaSize2;
float val = 0.0;
for(int x = 0; x < inputSize2; x++){
val += input[x] * curDelta[x];
}
wgradTmp[ok][ik + b * inputAmount] = val;
}
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(THREADS, inputAmount);
*/
template <int INPUTAMOUNT, int THREADS>
__global__ void g_NIN_wgrad_1(float*_inputs,
float* _curDelta,
float** wgradTmp,
int inputDim,
int curDeltaDim,
int inputAmount,
int outputAmount,
int inputArea,
int curDeltaAea)
{
__shared__ float __sum[INPUTAMOUNT][THREADS];
int ok = blockIdx.y;
int ik = threadIdx.y;
int b = blockIdx.x;
float* _sum = __sum[ik];
int inputSize2 = inputDim * inputDim;
int curDeltaSize2 = curDeltaDim * curDeltaDim;
float* input = _inputs + ik * inputArea + b * inputSize2;
float* curDelta = _curDelta + ok * curDeltaAea + b * curDeltaSize2;
float val = 0.0;
for(int x = 0; x < inputSize2; x += blockDim.x){
int idx = x + threadIdx.x;
if(idx < inputSize2){
val += input[idx] * curDelta[idx];
}
}
_sum[threadIdx.x] = val;
__syncthreads();
int len = THREADS;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
if(threadIdx.x == 0){
wgradTmp[ok][ik + b * inputAmount] = _sum[0];
}
}
/*
* block = dim3(outputAmount);
* thread= dim3(256);
* shared : sizeof(float) * 256
*/
__global__ void g_NIN_Bgrad(float* delta,
float** bgrad,
int deltaSize,
int kernelAmount2,
int batch,
int deltaArea)
{
extern __shared__ float _sum[];
int k2 = blockIdx.x;
_sum[threadIdx.x] = 0.0;
__syncthreads();
int deltaSize2 = deltaSize * deltaSize;
int tlen = deltaSize2 * batch;
for(int i = 0; i < tlen; i += blockDim.x)
{
int idx = i + threadIdx.x;
if(idx < tlen)
{
int s = idx / (deltaSize2);//s
int t2 = idx % (deltaSize2);//x,y
int id =
deltaArea * k2 + s * deltaSize2 + t2;
_sum[threadIdx.x] += delta[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
if(threadIdx.x == 0)
{
bgrad[k2][0] = _sum[0] / batch;
}
}
|
e1dd2207e0314309b0cd117d3499b086b1bf1271.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************<SCHWARZ LIB LICENSE>***********************
Copyright (c) 2019, the SCHWARZ LIB authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<SCHWARZ LIB LICENSE>*************************/
#include <functional>
#include <hip/hip_runtime.h>
#define BLOCK_SIZE 512
namespace schwz {
template <typename ValueType, typename IndexType, typename AdditionalOperation>
__global__ void gather_kernel(const IndexType num_elems,
const IndexType *indices,
const ValueType *gather_from,
ValueType *gather_into, AdditionalOperation op)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < num_elems) {
gather_into[row] = op(gather_into[row], gather_from[indices[row]]);
}
}
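// The wrappers below differ only in the binary operation handed to
// gather_kernel: overwrite the existing value, accumulate into it, take the
// difference, or average the two. A hedged usage sketch (d_idx, d_from and
// d_into are assumed to be device buffers with at least n elements):
//
//   schwz::gather_add_values<double, int>(n, d_idx, d_from, d_into);
//   // afterwards: d_into[i] == old d_into[i] + d_from[d_idx[i]]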
template <typename ValueType, typename IndexType>
void gather_values(const IndexType num_elems, const IndexType *indices,
const ValueType *gather_from, ValueType *gather_into)
{
dim3 grid((num_elems + BLOCK_SIZE - 1) / BLOCK_SIZE, 1, 1);
auto op = [] __device__(const ValueType &x, const ValueType &y) {
return y;
};
hipLaunchKernelGGL(( gather_kernel), dim3(grid), dim3(BLOCK_SIZE), 0, 0, num_elems, indices, gather_from,
gather_into, op);
}
template <typename ValueType, typename IndexType>
void gather_add_values(const IndexType num_elems, const IndexType *indices,
const ValueType *gather_from, ValueType *gather_into)
{
dim3 grid((num_elems + BLOCK_SIZE - 1) / BLOCK_SIZE, 1, 1);
auto op = [] __device__(const ValueType &x, const ValueType &y) {
return y + x;
};
hipLaunchKernelGGL(( gather_kernel), dim3(grid), dim3(BLOCK_SIZE), 0, 0, num_elems, indices, gather_from,
gather_into, op);
}
template <typename ValueType, typename IndexType>
void gather_diff_values(const IndexType num_elems, const IndexType *indices,
const ValueType *gather_from, ValueType *gather_into)
{
dim3 grid((num_elems + BLOCK_SIZE - 1) / BLOCK_SIZE, 1, 1);
auto op = [] __device__(const ValueType &x, const ValueType &y) {
return y - x;
};
hipLaunchKernelGGL(( gather_kernel), dim3(grid), dim3(BLOCK_SIZE), 0, 0, num_elems, indices, gather_from,
gather_into, op);
}
template <typename ValueType, typename IndexType>
void gather_avg_values(const IndexType num_elems, const IndexType *indices,
const ValueType *gather_from, ValueType *gather_into)
{
dim3 grid((num_elems + BLOCK_SIZE - 1) / BLOCK_SIZE, 1, 1);
auto op = [] __device__(const ValueType &x, const ValueType &y) {
return (y + x) / 2;
};
hipLaunchKernelGGL(( gather_kernel), dim3(grid), dim3(BLOCK_SIZE), 0, 0, num_elems, indices, gather_from,
gather_into, op);
}
#define INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(_macro) \
template _macro(float, int); \
template _macro(double, int); \
template _macro(int, int); \
template _macro(long int, int); \
template _macro(float, long int); \
template _macro(double, long int); \
template _macro(int, long int); \
template _macro(long int, long int);
#define DECLARE_GATHER(ValueType, IndexType) \
void gather_values(const IndexType, const IndexType *, const ValueType *, \
ValueType *)
INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(DECLARE_GATHER);
#undef DECLARE_GATHER
#define DECLARE_GATHER_ADD(ValueType, IndexType) \
void gather_add_values(const IndexType, const IndexType *, \
const ValueType *, ValueType *)
INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(DECLARE_GATHER_ADD);
#undef DECLARE_GATHER_ADD
#define DECLARE_GATHER_DIFF(ValueType, IndexType) \
void gather_diff_values(const IndexType, const IndexType *, \
const ValueType *, ValueType *)
INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(DECLARE_GATHER_DIFF);
#undef DECLARE_GATHER_DIFF
#define DECLARE_GATHER_AVG(ValueType, IndexType) \
void gather_avg_values(const IndexType, const IndexType *, \
const ValueType *, ValueType *)
INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(DECLARE_GATHER_AVG);
#undef DECLARE_GATHER_AVG
} // namespace schwz | e1dd2207e0314309b0cd117d3499b086b1bf1271.cu |
/*******************************<SCHWARZ LIB LICENSE>***********************
Copyright (c) 2019, the SCHWARZ LIB authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************<SCHWARZ LIB LICENSE>*************************/
#include <functional>
#include <cuda_runtime.h>
#define BLOCK_SIZE 512
namespace schwz {
template <typename ValueType, typename IndexType, typename AdditionalOperation>
__global__ void gather_kernel(const IndexType num_elems,
const IndexType *indices,
const ValueType *gather_from,
ValueType *gather_into, AdditionalOperation op)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < num_elems) {
gather_into[row] = op(gather_into[row], gather_from[indices[row]]);
}
}
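// gather_kernel applies a caller-supplied binary functor element-wise:
//   gather_into[row] = op(gather_into[row], gather_from[indices[row]])
// The __device__ lambdas passed to it below are defined in host code, which
// for nvcc generally requires compiling with --extended-lambda (previously
// --expt-extended-lambda).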
template <typename ValueType, typename IndexType>
void gather_values(const IndexType num_elems, const IndexType *indices,
const ValueType *gather_from, ValueType *gather_into)
{
dim3 grid((num_elems + BLOCK_SIZE - 1) / BLOCK_SIZE, 1, 1);
auto op = [] __device__(const ValueType &x, const ValueType &y) {
return y;
};
gather_kernel<<<grid, BLOCK_SIZE, 0, 0>>>(num_elems, indices, gather_from,
gather_into, op);
}
template <typename ValueType, typename IndexType>
void gather_add_values(const IndexType num_elems, const IndexType *indices,
const ValueType *gather_from, ValueType *gather_into)
{
dim3 grid((num_elems + BLOCK_SIZE - 1) / BLOCK_SIZE, 1, 1);
auto op = [] __device__(const ValueType &x, const ValueType &y) {
return y + x;
};
gather_kernel<<<grid, BLOCK_SIZE, 0, 0>>>(num_elems, indices, gather_from,
gather_into, op);
}
template <typename ValueType, typename IndexType>
void gather_diff_values(const IndexType num_elems, const IndexType *indices,
const ValueType *gather_from, ValueType *gather_into)
{
dim3 grid((num_elems + BLOCK_SIZE - 1) / BLOCK_SIZE, 1, 1);
auto op = [] __device__(const ValueType &x, const ValueType &y) {
return y - x;
};
gather_kernel<<<grid, BLOCK_SIZE, 0, 0>>>(num_elems, indices, gather_from,
gather_into, op);
}
template <typename ValueType, typename IndexType>
void gather_avg_values(const IndexType num_elems, const IndexType *indices,
const ValueType *gather_from, ValueType *gather_into)
{
dim3 grid((num_elems + BLOCK_SIZE - 1) / BLOCK_SIZE, 1, 1);
auto op = [] __device__(const ValueType &x, const ValueType &y) {
return (y + x) / 2;
};
gather_kernel<<<grid, BLOCK_SIZE, 0, 0>>>(num_elems, indices, gather_from,
gather_into, op);
}
#define INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(_macro) \
template _macro(float, int); \
template _macro(double, int); \
template _macro(int, int); \
template _macro(long int, int); \
template _macro(float, long int); \
template _macro(double, long int); \
template _macro(int, long int); \
template _macro(long int, long int);
#define DECLARE_GATHER(ValueType, IndexType) \
void gather_values(const IndexType, const IndexType *, const ValueType *, \
ValueType *)
INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(DECLARE_GATHER);
#undef DECLARE_GATHER
#define DECLARE_GATHER_ADD(ValueType, IndexType) \
void gather_add_values(const IndexType, const IndexType *, \
const ValueType *, ValueType *)
INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(DECLARE_GATHER_ADD);
#undef DECLARE_GATHER_ADD
#define DECLARE_GATHER_DIFF(ValueType, IndexType) \
void gather_diff_values(const IndexType, const IndexType *, \
const ValueType *, ValueType *)
INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(DECLARE_GATHER_DIFF);
#undef DECLARE_GATHER_DIFF
#define DECLARE_GATHER_AVG(ValueType, IndexType) \
void gather_avg_values(const IndexType, const IndexType *, \
const ValueType *, ValueType *)
INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(DECLARE_GATHER_AVG);
#undef DECLARE_GATHER_AVG
} // namespace schwz |
ead2a594607374843ce9745860ec1a22aef870de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel4_plus_2_a;
int xdim0_update_halo_kernel4_plus_2_a_h = -1;
__constant__ int ydim0_update_halo_kernel4_plus_2_a;
int ydim0_update_halo_kernel4_plus_2_a_h = -1;
__constant__ int xdim1_update_halo_kernel4_plus_2_a;
int xdim1_update_halo_kernel4_plus_2_a_h = -1;
__constant__ int ydim1_update_halo_kernel4_plus_2_a;
int ydim1_update_halo_kernel4_plus_2_a_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel4_plus_2_a * (y) + \
xdim0_update_halo_kernel4_plus_2_a * ydim0_update_halo_kernel4_plus_2_a * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel4_plus_2_a * (y) + \
xdim1_update_halo_kernel4_plus_2_a * ydim1_update_halo_kernel4_plus_2_a * \
(z))
// user function
__device__
inline void
update_halo_kernel4_plus_2_a(double *vol_flux_y, double *mass_flux_y,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Y] == 1)
vol_flux_y[OPS_ACC0(0, 0, 0)] = vol_flux_y[OPS_ACC0(2, 0, 0)];
if (fields[FIELD_MASS_FLUX_Y] == 1)
mass_flux_y[OPS_ACC1(0, 0, 0)] = mass_flux_y[OPS_ACC1(2, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel4_plus_2_a(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel4_plus_2_a +
idx_z * 1 * 1 * xdim0_update_halo_kernel4_plus_2_a *
ydim0_update_halo_kernel4_plus_2_a;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel4_plus_2_a +
idx_z * 1 * 1 * xdim1_update_halo_kernel4_plus_2_a *
ydim1_update_halo_kernel4_plus_2_a;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel4_plus_2_a(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel4_plus_2_a(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 122))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(122, "update_halo_kernel4_plus_2_a");
OPS_kernels[122].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel4_plus_2_a_h ||
ydim0 != ydim0_update_halo_kernel4_plus_2_a_h ||
xdim1 != xdim1_update_halo_kernel4_plus_2_a_h ||
ydim1 != ydim1_update_halo_kernel4_plus_2_a_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel4_plus_2_a, &xdim0, sizeof(int));
xdim0_update_halo_kernel4_plus_2_a_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel4_plus_2_a, &ydim0, sizeof(int));
ydim0_update_halo_kernel4_plus_2_a_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel4_plus_2_a, &xdim1, sizeof(int));
xdim1_update_halo_kernel4_plus_2_a_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel4_plus_2_a, &ydim1, sizeof(int));
ydim1_update_halo_kernel4_plus_2_a_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[122].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel4_plus_2_a), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[122].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[122].mpi_time += t2 - t1;
OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| ead2a594607374843ce9745860ec1a22aef870de.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel4_plus_2_a;
int xdim0_update_halo_kernel4_plus_2_a_h = -1;
__constant__ int ydim0_update_halo_kernel4_plus_2_a;
int ydim0_update_halo_kernel4_plus_2_a_h = -1;
__constant__ int xdim1_update_halo_kernel4_plus_2_a;
int xdim1_update_halo_kernel4_plus_2_a_h = -1;
__constant__ int ydim1_update_halo_kernel4_plus_2_a;
int ydim1_update_halo_kernel4_plus_2_a_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel4_plus_2_a * (y) + \
xdim0_update_halo_kernel4_plus_2_a * ydim0_update_halo_kernel4_plus_2_a * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel4_plus_2_a * (y) + \
xdim1_update_halo_kernel4_plus_2_a * ydim1_update_halo_kernel4_plus_2_a * \
(z))
// user function
__device__
inline void
update_halo_kernel4_plus_2_a(double *vol_flux_y, double *mass_flux_y,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Y] == 1)
vol_flux_y[OPS_ACC0(0, 0, 0)] = vol_flux_y[OPS_ACC0(2, 0, 0)];
if (fields[FIELD_MASS_FLUX_Y] == 1)
mass_flux_y[OPS_ACC1(0, 0, 0)] = mass_flux_y[OPS_ACC1(2, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel4_plus_2_a(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel4_plus_2_a +
idx_z * 1 * 1 * xdim0_update_halo_kernel4_plus_2_a *
ydim0_update_halo_kernel4_plus_2_a;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel4_plus_2_a +
idx_z * 1 * 1 * xdim1_update_halo_kernel4_plus_2_a *
ydim1_update_halo_kernel4_plus_2_a;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel4_plus_2_a(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel4_plus_2_a(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 122))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(122, "update_halo_kernel4_plus_2_a");
OPS_kernels[122].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel4_plus_2_a_h ||
ydim0 != ydim0_update_halo_kernel4_plus_2_a_h ||
xdim1 != xdim1_update_halo_kernel4_plus_2_a_h ||
ydim1 != ydim1_update_halo_kernel4_plus_2_a_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel4_plus_2_a, &xdim0, sizeof(int));
xdim0_update_halo_kernel4_plus_2_a_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel4_plus_2_a, &ydim0, sizeof(int));
ydim0_update_halo_kernel4_plus_2_a_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel4_plus_2_a, &xdim1, sizeof(int));
xdim1_update_halo_kernel4_plus_2_a_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel4_plus_2_a, &ydim1, sizeof(int));
ydim1_update_halo_kernel4_plus_2_a_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[122].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel4_plus_2_a<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[122].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[122].mpi_time += t2 - t1;
OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
5fe07ce91fdcb8acb740e501b5ca22484273f01b.hip | // !!! This is a file automatically generated by hipify!!!
//---------------------------------------------------------------------------//
// Copyright (c) 2013-2014 Kyle Lutz <[email protected]>
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// See http://boostorg.github.com/compute for more information.
//---------------------------------------------------------------------------//
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include "perf.hpp"
int main(int argc, char *argv[])
{
perf_parse_args(argc, argv);
std::cout << "size: " << PERF_N << std::endl;
thrust::host_vector<int> h_vec = generate_random_vector<int>(PERF_N);
// transfer data to the device
thrust::device_vector<int> d_vec;
perf_timer t;
for(size_t trial = 0; trial < PERF_TRIALS; trial++){
d_vec = h_vec;
t.start();
thrust::sort(d_vec.begin(), d_vec.end());
hipDeviceSynchronize();
t.stop();
}
std::cout << "time: " << t.min_time() / 1e6 << " ms" << std::endl;
// transfer data back to host
thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
return 0;
}
| 5fe07ce91fdcb8acb740e501b5ca22484273f01b.cu | //---------------------------------------------------------------------------//
// Copyright (c) 2013-2014 Kyle Lutz <[email protected]>
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// See http://boostorg.github.com/compute for more information.
//---------------------------------------------------------------------------//
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include "perf.hpp"
int main(int argc, char *argv[])
{
perf_parse_args(argc, argv);
std::cout << "size: " << PERF_N << std::endl;
thrust::host_vector<int> h_vec = generate_random_vector<int>(PERF_N);
// transfer data to the device
thrust::device_vector<int> d_vec;
perf_timer t;
for(size_t trial = 0; trial < PERF_TRIALS; trial++){
d_vec = h_vec;
t.start();
thrust::sort(d_vec.begin(), d_vec.end());
cudaDeviceSynchronize();
t.stop();
}
std::cout << "time: " << t.min_time() / 1e6 << " ms" << std::endl;
// transfer data back to host
thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
return 0;
}
|
bffefcc95b2ef939761c6413159ac0c6fdb07dd0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <set>
#include <string>
#include <iostream>
#include <time.h>
#include <stdlib.h>
#include <random>
inline void GPUassert(hipError_t code, const char * file, int line, bool Abort = true)
{
if (code != 0) {
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (Abort) return;
}
}
#define GPUerrchk(ans) { GPUassert((ans), __FILE__, __LINE__); }
__device__ int factorial(int n) {
if (n == 1) {
return 1;
}
return n * factorial(n - 1);
}
__global__ void permute_kernel_MC(char* d_A, int size, unsigned long long seed, char* svePermutacije) {
int jednoD = blockIdx.x;
int dvoD = jednoD + blockIdx.y*gridDim.x;
int troD = dvoD + gridDim.x*gridDim.y*blockIdx.z;
int tid;
tid = troD * blockDim.x + threadIdx.x;
int fakt = 720;
if (tid < fakt) {
hiprandState_t state;
char* kopija = new char[size];
for (int i = 0; i < size; i++) {
kopija[i] = d_A[i];
}
for (int i = 0; i < size; i++) {
hiprand_init(seed + tid + i, fakt, 0, &state);
unsigned int r1 = hiprand_uniform(&state)*size;
unsigned int r2 = hiprand_uniform(&state)*size;
char temp = kopija[r1];
kopija[r1] = kopija[r2];
kopija[r2] = temp;
}
for (int i = 0; i < size; i++) {
svePermutacije[size * tid + i] = kopija[i];
}
delete[] kopija;
}
}
int factorialHost(int n) {
if (n == 1) {
return 1;
}
return n * factorialHost(n - 1);
}
void funkcija(FILE *fp, int n, double *sum, double *maxi, double *mini) {
clock_t begin = clock();
char h_a[] = "ABCDEF";
char* d_a;
int duzina = 6;
hipMalloc((void**)&d_a, sizeof(h_a));
GPUerrchk(hipMemcpy(d_a, h_a, sizeof(h_a), hipMemcpyHostToDevice));
int fakt = factorialHost(duzina);
int threadNoMC = fakt;
char* h_svePermutacije = new char[threadNoMC * duzina];
char* svePermutacije;
hipMalloc((void**)&svePermutacije, sizeof(char)* threadNoMC * duzina);
hipMemset(svePermutacije, '0', sizeof(char) * threadNoMC * duzina);
std::set<std::string> unikatno;
printf("\n\n LV\n");
int number = 1;
while (threadNoMC / number > 320) number++;
while (1.0*threadNoMC / number - int(threadNoMC / number) > 0) number++;
int a = threadNoMC / number;
int brojac = 0;
while (unikatno.size() != fakt) {
permute_kernel_MC << <number, a >> > (d_a, duzina, (123456 + rand()), svePermutacije);
GPUerrchk(hipMemcpy(h_svePermutacije, svePermutacije, sizeof(char)* threadNoMC * duzina, hipMemcpyDeviceToHost));
brojac++;
std::string temp;
for (int i = 0; i < threadNoMC; i++) {
temp = "";
for (int j = 0; j < duzina; j++) {
char c = *(h_svePermutacije + duzina * i + j);
temp = temp + c;
}
unikatno.insert(temp);
}
}
for (std::string s : unikatno) {
std::cout << s << std::endl;
}
delete[] h_svePermutacije;
GPUerrchk(hipPeekAtLastError());
GPUerrchk(hipDeviceSynchronize());
time_t end = clock();
printf("Vrijeme izvrsenja u sekundama je: %f\n", (double)(end - begin) / CLOCKS_PER_SEC);
if (n != 0) {
fprintf(fp, "%d,%f\n", n, (double)(end - begin) / CLOCKS_PER_SEC);
*sum += (double)(end - begin) / CLOCKS_PER_SEC;
if (*maxi < (double)(end - begin) / CLOCKS_PER_SEC) *maxi = (double)(end - begin) / CLOCKS_PER_SEC;
if (*mini > (double)(end - begin) / CLOCKS_PER_SEC) *mini = (double)(end - begin) / CLOCKS_PER_SEC;
}
}
int main()
{
srand(time(NULL));
FILE *fp;
fp = fopen("C:\\Users\\ismar\\Desktop\\LVP.csv", "w");
double sum = 0.0;
double maxi = -999999.9;
double mini = 999999.9;
for (int i = 0; i <= 100; i++) {
if (fp == NULL) {
printf("Couldn't open file\n");
return 1;
}
funkcija(fp, i, &sum, &maxi, &mini);
}
fprintf(fp, "%s,%f\n", "Minimum", mini);
fprintf(fp, "%s,%f\n", "Maximum", maxi);
fprintf(fp, "%s,%f\n", "Prosjek", 1.0*sum / 100);
printf("Prosjecno vrijeme izvrsavanja je: %f", 1.0*sum / 100);
fclose(fp);
return 0;
}
| bffefcc95b2ef939761c6413159ac0c6fdb07dd0.cu | #include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <curand.h>
#include <curand_kernel.h>
#include <set>
#include <string>
#include <iostream>
#include <time.h>
#include <stdlib.h>
#include <random>
inline void GPUassert(cudaError_t code, const char * file, int line, bool Abort = true)
{
if (code != 0) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (Abort) return;
}
}
#define GPUerrchk(ans) { GPUassert((ans), __FILE__, __LINE__); }
__device__ int factorial(int n) {
if (n == 1) {
return 1;
}
return n * factorial(n - 1);
}
__global__ void permute_kernel_MC(char* d_A, int size, unsigned long long seed, char* svePermutacije) {
int jednoD = blockIdx.x;
int dvoD = jednoD + blockIdx.y*gridDim.x;
int troD = dvoD + gridDim.x*gridDim.y*blockIdx.z;
int tid;
tid = troD * blockDim.x + threadIdx.x;
int fakt = 720;
if (tid < fakt) {
curandState state;
char* kopija = new char[size];
for (int i = 0; i < size; i++) {
kopija[i] = d_A[i];
}
for (int i = 0; i < size; i++) {
curand_init(seed + tid + i, fakt, 0, &state);
unsigned int r1 = curand_uniform(&state)*size;
unsigned int r2 = curand_uniform(&state)*size;
char temp = kopija[r1];
kopija[r1] = kopija[r2];
kopija[r2] = temp;
}
for (int i = 0; i < size; i++) {
svePermutacije[size * tid + i] = kopija[i];
}
delete[] kopija;
}
}
int factorialHost(int n) {
if (n == 1) {
return 1;
}
return n * factorialHost(n - 1);
}
void funkcija(FILE *fp, int n, double *sum, double *maxi, double *mini) {
clock_t begin = clock();
char h_a[] = "ABCDEF";
char* d_a;
int duzina = 6;
cudaMalloc((void**)&d_a, sizeof(h_a));
GPUerrchk(cudaMemcpy(d_a, h_a, sizeof(h_a), cudaMemcpyHostToDevice));
int fakt = factorialHost(duzina);
int threadNoMC = fakt;
char* h_svePermutacije = new char[threadNoMC * duzina];
char* svePermutacije;
cudaMalloc((void**)&svePermutacije, sizeof(char)* threadNoMC * duzina);
cudaMemset(svePermutacije, '0', sizeof(char) * threadNoMC * duzina);
std::set<std::string> unikatno;
printf("\n\n LV\n");
int number = 1;
while (threadNoMC / number > 320) number++;
while (1.0*threadNoMC / number - int(threadNoMC / number) > 0) number++;
int a = threadNoMC / number;
int brojac = 0;
while (unikatno.size() != fakt) {
permute_kernel_MC << <number, a >> > (d_a, duzina, (123456 + rand()), svePermutacije);
GPUerrchk(cudaMemcpy(h_svePermutacije, svePermutacije, sizeof(char)* threadNoMC * duzina, cudaMemcpyDeviceToHost));
brojac++;
std::string temp;
for (int i = 0; i < threadNoMC; i++) {
temp = "";
for (int j = 0; j < duzina; j++) {
char c = *(h_svePermutacije + duzina * i + j);
temp = temp + c;
}
unikatno.insert(temp);
}
}
for (std::string s : unikatno) {
std::cout << s << std::endl;
}
delete[] h_svePermutacije;
GPUerrchk(cudaPeekAtLastError());
GPUerrchk(cudaDeviceSynchronize());
time_t end = clock();
printf("Vrijeme izvrsenja u sekundama je: %f\n", (double)(end - begin) / CLOCKS_PER_SEC);
if (n != 0) {
fprintf(fp, "%d,%f\n", n, (double)(end - begin) / CLOCKS_PER_SEC);
*sum += (double)(end - begin) / CLOCKS_PER_SEC;
if (*maxi < (double)(end - begin) / CLOCKS_PER_SEC) *maxi = (double)(end - begin) / CLOCKS_PER_SEC;
if (*mini > (double)(end - begin) / CLOCKS_PER_SEC) *mini = (double)(end - begin) / CLOCKS_PER_SEC;
}
}
int main()
{
srand(time(NULL));
FILE *fp;
fp = fopen("C:\\Users\\ismar\\Desktop\\LVP.csv", "w");
double sum = 0.0;
double maxi = -999999.9;
double mini = 999999.9;
for (int i = 0; i <= 100; i++) {
if (fp == NULL) {
printf("Couldn't open file\n");
return 1;
}
funkcija(fp, i, &sum, &maxi, &mini);
}
fprintf(fp, "%s,%f\n", "Minimum", mini);
fprintf(fp, "%s,%f\n", "Maximum", maxi);
fprintf(fp, "%s,%f\n", "Prosjek", 1.0*sum / 100);
printf("Prosjecno vrijeme izvrsavanja je: %f", 1.0*sum / 100);
fclose(fp);
return 0;
}
|
9d5ea73c7def5abbe72b3a1716ac61cbb4de79a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
void __global__ kernel_test(float *data, int N){
int tx = threadIdx.x + blockIdx.x*blockDim.x;
if (tx < N)
data[tx] = tx;
}
void gpu_test(float *data, int N){
float *d_data = NULL;
hipMalloc(&d_data, N*sizeof(float));
hipLaunchKernelGGL(( kernel_test), dim3(1),dim3(N), 0, 0, d_data, N);
hipMemcpy(data, d_data, N*sizeof(float), hipMemcpyDeviceToHost);
if (d_data)
hipFree(d_data);
} | 9d5ea73c7def5abbe72b3a1716ac61cbb4de79a5.cu |
void __global__ kernel_test(float *data, int N){
int tx = threadIdx.x + blockIdx.x*blockDim.x;
if (tx < N)
data[tx] = tx;
}
void gpu_test(float *data, int N){
float *d_data = NULL;
cudaMalloc(&d_data, N*sizeof(float));
kernel_test<<<1,N>>>(d_data, N);
cudaMemcpy(data, d_data, N*sizeof(float), cudaMemcpyDeviceToHost);
if (d_data)
cudaFree(d_data);
} |
27ed6aff18b143882a5418e2a9e48b10bab31871.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ConsolidateAndIdentifyContours.cu
// Detects objects by using the contour information of the shapes in an image.
#include "ConsolidateAndIdentifyContours.h"
#include <iostream>
#include <fstream>
#include <cmath>
using namespace std;
#include "ErrorCode.h"
#include "Template.h"
#include "TemplateFactory.h"
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Define the default thread block dimensions.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// Macro: RED_MAC_COUNT
// Defines how many times edge detection is repeated.
#define RED_MAC_COUNT 4
// Macros: DILATE_TPL_SHAPE and SEARCH_TPL_SHAPE
// Define the neighborhood template shapes used by the dilation operation and
// by the search for the primitive contours.
#define DILATE_TPL_SHAPE TF_SHAPE_CIRCLE
#define SEARCH_TPL_SHAPE TF_SHAPE_BOX
// Macro: OBJ_IDX_OFFSET
// Defines the label offset added to the object indices written into the
// marked contour output.
#define OBJ_IDX_OFFSET 100
// Global variable: _redMacDiffSize
// Detection radii of the different edge detectors.
static unsigned _redMacDiffSize[RED_MAC_COUNT] = { 3, 5, 7, 9 };
// Static member variable: redMachine (edge detection processors)
RobustEdgeDetection *ConsolidateAndIdentifyContours::redMachine = NULL;
// Kernel function: _searchPrimitiveContourKer (match and label contours)
// Matches the contours detected in the input image against the related
// contours in the primitive contour image. Matched edges are labelled with
// the corresponding object index; unmatched contour points are marked as
// abnormal points.
static __global__ void
_searchPrimitiveContourKer(
ImageCuda inimg, // input contour image
ImageCuda outimg, // labelled output image
ImageCuda abnorimg, // abnormal point image
ImageCuda prmtcont, // primitive contour image
ImageCuda prmtreg, // object region image
unsigned trackrad // search radius
);
// Host member method: initRedMachine (initialize the edge detection processors)
__host__ int ConsolidateAndIdentifyContours::initRedMachine()
{
// If redMachine is not NULL, it has already been initialized.
if (redMachine != NULL)
return NO_ERROR;
// Allocate the requested number of edge detectors.
redMachine = new RobustEdgeDetection[RED_MAC_COUNT];
if (redMachine == NULL)
return OUT_OF_MEM;
// Iterate over the edge detectors and set their detection radii.
int errcode = NO_ERROR;
for (int i = 0; i < RED_MAC_COUNT; i++) {
int curerrcode = redMachine[i].setDiffsize(_redMacDiffSize[i]);
// The error code that is finally returned should be the more severe one.
if (curerrcode < errcode)
errcode = curerrcode;
}
// Initialization finished; return the error code accumulated above.
return errcode;
}
// Host member method: initMorphMachine (initialize the dilation processor)
__host__ int ConsolidateAndIdentifyContours::initMorphMachine()
{
// Fetch the template currently held by the dilation processor.
Template *oldtpl = morphMachine.getTemplate();
// Generate a new template through the template factory, using the shape
// given by DILATE_TPL_SHAPE.
int errcode;
Template *curtpl = NULL;
size_t boxsize = this->dilationRad * 2 + 1;
errcode = TemplateFactory::getTemplate(&curtpl, DILATE_TPL_SHAPE, boxsize);
if (errcode != NO_ERROR)
return errcode;
// Put the newly generated template into the dilation processor.
errcode = morphMachine.setTemplate(curtpl);
if (errcode != NO_ERROR)
return errcode;
// If the original template is not NULL, release the hold on it.
if (oldtpl != NULL)
TemplateFactory::putTemplate(oldtpl);
// Processing finished; return.
return NO_ERROR;
}
// Host member method: getCsldtContoursImg (obtain the consolidated contour image)
__host__ int ConsolidateAndIdentifyContours::getCsldtContoursImg(
Image *inimg, Image *outimg)
{
// Check whether the input and output images are NULL.
if (inimg == NULL || outimg == NULL)
return NULL_POINTER;
// The failure handling below has to clean up the temporary images that were
// allocated, so a macro is defined here to simplify the code and ease
// maintenance.
#define CAIC_GETCONT_ERRFREE(errcode) do { \
for (int _i_cge = 0; _i_cge < RED_MAC_COUNT; _i_cge++) { \
if (edgetmpimg[_i_cge] != NULL) \
ImageBasicOp::deleteImage(edgetmpimg[_i_cge]); \
} \
return (errcode); \
} while (0)
// This loop does two things: first it creates the output images for edge
// detection, and second it invokes the edge detection algorithm.
int errcode = NO_ERROR;
Image *edgetmpimg[RED_MAC_COUNT] = { NULL };
for (int i = 0; i < RED_MAC_COUNT; i++) {
// Create the output image for edge detection.
errcode = ImageBasicOp::newImage(edgetmpimg + i);
if (errcode != NO_ERROR)
CAIC_GETCONT_ERRFREE(errcode);
//cout << "AA" << i << endl;
// Call the edge detection method to obtain the edge image.
errcode = redMachine[i].detectEdgeSA(inimg, edgetmpimg[i], NULL);
if (errcode != NO_ERROR)
CAIC_GETCONT_ERRFREE(errcode);
//cout << "BB" << i << endl;
}
// Combine the edge detection results obtained under the different parameters.
errcode = combineMachine.combineImageMax(edgetmpimg, RED_MAC_COUNT, outimg);
if (errcode != NO_ERROR)
CAIC_GETCONT_ERRFREE(errcode);
// Dilate the edges to connect points on broken lines.
errcode = morphMachine.dilate(outimg, edgetmpimg[0]);
if (errcode != NO_ERROR)
CAIC_GETCONT_ERRFREE(errcode);
// Thin the dilated edges to restore a single-pixel line width.
errcode = thinMachine.thinMatlabLike(edgetmpimg[0], outimg);
if (errcode != NO_ERROR)
CAIC_GETCONT_ERRFREE(errcode);
// The edge detection algorithm already outputs a binary image, so no further
// binarization is needed here.
//errcode = binMachine.binarize(outimg);
//if (errcode != NO_ERROR)
// CAIC_GETCONT_ERRFREE(errcode);
// Processing finished; return.
CAIC_GETCONT_ERRFREE(NO_ERROR);
#undef CAIC_GETCONT_ERRFREE
}
// Kernel function: _searchPrimitiveContourKer (match and label contours)
__global__ void _searchPrimitiveContourKer(
ImageCuda inimg, ImageCuda outimg, ImageCuda abnorimg,
ImageCuda prmtcont, ImageCuda prmtreg, unsigned trackrad)
{
// Compute the position of the output point handled by this thread, where c
// and r are the x and y components of the pixel coordinate (c stands for
// column, r stands for row).
unsigned c = blockIdx.x * blockDim.x + threadIdx.x;
unsigned r = blockIdx.y * blockDim.y + threadIdx.y;
// Check whether the pixel is out of bounds; if it is, do nothing. This saves
// computing resources and also prevents crashes caused by illegal memory
// accesses.
if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height)
return;
// Compute the memory access indices of the input and output images.
unsigned inidx = r * inimg.pitchBytes + c;
unsigned outidx = r * outimg.pitchBytes + c;
unsigned abnoridx = r * abnorimg.pitchBytes + c;
// Read the corresponding pixel value from the input image.
unsigned char inpixel = inimg.imgMeta.imgData[inidx];
// If the pixel value is 0, i.e. the current point is not on a detected
// contour, write 0 to the output images directly and skip the search below.
if (inpixel == 0) {
outimg.imgMeta.imgData[outidx] = 0;
abnorimg.imgMeta.imgData[abnoridx] = 0;
return;
}
// Search for the object label of the current point from the center outward.
// First check whether the current position already carries a contour label
// in the primitive contour image; if it does, the prmtcontpxl == 0 condition
// of the loops below skips the whole subsequent search.
unsigned prmtcontidx = r * prmtcont.pitchBytes + c;
unsigned char prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
// Search the neighborhood of the current position from near to far to see
// whether a point on the primitive contour can be hit.
int curr, curc;
// The outer loop iterates over the search radius.
for (int currad = 1; currad <= trackrad && prmtcontpxl == 0; currad++) {
// Iterate over the points at the current radius, checking from the middle
// points towards the corner points. As soon as a primitive contour point is
// found, exit the loop and stop searching.
for (int i = 0; i < trackrad && prmtcontpxl == 0; i++) {
// Check the upper row, right-hand point.
curc = c + i;
curr = r - currad;
prmtcontidx = curr * prmtcont.pitchBytes + curc;
if (curc < prmtcont.imgMeta.width || curr >= 0)
prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
if (prmtcontpxl != 0)
break;
// Check the lower row, right-hand point.
curc = c + i;
curr = r + currad;
prmtcontidx = curr * prmtcont.pitchBytes + curc;
if (curc < prmtcont.imgMeta.width || curr < prmtcont.imgMeta.height)
prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
if (prmtcontpxl != 0)
break;
// Check the left column, lower point.
curc = c - currad;
curr = r + i;
prmtcontidx = curr * prmtcont.pitchBytes + curc;
if (curc >= 0 || curr < prmtcont.imgMeta.height)
prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
if (prmtcontpxl != 0)
break;
// Check the right column, lower point.
curc = c + currad;
curr = r + i;
prmtcontidx = curr * prmtcont.pitchBytes + curc;
if (curc < prmtcont.imgMeta.width || curr < prmtcont.imgMeta.height)
prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
if (prmtcontpxl != 0)
break;
// According to the index formula the left-hand (upper) points lie slightly
// further out than the right-hand (lower) points, so the left-hand series is
// checked afterwards as a group.
// Check the upper row, left-hand point.
curc = c - i - 1;
curr = r - currad;
prmtcontidx = curr * prmtcont.pitchBytes + curc;
if (curc >= 0 || curr >= 0)
prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
if (prmtcontpxl != 0)
break;
// Check the lower row, left-hand point.
curc = c - i - 1;
curr = r + currad;
prmtcontidx = curr * prmtcont.pitchBytes + curc;
if (curc >= 0 || curr < prmtcont.imgMeta.height)
prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
if (prmtcontpxl != 0)
break;
// Check the left column, upper point.
curc = c - currad;
curr = r - i - 1;
prmtcontidx = curr * prmtcont.pitchBytes + curc;
if (curc >= 0 || curr >= 0)
prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
if (prmtcontpxl != 0)
break;
// Check the right column, upper point.
curc = c + currad;
curr = r - i - 1;
prmtcontidx = curr * prmtcont.pitchBytes + curc;
if (curc < prmtcont.imgMeta.width || curr >= 0)
prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
if (prmtcontpxl != 0)
break;
}
}
// Produce the output depending on whether a primitive contour point was found.
if (prmtcontpxl != 0) {
// A primitive contour point was matched: label the output image, but do not
// mark the abnormal point image.
outimg.imgMeta.imgData[outidx] = prmtcontpxl + OBJ_IDX_OFFSET;
abnorimg.imgMeta.imgData[abnoridx] = 0;
} else {
// Matching a primitive contour point failed: mark this point as abnormal and
// write it into the abnormal point image.
outimg.imgMeta.imgData[outidx] = 0;
abnorimg.imgMeta.imgData[abnoridx] =
prmtreg.imgMeta.imgData[r * prmtreg.pitchBytes + c];
}
}
// Host member method: searchPrimitiveContour (match and label contours)
__host__ int ConsolidateAndIdentifyContours::searchPrimitiveContour(
Image *inimg, Image *outimg, Image *abnormalimg)
{
// If the primitive contour or the primitive region image has not been set,
// return an error.
if (this->primitiveContour == NULL || this->primitiveRegion == NULL)
return OP_OVERFLOW;
// If any of the input parameters is a NULL pointer, return an error.
if (inimg == NULL || outimg == NULL || abnormalimg == NULL)
return NULL_POINTER;
// The following block performs image preprocessing, which mainly prepares
// memory on the device for the input and output images so that they can hold
// the data.
int errcode; // local variable, error code
// Local variables, the image size used by this operation.
size_t imgw = inimg->roiX2 - inimg->roiX1;
size_t imgh = inimg->roiY2 - inimg->roiY1;
// Copy the input image to device memory.
errcode = ImageBasicOp::copyToCurrentDevice(inimg);
if (errcode != NO_ERROR)
return errcode;
// Copy the primitive contour image to device memory.
errcode = ImageBasicOp::copyToCurrentDevice(this->primitiveContour);
if (errcode != NO_ERROR)
return errcode;
// Adjust the computation size according to the ROI of the primitive contour image.
if (imgw > this->primitiveContour->roiX2 - this->primitiveContour->roiX1)
imgw = this->primitiveContour->roiX2 - this->primitiveContour->roiX1;
if (imgh > this->primitiveContour->roiY2 - this->primitiveContour->roiY1)
imgh = this->primitiveContour->roiY2 - this->primitiveContour->roiY1;
// Copy the primitive region image to device memory.
errcode = ImageBasicOp::copyToCurrentDevice(this->primitiveRegion);
if (errcode != NO_ERROR)
return errcode;
// Adjust the computation size according to the ROI of the primitive region image.
if (imgw > this->primitiveRegion->roiX2 - this->primitiveRegion->roiX1)
imgw = this->primitiveRegion->roiX2 - this->primitiveRegion->roiX1;
if (imgh > this->primitiveRegion->roiY2 - this->primitiveRegion->roiY1)
imgh = this->primitiveRegion->roiY2 - this->primitiveRegion->roiY1;
// Copy the output image to device memory.
errcode = ImageBasicOp::copyToCurrentDevice(outimg);
if (errcode != NO_ERROR) {
// If the output image holds no data (so the copy above fails), create an
// image with the same size as the ROI sub-image of the input image.
errcode = ImageBasicOp::makeAtCurrentDevice(outimg, imgw, imgh);
// If creating the image fails as well, the operation has failed completely;
// return the error.
if (errcode != NO_ERROR)
return errcode;
} else {
// If the output image already contains data, use that data to update the
// size that finally takes part in the computation.
if (imgw > outimg->roiX2 - outimg->roiX1)
imgw = outimg->roiX2 - outimg->roiX1;
if (imgh > outimg->roiY2 - outimg->roiY1)
imgh = outimg->roiY2 - outimg->roiY1;
}
// Copy the abnormal point image to device memory.
errcode = ImageBasicOp::copyToCurrentDevice(abnormalimg);
if (errcode != NO_ERROR) {
// If the abnormal point image holds no data (so the copy above fails),
// create an image with the same size as the ROI sub-image of the input image.
errcode = ImageBasicOp::makeAtCurrentDevice(abnormalimg, imgw, imgh);
// If creating the image fails as well, the operation has failed completely;
// return the error.
if (errcode != NO_ERROR)
return errcode;
} else {
// If the abnormal point image already contains data, use that data to update
// the size that finally takes part in the computation.
if (imgw > abnormalimg->roiX2 - abnormalimg->roiX1)
imgw = abnormalimg->roiX2 - abnormalimg->roiX1;
if (imgh > abnormalimg->roiY2 - abnormalimg->roiY1)
imgh = abnormalimg->roiY2 - abnormalimg->roiY1;
}
// Extract the ROI sub-image of the input image.
ImageCuda insubimgCud;
errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Extract the ROI sub-image of the primitive contour image.
ImageCuda prmtcontsubimgCud;
errcode = ImageBasicOp::roiSubImage(this->primitiveContour, &prmtcontsubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Extract the ROI sub-image of the primitive region image.
ImageCuda prmtregsubimgCud;
errcode = ImageBasicOp::roiSubImage(this->primitiveRegion, &prmtregsubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Extract the ROI sub-image of the output image.
ImageCuda outsubimgCud;
errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Extract the ROI sub-image of the abnormal point image.
ImageCuda abnorsubimgCud;
errcode = ImageBasicOp::roiSubImage(abnormalimg, &abnorsubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Adjust the sub-image sizes according to the computation size obtained above.
insubimgCud.imgMeta.width = prmtcontsubimgCud.imgMeta.width =
prmtregsubimgCud.imgMeta.width =
outsubimgCud.imgMeta.width =
abnorsubimgCud.imgMeta.width = imgw;
insubimgCud.imgMeta.height = prmtcontsubimgCud.imgMeta.height =
prmtregsubimgCud.imgMeta.height =
outsubimgCud.imgMeta.height =
abnorsubimgCud.imgMeta.height = imgh;
// Compute the thread block size and the number of blocks for the kernel launch.
dim3 blocksize, gridsize;
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y;
// Launch the kernel to match and label the contours.
hipLaunchKernelGGL(( _searchPrimitiveContourKer), dim3(gridsize), dim3(blocksize), 0, 0,
insubimgCud, outsubimgCud, abnorsubimgCud,
prmtcontsubimgCud, prmtregsubimgCud, this->trackRad);
// If the CUDA call failed, return an error code.
if (hipGetLastError() != hipSuccess)
return CUDA_ERROR;
// Processing finished; exit.
return NO_ERROR;
}
27ed6aff18b143882a5418e2a9e48b10bab31871.cu | // ConsolidateAndIdentifyContours.cu
// Detects objects by using the contour information of the shapes in an image.
#include "ConsolidateAndIdentifyContours.h"
#include <iostream>
#include <fstream>
#include <cmath>
using namespace std;
#include "ErrorCode.h"
#include "Template.h"
#include "TemplateFactory.h"
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Define the default thread block dimensions.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// Macro: RED_MAC_COUNT
// Defines how many times edge detection is repeated.
#define RED_MAC_COUNT 4
// Macros: DILATE_TPL_SHAPE and SEARCH_TPL_SHAPE
// Define the neighborhood template shapes used by the dilation operation and
// by the search for the primitive contours.
#define DILATE_TPL_SHAPE TF_SHAPE_CIRCLE
#define SEARCH_TPL_SHAPE TF_SHAPE_BOX
// Macro: OBJ_IDX_OFFSET
// Defines the label offset added to the object indices written into the
// marked contour output.
#define OBJ_IDX_OFFSET 100
// Global variable: _redMacDiffSize
// Detection radii of the different edge detectors.
static unsigned _redMacDiffSize[RED_MAC_COUNT] = { 3, 5, 7, 9 };
// Static member variable: redMachine (edge detection processors)
RobustEdgeDetection *ConsolidateAndIdentifyContours::redMachine = NULL;
// Kernel function: _searchPrimitiveContourKer (match and label contours)
// Matches the contours detected in the input image against the related
// contours in the primitive contour image. Matched edges are labelled with
// the corresponding object index; unmatched contour points are marked as
// abnormal points.
static __global__ void
_searchPrimitiveContourKer(
ImageCuda inimg, // input contour image
ImageCuda outimg, // labelled output image
ImageCuda abnorimg, // abnormal point image
ImageCuda prmtcont, // primitive contour image
ImageCuda prmtreg, // object region image
unsigned trackrad // search radius
);
// Host member method: initRedMachine (initialize the edge detection processors)
__host__ int ConsolidateAndIdentifyContours::initRedMachine()
{
// If redMachine is not NULL, it has already been initialized.
if (redMachine != NULL)
return NO_ERROR;
// Allocate the requested number of edge detectors.
redMachine = new RobustEdgeDetection[RED_MAC_COUNT];
if (redMachine == NULL)
return OUT_OF_MEM;
// Iterate over the edge detectors and set their detection radii.
int errcode = NO_ERROR;
for (int i = 0; i < RED_MAC_COUNT; i++) {
int curerrcode = redMachine[i].setDiffsize(_redMacDiffSize[i]);
// The error code that is finally returned should be the more severe one.
if (curerrcode < errcode)
errcode = curerrcode;
}
// Initialization finished; return the error code accumulated above.
return errcode;
}
// Host member method: initMorphMachine (initialize the dilation processor)
__host__ int ConsolidateAndIdentifyContours::initMorphMachine()
{
// Fetch the template currently held by the dilation processor.
Template *oldtpl = morphMachine.getTemplate();
// Generate a new template through the template factory, using the shape
// given by DILATE_TPL_SHAPE.
int errcode;
Template *curtpl = NULL;
size_t boxsize = this->dilationRad * 2 + 1;
errcode = TemplateFactory::getTemplate(&curtpl, DILATE_TPL_SHAPE, boxsize);
if (errcode != NO_ERROR)
return errcode;
// Put the newly generated template into the dilation processor.
errcode = morphMachine.setTemplate(curtpl);
if (errcode != NO_ERROR)
return errcode;
// If the original template is not NULL, release the hold on it.
if (oldtpl != NULL)
TemplateFactory::putTemplate(oldtpl);
// Processing finished; return.
return NO_ERROR;
}
// Host member method: getCsldtContoursImg (obtain the consolidated contour image)
__host__ int ConsolidateAndIdentifyContours::getCsldtContoursImg(
Image *inimg, Image *outimg)
{
// Check whether the input and output images are NULL.
if (inimg == NULL || outimg == NULL)
return NULL_POINTER;
// The failure handling below has to clean up the temporary images that were
// allocated, so a macro is defined here to simplify the code and ease
// maintenance.
#define CAIC_GETCONT_ERRFREE(errcode) do { \
for (int _i_cge = 0; _i_cge < RED_MAC_COUNT; _i_cge++) { \
if (edgetmpimg[_i_cge] != NULL) \
ImageBasicOp::deleteImage(edgetmpimg[_i_cge]); \
} \
return (errcode); \
} while (0)
// This loop does two things: first it creates the output images for edge
// detection, and second it invokes the edge detection algorithm.
int errcode = NO_ERROR;
Image *edgetmpimg[RED_MAC_COUNT] = { NULL };
for (int i = 0; i < RED_MAC_COUNT; i++) {
// Create the output image for edge detection.
errcode = ImageBasicOp::newImage(edgetmpimg + i);
if (errcode != NO_ERROR)
CAIC_GETCONT_ERRFREE(errcode);
//cout << "AA" << i << endl;
// Call the edge detection method to obtain the edge image.
errcode = redMachine[i].detectEdgeSA(inimg, edgetmpimg[i], NULL);
if (errcode != NO_ERROR)
CAIC_GETCONT_ERRFREE(errcode);
//cout << "BB" << i << endl;
}
// Combine the edge detection results obtained under the different parameters.
errcode = combineMachine.combineImageMax(edgetmpimg, RED_MAC_COUNT, outimg);
if (errcode != NO_ERROR)
CAIC_GETCONT_ERRFREE(errcode);
// Dilate the edges to connect points on broken lines.
errcode = morphMachine.dilate(outimg, edgetmpimg[0]);
if (errcode != NO_ERROR)
CAIC_GETCONT_ERRFREE(errcode);
// Thin the dilated edges to restore a single-pixel line width.
errcode = thinMachine.thinMatlabLike(edgetmpimg[0], outimg);
if (errcode != NO_ERROR)
CAIC_GETCONT_ERRFREE(errcode);
// The edge detection algorithm already outputs a binary image, so no further
// binarization is needed here.
//errcode = binMachine.binarize(outimg);
//if (errcode != NO_ERROR)
// CAIC_GETCONT_ERRFREE(errcode);
// Processing finished; return.
CAIC_GETCONT_ERRFREE(NO_ERROR);
#undef CAIC_GETCONT_ERRFREE
}
// Kernel function: _searchPrimitiveContourKer (match and label contours)
__global__ void _searchPrimitiveContourKer(
ImageCuda inimg, ImageCuda outimg, ImageCuda abnorimg,
ImageCuda prmtcont, ImageCuda prmtreg, unsigned trackrad)
{
// Compute the position of the output point handled by this thread, where c
// and r are the x and y components of the pixel coordinate (c stands for
// column, r stands for row).
unsigned c = blockIdx.x * blockDim.x + threadIdx.x;
unsigned r = blockIdx.y * blockDim.y + threadIdx.y;
// Check whether the pixel is out of bounds; if it is, do nothing. This saves
// computing resources and also prevents crashes caused by illegal memory
// accesses.
if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height)
return;
// Compute the memory access indices of the input and output images.
unsigned inidx = r * inimg.pitchBytes + c;
unsigned outidx = r * outimg.pitchBytes + c;
unsigned abnoridx = r * abnorimg.pitchBytes + c;
// Read the corresponding pixel value from the input image.
unsigned char inpixel = inimg.imgMeta.imgData[inidx];
// If the pixel value is 0, i.e. the current point is not on a detected
// contour, write 0 to the output images directly and skip the search below.
if (inpixel == 0) {
outimg.imgMeta.imgData[outidx] = 0;
abnorimg.imgMeta.imgData[abnoridx] = 0;
return;
}
// Search for the object label of the current point from the center outward.
// First check whether the current position already carries a contour label
// in the primitive contour image; if it does, the prmtcontpxl == 0 condition
// of the loops below skips the whole subsequent search.
unsigned prmtcontidx = r * prmtcont.pitchBytes + c;
unsigned char prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
// Search the neighborhood of the current position from near to far to see
// whether a point on the primitive contour can be hit.
int curr, curc;
// The outer loop iterates over the search radius.
for (int currad = 1; currad <= trackrad && prmtcontpxl == 0; currad++) {
// Iterate over the points at the current radius, checking from the middle
// points towards the corner points. As soon as a primitive contour point is
// found, exit the loop and stop searching.
for (int i = 0; i < trackrad && prmtcontpxl == 0; i++) {
// Check the upper row, right-hand point.
curc = c + i;
curr = r - currad;
prmtcontidx = curr * prmtcont.pitchBytes + curc;
if (curc < prmtcont.imgMeta.width || curr >= 0)
prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
if (prmtcontpxl != 0)
break;
// Check the lower row, right-hand point.
curc = c + i;
curr = r + currad;
prmtcontidx = curr * prmtcont.pitchBytes + curc;
if (curc < prmtcont.imgMeta.width || curr < prmtcont.imgMeta.height)
prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
if (prmtcontpxl != 0)
break;
// Check the left column, lower point.
curc = c - currad;
curr = r + i;
prmtcontidx = curr * prmtcont.pitchBytes + curc;
if (curc >= 0 || curr < prmtcont.imgMeta.height)
prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
if (prmtcontpxl != 0)
break;
// Check the right column, lower point.
curc = c + currad;
curr = r + i;
prmtcontidx = curr * prmtcont.pitchBytes + curc;
if (curc < prmtcont.imgMeta.width || curr < prmtcont.imgMeta.height)
prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
if (prmtcontpxl != 0)
break;
// According to the index formula the left-hand (upper) points lie slightly
// further out than the right-hand (lower) points, so the left-hand series is
// checked afterwards as a group.
// Check the upper row, left-hand point.
curc = c - i - 1;
curr = r - currad;
prmtcontidx = curr * prmtcont.pitchBytes + curc;
if (curc >= 0 || curr >= 0)
prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
if (prmtcontpxl != 0)
break;
// Check the lower row, left-hand point.
curc = c - i - 1;
curr = r + currad;
prmtcontidx = curr * prmtcont.pitchBytes + curc;
if (curc >= 0 || curr < prmtcont.imgMeta.height)
prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
if (prmtcontpxl != 0)
break;
// Check the left column, upper point.
curc = c - currad;
curr = r - i - 1;
prmtcontidx = curr * prmtcont.pitchBytes + curc;
if (curc >= 0 || curr >= 0)
prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
if (prmtcontpxl != 0)
break;
// Check the right column, upper point.
curc = c + currad;
curr = r - i - 1;
prmtcontidx = curr * prmtcont.pitchBytes + curc;
if (curc < prmtcont.imgMeta.width || curr >= 0)
prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
if (prmtcontpxl != 0)
break;
}
}
// Produce the output depending on whether a primitive contour point was found.
if (prmtcontpxl != 0) {
// A primitive contour point was matched: label the output image, but do not
// mark the abnormal point image.
outimg.imgMeta.imgData[outidx] = prmtcontpxl + OBJ_IDX_OFFSET;
abnorimg.imgMeta.imgData[abnoridx] = 0;
} else {
// Matching a primitive contour point failed: mark this point as abnormal and
// write it into the abnormal point image.
outimg.imgMeta.imgData[outidx] = 0;
abnorimg.imgMeta.imgData[abnoridx] =
prmtreg.imgMeta.imgData[r * prmtreg.pitchBytes + c];
}
}
// Host member method: searchPrimitiveContour (match and label contours)
__host__ int ConsolidateAndIdentifyContours::searchPrimitiveContour(
Image *inimg, Image *outimg, Image *abnormalimg)
{
// If the primitive contour or the primitive region image has not been set,
// return an error.
if (this->primitiveContour == NULL || this->primitiveRegion == NULL)
return OP_OVERFLOW;
// If any of the input parameters is a NULL pointer, return an error.
if (inimg == NULL || outimg == NULL || abnormalimg == NULL)
return NULL_POINTER;
// The following block performs image preprocessing, which mainly prepares
// memory on the device for the input and output images so that they can hold
// the data.
int errcode; // local variable, error code
// Local variables, the image size used by this operation.
size_t imgw = inimg->roiX2 - inimg->roiX1;
size_t imgh = inimg->roiY2 - inimg->roiY1;
// Copy the input image to device memory.
errcode = ImageBasicOp::copyToCurrentDevice(inimg);
if (errcode != NO_ERROR)
return errcode;
// Copy the primitive contour image to device memory.
errcode = ImageBasicOp::copyToCurrentDevice(this->primitiveContour);
if (errcode != NO_ERROR)
return errcode;
// Adjust the computation size according to the ROI of the primitive contour image.
if (imgw > this->primitiveContour->roiX2 - this->primitiveContour->roiX1)
imgw = this->primitiveContour->roiX2 - this->primitiveContour->roiX1;
if (imgh > this->primitiveContour->roiY2 - this->primitiveContour->roiY1)
imgh = this->primitiveContour->roiY2 - this->primitiveContour->roiY1;
// Copy the primitive region image to device memory.
errcode = ImageBasicOp::copyToCurrentDevice(this->primitiveRegion);
if (errcode != NO_ERROR)
return errcode;
// Adjust the computation size according to the ROI of the primitive region image.
if (imgw > this->primitiveRegion->roiX2 - this->primitiveRegion->roiX1)
imgw = this->primitiveRegion->roiX2 - this->primitiveRegion->roiX1;
if (imgh > this->primitiveRegion->roiY2 - this->primitiveRegion->roiY1)
imgh = this->primitiveRegion->roiY2 - this->primitiveRegion->roiY1;
// Copy the output image to device memory.
errcode = ImageBasicOp::copyToCurrentDevice(outimg);
if (errcode != NO_ERROR) {
// If the output image holds no data (so the copy above fails), create an
// image with the same size as the ROI sub-image of the input image.
errcode = ImageBasicOp::makeAtCurrentDevice(outimg, imgw, imgh);
// If creating the image fails as well, the operation has failed completely;
// return the error.
if (errcode != NO_ERROR)
return errcode;
} else {
// If the output image already contains data, use that data to update the
// size that finally takes part in the computation.
if (imgw > outimg->roiX2 - outimg->roiX1)
imgw = outimg->roiX2 - outimg->roiX1;
if (imgh > outimg->roiY2 - outimg->roiY1)
imgh = outimg->roiY2 - outimg->roiY1;
}
// Copy the abnormal point image to device memory.
errcode = ImageBasicOp::copyToCurrentDevice(abnormalimg);
if (errcode != NO_ERROR) {
// If the abnormal point image holds no data (so the copy above fails),
// create an image with the same size as the ROI sub-image of the input image.
errcode = ImageBasicOp::makeAtCurrentDevice(abnormalimg, imgw, imgh);
// If creating the image fails as well, the operation has failed completely;
// return the error.
if (errcode != NO_ERROR)
return errcode;
} else {
// If the abnormal point image already contains data, use that data to update
// the size that finally takes part in the computation.
if (imgw > abnormalimg->roiX2 - abnormalimg->roiX1)
imgw = abnormalimg->roiX2 - abnormalimg->roiX1;
if (imgh > abnormalimg->roiY2 - abnormalimg->roiY1)
imgh = abnormalimg->roiY2 - abnormalimg->roiY1;
}
// Extract the ROI sub-image of the input image.
ImageCuda insubimgCud;
errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Extract the ROI sub-image of the primitive contour image.
ImageCuda prmtcontsubimgCud;
errcode = ImageBasicOp::roiSubImage(this->primitiveContour, &prmtcontsubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Extract the ROI sub-image of the primitive region image.
ImageCuda prmtregsubimgCud;
errcode = ImageBasicOp::roiSubImage(this->primitiveRegion, &prmtregsubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Extract the ROI sub-image of the output image.
ImageCuda outsubimgCud;
errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Extract the ROI sub-image of the abnormal point image.
ImageCuda abnorsubimgCud;
errcode = ImageBasicOp::roiSubImage(abnormalimg, &abnorsubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Adjust the sub-image sizes according to the computation size obtained above.
insubimgCud.imgMeta.width = prmtcontsubimgCud.imgMeta.width =
prmtregsubimgCud.imgMeta.width =
outsubimgCud.imgMeta.width =
abnorsubimgCud.imgMeta.width = imgw;
insubimgCud.imgMeta.height = prmtcontsubimgCud.imgMeta.height =
prmtregsubimgCud.imgMeta.height =
outsubimgCud.imgMeta.height =
abnorsubimgCud.imgMeta.height = imgh;
// Compute the thread block size and the number of blocks for the kernel launch.
dim3 blocksize, gridsize;
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y;
// Launch the kernel to match and label the contours.
_searchPrimitiveContourKer<<<gridsize, blocksize>>>(
insubimgCud, outsubimgCud, abnorsubimgCud,
prmtcontsubimgCud, prmtregsubimgCud, this->trackRad);
// If the CUDA call failed, return an error code.
if (cudaGetLastError() != cudaSuccess)
return CUDA_ERROR;
// Processing finished; exit.
return NO_ERROR;
}
|
ab5103cf77eff6200db3b54586563d7ba7ad4deb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<string.h>
#include <stdlib.h>
#include <stdarg.h>
#include<time.h>
#include <math.h>
#include "MNIST_for_C-master/mnist.h"
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, hipGetErrorString(error)); \
exit(1); \
} \
}
// C language does not contain a boolean type, we are defining our type...
#define FALSE 0
#define TRUE 1
// All vectors/matrices are stored as this structure in the memory...
struct Vector2D
{
// Whole vector/matrix data is stored in one dimensional array...
// All numbers are floating point numbers....
//This pointer points where the vector/matrix data lyies....
float * data;
// Row number of the vector/matrix...
int height;
// Column number of the vector/matrix...
int width;
int size;
};
// We are defining a type from this structure definition...
typedef struct Vector2D Vector2D;
__device__ Vector2D array[5];
float * device_matrix_location;
Vector2D * CreateVector2D(float * data, int height, int width, bool fill = true, bool store = false)
{
// A new structure is allocated in GPU memory for matrix/vector...
Vector2D * temp ;
CHECK(hipMalloc(&temp, sizeof(Vector2D)));
float * temp2;
CHECK(hipMalloc(&temp2, sizeof(float)*height*width));
if(fill == true)
CHECK(hipMemcpy(temp2, data, sizeof(float)*height*width, hipMemcpyHostToDevice));
CHECK(hipMemcpy(&temp->data, &temp2, sizeof(float *), hipMemcpyHostToDevice));
CHECK(hipMemcpy(&temp->height, (void *)(&height), sizeof(int), hipMemcpyHostToDevice));
CHECK(hipMemcpy(&temp->width, (void *)(&width), sizeof(int), hipMemcpyHostToDevice));
//temp->height = height;
//temp->width = width;
if(store == true)
device_matrix_location = temp2;
hipDeviceSynchronize();
return temp;
}
__global__ void MatrixAdd(Vector2D * result, Vector2D * vec1, Vector2D * vec2)
{
if((vec1->width != vec2->width) || (vec1->height != vec2->height))
{
printf("\n\n**********Matrix add diff dimension....");
return;
}
int tx = blockIdx.x*blockDim.x+ threadIdx.x;
int ty = blockIdx.y*blockDim.y + threadIdx.y;
int tid = ty*vec1->width+tx;
if(tid ==0)
{//printf("\nMatrixAddvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width))
{
printf("\nMatrixAdd\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
if(tid < vec1->width*vec1->height)
{
result->data[tid] = vec1->data[tid] + vec2->data[tid];
}
}
__global__ void MatrixSubtract(Vector2D * result, Vector2D * vec1, Vector2D * vec2)
{
if((vec1->width != vec2->width) || (vec1->height != vec2->height))
{
printf("\n\n**********Matrix Subtract diff dimension....");
return;
}
int tx = blockIdx.x*blockDim.x+ threadIdx.x;
int ty = blockIdx.y*blockDim.y + threadIdx.y;
int tid = ty*vec1->width+tx;
if(tid ==0)
{
//printf("\nMatrixSubtractvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width))
{
printf("\nMatrixSubtract\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
if(tid < vec1->width*vec1->height)
{
result->data[tid] = vec1->data[tid] - vec2->data[tid];
}
}
__global__ void TransposeVector2D(Vector2D * res, Vector2D * m1)
{
if((res->width != m1->height) || (res->height != m1->width))
{
printf("\n\n**********Matrix Transpose diff dimension....");
printf("\nres->width : %d res->heihgt : %d - m1->width : %d m1->height %d\n", res->width, res->height, m1->width, m1->height);
return;
}
int thx = blockIdx.x*blockDim.x+ threadIdx.x;
int thy = blockIdx.y*blockDim.y+threadIdx.y;
int tid = thx + thy*m1->width;
if(tid ==0)
{
//printf("\nTransposeVector2Dvec->width : %d vec->height : %d - x dim %d y dim %d\n", m1->width, m1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if((blockDim.y*gridDim.y < m1->height) || (blockDim.x*gridDim.x<m1->width))
{
printf("\nTransposeVector2D\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", m1->width, m1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
if(tid < m1->width*m1->height)
{
res->data[thy+thx*m1->height] = m1->data[tid] ;
//printf("idy : %d - idx : %d - blockdim x : %d - blockDim y : %d - gridDim.x - %d - gridDim.y : %d\n", thy, thx, blockDim.x, blockDim.y, gridDim.x, gridDim.y);
}
}
__global__ void DisplayVector2D(Vector2D * vector)
{
printf("[");
for(int h = 0; h < vector->height; h++)
{
printf("[");
for( int w = 0; w < vector->width-1; w++)
{
printf("%f, ", vector->data[h*vector->width+w]);
}
printf("%f], \n", vector->data[h*vector->width+vector->width-1]);
}
printf("]\n");
printf("Row : %d - Width : %d \n\n", vector->height, vector->width);
}
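// Hedged host-side sketch (not in the original file): build a 2x3 matrix on
// the host, upload it with CreateVector2D and print it with the
// DisplayVector2D kernel. The function name DemoCreateAndDisplay is an
// assumption made for illustration.
void DemoCreateAndDisplay()
{
float host_data[6] = { 1, 2, 3, 4, 5, 6 };
// CreateVector2D copies the host buffer into device memory and returns a
// device-side Vector2D descriptor.
Vector2D * device_vec = CreateVector2D(host_data, 2, 3);
// DisplayVector2D prints the matrix from a single GPU thread.
hipLaunchKernelGGL(DisplayVector2D, dim3(1), dim3(1), 0, 0, device_vec);
hipDeviceSynchronize();
}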
__global__ void MatrixProduct(Vector2D * result, Vector2D * m1, Vector2D * m2)
{
int thx = blockIdx.x*blockDim.x+ threadIdx.x;
int thy = blockIdx.y*blockDim.y+threadIdx.y;
if(thx == 0 && thy ==0){
if((m1->width != m2->height))
{
printf("\n\n**********Matrix Product error dimension....");
printf("\nm1->width %d m1->height %d - m2->width %d m2->height %d\n", m1->width, m1->height, m2->width, m2->height);
return;
}
//printf("\nMatrixProductvec->width : %d vec->height : %d - x dim %d y dim %d\n", result->width, result->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if(thx ==0 && thy == 0)
if((blockDim.y*gridDim.y < result->height) || (blockDim.x*gridDim.x<result->width))
{
printf("\nMatrixProduct\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", result->width, result->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
if(thx < result->width && thy < result->height)
{
float toplam = 0;
for(int h = 0; h < m1->width; h++)
{
toplam += m1->data[thy*m1->width+h] * m2->data[h*m2->width+thx];
}
result->data[thy*result->width + thx] = toplam;
//printf("idy : %d - idx : %d - blockdim x : %d - blockDim y : %d - gridDim.x - %d - gridDim.y : %d\n", thy, thx, blockDim.x, blockDim.y, gridDim.x, gridDim.y);
}
}
__global__ void ScalarMinusVector2D(Vector2D * result, float value, Vector2D * vec1)
{
if((result->width != vec1->width) || (result->height != vec1->height))
{
printf("\n\n**********Scaar Minus vectrordiff dimension....");
return;
}
int tx = blockIdx.x*blockDim.x+ threadIdx.x;
int ty = blockIdx.y*blockDim.y + threadIdx.y;
int tid = ty*vec1->width+tx;
if(tid ==0)
{
//printf("\nScalarMinusVector2Dvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width))
{
printf("\nScalarMinusVector2D\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
if(tid < vec1->width*vec1->height)
{
result->data[tid] = 1-vec1->data[tid];
}
}
__global__ void ScalarMatrixProduct(Vector2D * result, float scalar, Vector2D * vec1)
{
if((result->width != vec1->width) || (result->height != vec1->height))
{
printf("\n\n**********ScalarMatrixProduct dimension....");
return;
}
int tx = blockIdx.x*blockDim.x+ threadIdx.x;
int ty = blockIdx.y*blockDim.y + threadIdx.y;
int tid = ty*vec1->width+tx;
if(tid ==0)
{
//printf("\nScalarMatrixProductvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width))
{
printf("\nScalarMatrixProduct\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
if(tid < vec1->width*vec1->height)
{
result->data[tid] = scalar*vec1->data[tid];
}
}
__global__ void MatrixPairwiseProduct(Vector2D * result, Vector2D * vec1, Vector2D * vec2)
{
if((vec1->width != vec2->width) || (vec1->height != vec2->height))
{
printf("\n\n**********MatrixPairwiseProduct dimension....");
return;
}
int tx = blockIdx.x*blockDim.x+ threadIdx.x;
int ty = blockIdx.y*blockDim.y + threadIdx.y;
int tid = ty*vec1->width+tx;
if(tid ==0)
{
//printf("\nMatrixPairwiseProductvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width))
{
printf("\nMatrixPairwiseProduct\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec2->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
if(tid < vec1->width*vec1->height)
{
result->data[tid] = vec1->data[tid] * vec2->data[tid];
}
}
__device__ float error_sum[32];
__global__ void Sum2D(Vector2D * vec)
{
int tid = threadIdx.y;
float val = 0;
int width = vec->width;
for(int a = 0; a < width; a++)
{
val += vec->data[a+tid*width];
}
error_sum[tid] = val;
}
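/*
 * Added note: Sum2D reduces each row of vec into the fixed 32-element __device__ array
 * error_sum, indexed by threadIdx.y, so it assumes a single block whose blockDim.y equals
 * the number of rows (at most 32). main() launches it with one block of dim3(1, 32) and
 * batch_size 32, which matches that assumption.
 */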
__global__ void ArgMax2D(Vector2D * vector)
{
}
__global__ void Log2D(Vector2D * result, Vector2D * vec1)
{
int tx = blockIdx.x*blockDim.x+ threadIdx.x;
int ty = blockIdx.y*blockDim.y + threadIdx.y;
int tid = ty*vec1->width+tx;
if(tid ==0)
{
//printf("\nLog2Dvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width))
{
printf("\nLog2D\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
float val;
if(tid < vec1->width*vec1->height)
{
val = log(vec1->data[tid]);
result->data[tid] = val;
}
}
__global__ void Exponential(Vector2D * result, Vector2D * vec1)
{
int tx = blockIdx.x*blockDim.x+ threadIdx.x;
int ty = blockIdx.y*blockDim.y + threadIdx.y;
int tid = ty*vec1->width+tx;
if(tid ==0)
{
//printf("\nExponentialvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width))
{
printf("\Exponential\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
if(tid < vec1->width*vec1->height)
{
result->data[tid] = exp(vec1->data[tid]);
}
}
__global__ void Softmax(Vector2D * result, Vector2D * vec1)
{
int tid = blockIdx.y*blockDim.y + threadIdx.y;
if(tid ==0)
{
//printf("\nSoftmaxvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if(blockDim.y*gridDim.y < vec1->height)
{
printf("\nSoftmax\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
if(tid < vec1->height)
{
float toplam = 0;
for(int a = 0; a < vec1->width;a++)
{
toplam += vec1->data[a+tid*vec1->width];
}
for(int a = 0; a < vec1->width;a++)
{
result->data[a+tid*vec1->width] = vec1->data[a+tid*vec1->width]/toplam;
}
}
}
__global__ void Sigmoid(Vector2D * result, Vector2D * vec1)
{
int tx = blockIdx.x*blockDim.x+ threadIdx.x;
int ty = blockIdx.y*blockDim.y + threadIdx.y;
int tid = ty*vec1->width+tx;
if(tid ==0)
{
//printf("\nSigmoidvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width))
{
printf("\nSigmoid\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
if(tid < vec1->width*vec1->height)
{
result->data[tid] = 1.0/(1.0 + exp(-(vec1->data[tid])));
}
}
__global__ void PointerSet(Vector2D * f1, Vector2D * f2, int shift, int batch_size)
{
f1->width = f2->width;
f1->height = batch_size;
f1->data = f2->data + f2->width*shift;
}
/*
*
* MLP functions....
*
*
*
*
*
*/
float generate_uniform(float a, float b)
{
return rand() / (RAND_MAX + 1.0) * (b - a) + a;
}
Vector2D ** mlp_structure_information;
Vector2D ** mlp_layer_output_structure;
Vector2D ** mlp_layer_bias_structure;
Vector2D ** weight_array;
int * layer_structure = NULL;
int mlp_layer_count = 0;
Vector2D ** layer_results;
Vector2D ** layer_updates;
Vector2D ** bias_array;
Vector2D ** bias_results;
Vector2D ** device_weight_array;
Vector2D ** layer_error_array;
Vector2D ** scalar_minus_array;
Vector2D ** bias_updates;
float learning_rate = 0.000001;
Vector2D * CreateWeightMatrix(int input_count, int output_count);
void CreateMLP(int layer_count, ...)
{
mlp_layer_count = layer_count;
weight_array = (Vector2D **)malloc((layer_count-1)*sizeof(Vector2D *));
bias_array = (Vector2D **)malloc((layer_count - 1 )*sizeof(Vector2D *));
bias_results = (Vector2D **)malloc((layer_count - 1 )*sizeof(Vector2D *));
bias_updates = (Vector2D **)malloc((layer_count-1)*sizeof(Vector2D));
layer_error_array = (Vector2D **)malloc((layer_count-1)*sizeof(Vector2D));
mlp_structure_information = (Vector2D **)malloc((layer_count-1)*sizeof(Vector2D));
mlp_layer_output_structure = (Vector2D **)malloc((layer_count-1)*sizeof(Vector2D));
//This will hold the layer values after the forward pass, to be used in backpropagation...
layer_results = (Vector2D **)malloc((layer_count-1)*sizeof(Vector2D*));
layer_updates = (Vector2D **)malloc((layer_count-1)*sizeof(Vector2D*));
layer_structure = (int *)malloc(layer_count*sizeof(int));
va_list ap;
va_start(ap, layer_count);
for(int a=0; a<layer_count;a++)
{
layer_structure[a] = va_arg(ap, int);
}
va_end(ap);
printf("\nMLP structure\n");
for(int a=0; a<mlp_layer_count;a++)
printf("%d ", layer_structure[a]);
printf("\n\n");
for(int i=0; i<layer_count-1;i++)
{
//printf("\n\nLayer %d\n", i);
weight_array[i] = CreateWeightMatrix(layer_structure[i], layer_structure[i+1]);
layer_updates[i] = CreateVector2D(NULL, layer_structure[i], layer_structure[i+1], false);
bias_array[i] = CreateWeightMatrix(1, layer_structure[i+1]);
}
}
/*
He (Kaiming) initialization will be used...
*/
Vector2D * CreateWeightMatrix(int input_count, int output_count)
{
float init_range = 0;
Vector2D * temp = (Vector2D *)malloc(sizeof(Vector2D));
Vector2D * device_temp;
CHECK(hipMalloc(&device_temp, sizeof(Vector2D)));
temp->height = input_count; //For bias...
temp->width = output_count;
temp->data = (float * )malloc(sizeof(float)*(input_count)*output_count);
init_range = sqrt(2.0 / input_count);
for(int a=0; a<(input_count)*output_count; a++)
{
temp->data[a] = generate_uniform(-init_range, init_range);
}
float * temp2;
CHECK(hipMalloc(&temp2, sizeof(float)*temp->height*temp->width));
CHECK(hipMemcpy(temp2, temp->data, sizeof(float)*temp->height*temp->width, hipMemcpyHostToDevice));
CHECK(hipMemcpy(&device_temp->data, &temp2, sizeof(float *), hipMemcpyHostToDevice));
CHECK(hipMemcpy(&device_temp->height, &(temp->height), sizeof(int), hipMemcpyHostToDevice));
CHECK(hipMemcpy(&device_temp->width, &(temp->width), sizeof(int), hipMemcpyHostToDevice));
return device_temp;
}
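/*
 * Added note: init_range = sqrt(2.0 / input_count) gives the He-style uniform range
 * [-sqrt(2/fan_in), +sqrt(2/fan_in)]. For the CreateMLP(3, 32754, 160, 4) network built in
 * main(), that is roughly [-0.0078, +0.0078] for the first weight matrix (fan_in = 32754)
 * and roughly [-0.112, +0.112] for the second (fan_in = 160).
 */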
Vector2D * device_ones = NULL, * device_transpose_ones;
int res_height;
Vector2D * device_input;
Vector2D * device_input_temp;
Vector2D * final_output;
Vector2D * device_input_transpose;
dim3 grid;
void FeedForward(Vector2D * input, int input_width, int input_height)
{
int thread_block_x = 32, thread_block_y = 32;
//printf("\n\n**************Feedforward*********** width : %d height : %d\n", input_width, input_height);
// If there is more than one sample in the input, the output of every layer is a matrix
// But the bias vector is just one vector... It should be added to every row of the layer output
// To be able to perform this operation, every bias vector is multiplied with a vector consisting of ones...
// By doing so a matrix form is obtained and this matrix is added to the result
/*
Suppose a(0) = [1 2 3] and input = [1, 5] b(0)= [2 3] matmul(input, a(0)) = [23 35]
[4 5 6] [5, 6] [73 86]
[4, 6]
So for each sample the b(0) should be added to every row of matmul result
So we can do it by matrix multiplied by [1] column vector matmul([1], b(0)) = [2 3] by so it can be added..
[1] [1] [2 3]
1 is used because every layer 0th node is assumed to be 1 while adding bias...
*/
if(device_ones == NULL)
{
float * ones_ = (float *)malloc(sizeof(float)*input_height);
for(int i=0;i<input_height;i++)ones_[i] = 1.0;
device_ones = CreateVector2D(ones_, 1, input_height, true);
device_transpose_ones = CreateVector2D(ones_, input_height, 1, true);
res_height = input_height;
for(int current_layer = 1; current_layer < mlp_layer_count; current_layer++)
{
Vector2D * res = CreateVector2D(NULL, input_height, layer_structure[current_layer], false);
layer_results[current_layer-1] = res;
Vector2D * res2 = CreateVector2D(NULL, input_height, layer_structure[current_layer], false);
bias_results[current_layer-1] = res2;
}
device_input_temp = input;
device_input_transpose = CreateVector2D(NULL, input_width, input_height, false);
device_input = device_input_temp;
}
else
{
device_input = device_input_temp;
}
//printf("\nInput data : \n");
//DisplayVector2D<<<1, 1>>>(device_input);
//hipDeviceSynchronize();
// By starting from the first layer we are performing the forward pass iteratively...
for(int current_layer=1; current_layer < mlp_layer_count; current_layer++)
{
{
// weight matrix of the layer and the previous input is multiplied
dim3 b(thread_block_x, thread_block_y);
dim3 grid( (layer_structure[current_layer]+b.x-1)/b.x, (input_height+b.y-1)/b.y);
/*
printf("\n\n\nLayer : %d\ndevice input : \n", current_layer-1);
DisplayVector2D<<<1, 1>>>(device_input);
hipDeviceSynchronize();
printf("\weight input : \n");
DisplayVector2D<<<1, 1>>>(weight_array[current_layer-1]);
hipDeviceSynchronize();
printf("\nresult vector .x %d .y %d\n", layer_structure[current_layer], res_height);
printf("\ngrid.x : %d - grid.y : %d\n", grid.x, grid.y);
*/
hipLaunchKernelGGL(( MatrixProduct), dim3(grid), dim3(b), 0, 0, layer_results[current_layer-1], device_input, weight_array[current_layer-1]);
hipDeviceSynchronize();
// Bias matrix is obtained...
hipLaunchKernelGGL(( MatrixProduct), dim3(grid), dim3(b), 0, 0, bias_results[current_layer-1], device_transpose_ones, bias_array[current_layer-1]);
hipDeviceSynchronize();
// The bias is added to matmul operation...
hipLaunchKernelGGL(( MatrixAdd), dim3(grid), dim3(b), 0, 0, layer_results[current_layer-1], layer_results[current_layer-1], bias_results[current_layer-1]);
hipDeviceSynchronize();
// If we are not at the output layer yet, the layer output is passed through the sigmoid function...
if(current_layer < mlp_layer_count -1){
hipLaunchKernelGGL(( Sigmoid), dim3(grid), dim3(b), 0, 0, layer_results[current_layer-1], layer_results[current_layer-1]);
//input = sigmoid(temp);
hipDeviceSynchronize();
}
// If at output we are softmaxing the last hidden layer output
else
{
hipLaunchKernelGGL(( Exponential), dim3(grid), dim3(b), 0, 0, layer_results[current_layer-1], layer_results[current_layer-1]);
hipDeviceSynchronize();
grid.x = 1; b.x = 1;
hipLaunchKernelGGL(( Softmax), dim3(grid), dim3(b), 0, 0, layer_results[current_layer-1], layer_results[current_layer-1]);
hipDeviceSynchronize();
//input = softmax(temp);
}
device_input = layer_results[current_layer-1];
}
}
final_output = device_input;
}
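/*
 * Added sketch: a minimal CPU reference for one forward layer, illustrating the
 * bias-broadcast trick described in the comment at the top of FeedForward (the GPU path
 * multiplies a column of ones by the bias row to broadcast it over the batch). This helper
 * is hypothetical and is not called by the GPU code; it assumes the same row-major layout
 * used by Vector2D, i.e. element (r, c) lives at data[r*width + c].
 */
static void ForwardLayerCPU(const float * in, const float * W, const float * bias, float * out,
                            int batch, int in_dim, int out_dim)
{
    for(int r = 0; r < batch; r++)              // one row per sample
    {
        for(int c = 0; c < out_dim; c++)        // one column per output unit
        {
            float acc = bias[c];                // same effect as adding ones * bias row
            for(int k = 0; k < in_dim; k++)
                acc += in[r*in_dim + k] * W[k*out_dim + c];
            out[r*out_dim + c] = 1.0f/(1.0f + expf(-acc));   // sigmoid, as in the hidden layers
        }
    }
}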
Vector2D * error;
Vector2D * label_data;
bool first_call = true;
Vector2D ** layer_weights_transpose;
Vector2D ** layer_results_transpose;
Vector2D * error_result;
Vector2D * batch_data;
Vector2D * batch_label;
Vector2D * whole_input_data;
void BackPropagate(Vector2D * input, Vector2D * labels, int input_width, int output_width, int batch_size)
{
//printf("\n\nBackPropagation\n\n");
// Firstly we are getting the outputs of each layer...
int thread_block_x = 32, thread_block_y = 32;
if(first_call == true)
{
first_call = false;
layer_weights_transpose = (Vector2D **)malloc(sizeof(Vector2D*)*(mlp_layer_count-1));
layer_results_transpose = (Vector2D **)malloc(sizeof(Vector2D*)*(mlp_layer_count-1));
scalar_minus_array = (Vector2D **)malloc(sizeof(Vector2D*)*(mlp_layer_count-1));
for(int a=0; a<mlp_layer_count-1;a++)
{
layer_weights_transpose[a] = CreateVector2D(NULL, layer_structure[a+1], layer_structure[a], false);
}
for(int current_layer = 1; current_layer < mlp_layer_count; current_layer++)
{
Vector2D * res = CreateVector2D(NULL, layer_structure[current_layer], batch_size, false);
layer_results_transpose[current_layer-1] = res;
Vector2D * res2 = CreateVector2D(NULL, batch_size, layer_structure[current_layer], false);
scalar_minus_array[current_layer-1] = res2;
}
for(int a = 1; a<mlp_layer_count;a++)
{
layer_error_array[a-1] = CreateVector2D(NULL, batch_size, layer_structure[a],false);
bias_updates[a-1] = CreateVector2D(NULL, 1, layer_structure[a], false);
}
error_result = CreateVector2D(NULL, batch_size, layer_structure[mlp_layer_count-1], false);
}
FeedForward(input, input_width, batch_size);
// We are calculating the error at the output....
dim3 block(thread_block_x, thread_block_y);
dim3 grid((output_width+block.x-1)/block.x, (batch_size+block.y-1)/block.y);
hipLaunchKernelGGL(( MatrixSubtract), dim3(grid), dim3(block), 0, 0, layer_error_array[mlp_layer_count-2], labels, final_output);
hipDeviceSynchronize();
Vector2D * temp1;
// We are starting from output layer...
for( int current_layer = mlp_layer_count-2; current_layer>0;current_layer--)
{
// if we are at the output layer its weights should be adjusted by simply
// performing a matrix multiplication of the previous layer output with the output error...
if(current_layer == mlp_layer_count-2)
{
// The previous layer's output is transposed...
dim3 block(thread_block_x, thread_block_y);
dim3 grid((layer_structure[current_layer]+block.x-1)/block.x, (batch_size+block.y-1)/block.y);
hipLaunchKernelGGL(( TransposeVector2D), dim3(grid), dim3(block), 0, 0, layer_results_transpose[current_layer-1], layer_results[current_layer-1]);
hipDeviceSynchronize();
// The bias also must be updated similarly to the normal weights
/*
We could have inserted the bias weights into the normal layer weight matrix, but in the intermediate
layers a column of 1s would then have to be added while performing the calculation, and this brings some extra work in memory..
If it weren't separated, the output of the previous layer would be X(2) = [1 2 3]
[1 4 5]
[1 6 7]
While updating the weights we take the transpose of the previous layer output, which is why for the separate
bias update we perform a matrix multiplication with the ones vector. If we transpose the output above
we would have obtained [1 1 1] and this ones row vector comes from this fact...
[2 4 6]
[3 5 7]
*/
// We are storing the updates in an array of pointer later we will update our actual weights...
dim3 block2(layer_structure[mlp_layer_count-1] ,layer_structure[current_layer]);
dim3 grid2((layer_structure[mlp_layer_count-1]+block.x-1)/block.x, (layer_structure[current_layer]+block.y-1)/block.y);
hipLaunchKernelGGL(( MatrixProduct), dim3(grid2), dim3(block), 0, 0, layer_updates[current_layer], layer_results_transpose[current_layer-1], layer_error_array[current_layer]);
hipDeviceSynchronize();
dim3 temp(layer_structure[current_layer+1] ,1);
dim3 grid3((layer_structure[current_layer+1]+block.x-1)/block.x, (1+block.y-1)/block.y);
hipLaunchKernelGGL(( MatrixProduct), dim3(grid3), dim3(block), 0, 0, bias_updates[current_layer], device_ones, layer_error_array[current_layer]);
hipDeviceSynchronize();
continue;
}
temp1 = layer_error_array[current_layer+1] ;
dim3 block3(layer_structure[current_layer+2], layer_structure[current_layer+1]);
dim3 grid3((layer_structure[current_layer+2]+block.x-1)/block.x, (layer_structure[current_layer+1]+block.y-1)/block.y);
hipLaunchKernelGGL(( TransposeVector2D), dim3(grid3), dim3(block), 0, 0, layer_weights_transpose[current_layer+1], weight_array[current_layer+1]);
hipDeviceSynchronize();
// The error propagated to the current layer is obtained by multiplying the next layer's error with the transpose of the next layer's weight matrix,
// and then pairwise-multiplying with the current layer's output and with (1 - current layer's output)....
// i.e. the error is propagated back by one layer
dim3 block4(layer_structure[current_layer+1], batch_size );
dim3 grid4((layer_structure[current_layer+1]+block.x-1)/block.x, ( batch_size+block.y-1)/block.y);
hipLaunchKernelGGL(( MatrixProduct), dim3(grid4), dim3(block), 0, 0, layer_error_array[current_layer], temp1, layer_weights_transpose[current_layer+1]);
hipDeviceSynchronize();
dim3 block5(layer_structure[current_layer+1], batch_size);
dim3 grid5((layer_structure[current_layer+1]+block.x-1)/block.x, (batch_size+block.y-1)/block.y);
hipLaunchKernelGGL(( ScalarMinusVector2D), dim3(grid5), dim3(block), 0, 0, scalar_minus_array[current_layer], 1, layer_results[current_layer]);
hipDeviceSynchronize();
// When we multiply the propagated error with the current layer's output
/*
The layer output was obtained via the sigmoid, and the derivative of the sigmoid is (sigmoid*(1-sigmoid)).
While applying the chain rule this term takes its place in the backpropagation...
*/
// So by multiplying we are exactly calculating this layer's error...
hipLaunchKernelGGL(( MatrixPairwiseProduct), dim3(grid5), dim3(block), 0, 0, layer_error_array[current_layer], layer_error_array[current_layer], layer_results[current_layer]);
hipDeviceSynchronize();
hipLaunchKernelGGL(( MatrixPairwiseProduct), dim3(grid5), dim3(block), 0, 0, layer_error_array[current_layer], layer_error_array[current_layer], scalar_minus_array[current_layer]);
hipDeviceSynchronize();
// We are transposing the prev layer output to calculate current layer's weight change...
dim3 block6(layer_structure[current_layer], batch_size);
dim3 grid6((layer_structure[current_layer]+block.x-1)/block.x, (batch_size+block.y-1)/block.y);
hipLaunchKernelGGL(( TransposeVector2D), dim3(grid6), dim3(block), 0, 0, layer_results_transpose[current_layer-1], layer_results[current_layer - 1 ]);
hipDeviceSynchronize();
dim3 block7(layer_structure[current_layer+1], layer_structure[current_layer]);
dim3 grid7((layer_structure[current_layer+1]+block.x-1)/block.x, (layer_structure[current_layer]+block.y-1)/block.y);
hipLaunchKernelGGL(( MatrixProduct), dim3(grid7), dim3(block), 0, 0, layer_updates[current_layer], layer_results_transpose[current_layer-1], layer_error_array[current_layer]);
hipDeviceSynchronize();
// Bias is also updated according to explanation above....
dim3 block8(layer_structure[current_layer+1], 1);
dim3 grid8((layer_structure[current_layer+1]+block.x-1)/block.x, (1+block.y-1)/block.y);
hipLaunchKernelGGL(( MatrixProduct), dim3(grid8), dim3(block), 0, 0, bias_updates[current_layer], device_ones, layer_error_array[current_layer]);
hipDeviceSynchronize();
}
temp1 = layer_error_array[1];
dim3 block9(layer_structure[2], layer_structure[1]);
dim3 grid9((layer_structure[2]+block.x-1)/block.x, (layer_structure[1]+block.y-1)/block.y);
hipLaunchKernelGGL(( TransposeVector2D), dim3(grid9), dim3(block), 0, 0, layer_weights_transpose[1], weight_array[1]);
hipDeviceSynchronize();
dim3 block10(layer_structure[1], batch_size );
dim3 grid10((layer_structure[1]+block.x-1)/block.x, (batch_size+block.y-1)/block.y);
hipLaunchKernelGGL(( MatrixProduct), dim3(grid10), dim3(block), 0, 0, layer_error_array[0], temp1, layer_weights_transpose[1]);
hipDeviceSynchronize();
dim3 block11(layer_structure[0+1], batch_size);
dim3 grid11((layer_structure[0+1]+block.x-1)/block.x, (batch_size+block.y-1)/block.y);
hipLaunchKernelGGL(( ScalarMinusVector2D), dim3(grid11), dim3(block), 0, 0, scalar_minus_array[0], 1, layer_results[0]);
hipDeviceSynchronize();
hipLaunchKernelGGL(( MatrixPairwiseProduct), dim3(grid11), dim3(block), 0, 0, layer_error_array[0], layer_error_array[0], layer_results[0]);
hipDeviceSynchronize();
hipLaunchKernelGGL(( MatrixPairwiseProduct), dim3(grid11), dim3(block), 0, 0, layer_error_array[0], layer_error_array[0], scalar_minus_array[0]);
hipDeviceSynchronize();
dim3 block12(layer_structure[0], batch_size);
dim3 grid12((layer_structure[0]+block.x-1)/block.x, (batch_size+block.y-1)/block.y);
hipLaunchKernelGGL(( TransposeVector2D), dim3(grid12), dim3(block), 0, 0, device_input_transpose , batch_data);
hipDeviceSynchronize();
dim3 block13(layer_structure[1], layer_structure[0]);
dim3 grid13((layer_structure[1]+block.x-1)/block.x, (layer_structure[0]+block.y-1)/block.y);
hipLaunchKernelGGL(( MatrixProduct), dim3(grid13), dim3(block), 0, 0, layer_updates[0], device_input_transpose, layer_error_array[0]);
hipDeviceSynchronize();
dim3 block14(layer_structure[0+1], 1);
dim3 grid14((layer_structure[0+1]+block.x-1)/block.x, (1+block.y-1)/block.y);
hipLaunchKernelGGL(( MatrixProduct), dim3(grid14), dim3(block), 0, 0, bias_updates[0], device_ones, layer_error_array[0]);
hipDeviceSynchronize();
for(int weight_index=0; weight_index < mlp_layer_count - 1;weight_index++)
{
dim3 grid((layer_structure[weight_index+1]+block.x-1)/block.x, (layer_structure[weight_index]+block.y-1)/block.y);
hipLaunchKernelGGL(( ScalarMatrixProduct), dim3(grid), dim3(block), 0, 0, layer_updates[weight_index], learning_rate, layer_updates[weight_index]);
hipDeviceSynchronize();
hipLaunchKernelGGL(( MatrixAdd), dim3(grid), dim3(block), 0, 0, weight_array[weight_index], weight_array[weight_index], layer_updates[weight_index]);
hipDeviceSynchronize();
dim3 grid2((layer_structure[weight_index+1]+block.x-1)/block.x ,1);
hipLaunchKernelGGL(( ScalarMatrixProduct), dim3(grid2), dim3(block), 0, 0, bias_updates[weight_index], learning_rate, bias_updates[weight_index]);
hipDeviceSynchronize();
hipLaunchKernelGGL(( MatrixAdd), dim3(grid2), dim3(block), 0, 0, bias_array[weight_index], bias_array[weight_index], bias_updates[weight_index]);
hipDeviceSynchronize();
}
}
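/*
 * Added sketch: the output-layer update of BackPropagate written out on the CPU for one
 * batch. It mirrors the GPU steps: delta = labels - prediction, dW = A_prev^T * delta,
 * db = ones^T * delta (which is why the bias update above multiplies the error matrix by
 * the ones vector), and finally W += learning_rate * dW. Hypothetical helper, not called
 * by the GPU code; same row-major layout as Vector2D.
 */
static void OutputLayerUpdateCPU(const float * a_prev, const float * labels, const float * pred,
                                 float * W, float * b, int batch, int prev_dim, int out_dim, float lr)
{
    for(int i = 0; i < prev_dim; i++)
        for(int j = 0; j < out_dim; j++)
        {
            float dw = 0;
            for(int r = 0; r < batch; r++)      // A_prev^T * (labels - pred)
                dw += a_prev[r*prev_dim + i] * (labels[r*out_dim + j] - pred[r*out_dim + j]);
            W[i*out_dim + j] += lr*dw;          // matches ScalarMatrixProduct + MatrixAdd above
        }
    for(int j = 0; j < out_dim; j++)
    {
        float db = 0;
        for(int r = 0; r < batch; r++)          // ones^T * delta collapses the batch dimension
            db += labels[r*out_dim + j] - pred[r*out_dim + j];
        b[j] += lr*db;
    }
}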
Vector2D * CreateVector2DCPU(float * data, int height, int width)
{
// A new structure is allocated in memory for matrix/vector...
Vector2D * temp = (Vector2D *)malloc(sizeof(struct Vector2D));
temp->data = data;
temp->height = height;
temp->width = width;
return temp;
};
Vector2D * CreateOneHot(Vector2D * indexes, int vector_length)
{
Vector2D * one_hot_vector = (Vector2D*)malloc(sizeof(Vector2D));
one_hot_vector->height = indexes->height;
one_hot_vector->width = vector_length;
one_hot_vector->size = one_hot_vector->height;
one_hot_vector->data = (float *)malloc(sizeof(float)*indexes->height*vector_length);
memset(one_hot_vector->data, 0, sizeof(float)*indexes->height*vector_length);
for(int i=0; i<one_hot_vector->height;i++)
{
one_hot_vector->data[i*vector_length+(int)indexes->data[i*indexes->width]] = 1.0;
}
return one_hot_vector;
}
#define kk 10
void read_mnist_(const char *file_path, int num_data, int len_info, int arr_n, unsigned char data_char[][kk], int info_arr[])
{
}
void DisplayVector2DCPU(Vector2D * vector)
{
printf("[");
for(int h = 0; h < vector->height; h++)
{
printf("[");
for( int w = 0; w < vector->width-1; w++)
{
printf("%f, ", vector->data[h*vector->width+w]);
}
printf("%f], \n", vector->data[h*vector->width+vector->width-1]);
}
printf("\b\b\b]");
}
#define EMPTY printf("\n\n");
Vector2D * load_text_data()
{
FILE * dosya = fopen("text_data.dat", "rb");
int width, height;
fread(&width, sizeof(int), 1, dosya);
fread(&height, sizeof(int), 1, dosya);
float * loaded_data = (float *)malloc(width*height*sizeof(float));
for(int a =0; a< width*height; a++)
fread(&loaded_data[a], sizeof(float), 1, dosya);
fclose(dosya);
printf("Width : %d - Height : %d\n", width, height);
Vector2D * vec = CreateVector2DCPU(loaded_data, height, width);
return vec;
}
Vector2D * load_label_data()
{
FILE * dosya = fopen("label_data.dat", "rb");
int width, height;
fread(&width, sizeof(int), 1, dosya);
fread(&height, sizeof(int), 1, dosya);
float * loaded_data = (float *)malloc(width*height*sizeof(float));
int value;
for(int a =0; a< width*height; a++)
{
fread(&value, sizeof(int), 1, dosya);
loaded_data[a] = value;
}
fclose(dosya);
printf("Width : %d - Height : %d\n", width, height);
Vector2D * vec = CreateVector2DCPU(loaded_data, height, width);
return vec;
}
int main()
{
srand(time(0));
printf("MLP is being created....\n");
CreateMLP(3, 32754, 160, 4);
float error_val[32];
int batch_size = 32;
learning_rate = 0.000001;
Vector2D * data_set = load_text_data();
Vector2D * labels_ = load_label_data();
printf("\nData loaded...\n");
Vector2D * one_hot_labels = CreateOneHot(labels_, 4);
#define ITERATION_COUNT 15
double toplam= 0.0;
label_data = CreateVector2D(one_hot_labels->data, one_hot_labels->height, one_hot_labels->width, true);
batch_data = CreateVector2D(NULL, batch_size, data_set->width, false);
whole_input_data = CreateVector2D(data_set->data, data_set->height, data_set->width, true);
batch_label = CreateVector2D(NULL, batch_size, one_hot_labels->width, false);
for(int iteration = 1; iteration < ITERATION_COUNT; iteration++)
{
toplam = 0.0;
for(int j = 0; j < data_set->height/batch_size; j++)
{
hipLaunchKernelGGL(( PointerSet), dim3(1), dim3(1), 0, 0, batch_data, whole_input_data, j*batch_size, batch_size);
hipDeviceSynchronize();
hipLaunchKernelGGL(( PointerSet), dim3(1), dim3(1), 0, 0, batch_label, label_data, j*batch_size, batch_size);
hipDeviceSynchronize();
FeedForward(batch_data, data_set->width, batch_size);
hipDeviceSynchronize();
dim3 block(32, 32);
dim3 grids((one_hot_labels->width+block.x-1)/block.x, (batch_size+block.y-1)/block.y);
hipLaunchKernelGGL(( Log2D), dim3(grids), dim3(block), 0, 0, final_output, final_output);
hipDeviceSynchronize();
hipLaunchKernelGGL(( MatrixPairwiseProduct), dim3(grids), dim3(block), 0, 0, final_output, batch_label, final_output);
hipDeviceSynchronize();
dim3 k(1, 32);
hipLaunchKernelGGL(( Sum2D), dim3(1), dim3(k), 0, 0, final_output);
hipDeviceSynchronize();
hipMemcpyFromSymbol( &error_val, error_sum, sizeof(float)*32);
//printf("\n\nIteration %d - Error : %f\n", iteration, error_val);
hipDeviceSynchronize();
for(int a=0;a<32;a++)
toplam += -error_val[a];
}
printf("\n\nIteration %d - Error : %f\n", iteration, toplam);
for(int j = 0; j < data_set->height/batch_size; j++)
{
hipLaunchKernelGGL(( PointerSet), dim3(1), dim3(1), 0, 0, batch_data, whole_input_data, j*batch_size, batch_size);
hipDeviceSynchronize();
hipLaunchKernelGGL(( PointerSet), dim3(1), dim3(1), 0, 0, batch_label, label_data, j*batch_size, batch_size);
hipDeviceSynchronize();
BackPropagate(batch_data, batch_label, data_set->width, one_hot_labels->width, batch_size);
hipDeviceSynchronize();
}
}
hipDeviceReset();
}
| ab5103cf77eff6200db3b54586563d7ba7ad4deb.cu | #include<stdio.h>
#include<string.h>
#include <stdlib.h>
#include <stdarg.h>
#include<time.h>
#include <math.h>
#include "MNIST_for_C-master/mnist.h"
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
// C language does not contain a boolean type, we are defining our type...
#define FALSE 0
#define TRUE 1
// All vectors/matrices are stored as this structure in the memory...
struct Vector2D
{
// Whole vector/matrix data is stored in one dimensional array...
// All numbers are floating point numbers....
//This pointer points to where the vector/matrix data lies....
float * data;
// Row number of the vector/matrix...
int height;
// Column number of the vector/matrix...
int width;
int size;
};
// We are defining a type from this structure definition...
typedef struct Vector2D Vector2D;
__device__ Vector2D array[5];
float * device_matrix_location;
Vector2D * CreateVector2D(float * data, int height, int width, bool fill = true, bool store = false)
{
// A new structure is allocated in GPU memory for matrix/vector...
Vector2D * temp ;
CHECK(cudaMalloc(&temp, sizeof(Vector2D)));
float * temp2;
CHECK(cudaMalloc(&temp2, sizeof(float)*height*width));
if(fill == true)
CHECK(cudaMemcpy(temp2, data, sizeof(float)*height*width, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(&temp->data, &temp2, sizeof(float *), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(&temp->height, (void *)(&height), sizeof(int), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(&temp->width, (void *)(&width), sizeof(int), cudaMemcpyHostToDevice));
//temp->height = height;
//temp->width = width;
if(store == true)
device_matrix_location = temp2;
cudaDeviceSynchronize();
return temp;
}
__global__ void MatrixAdd(Vector2D * result, Vector2D * vec1, Vector2D * vec2)
{
if((vec1->width != vec2->width) || (vec1->height != vec2->height))
{
printf("\n\n**********Matrix add diff dimension....");
return;
}
int tx = blockIdx.x*blockDim.x+ threadIdx.x;
int ty = blockIdx.y*blockDim.y + threadIdx.y;
int tid = ty*vec1->width+tx;
if(tid ==0)
{//printf("\nMatrixAddvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width))
{
printf("\nMatrixAdd\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
if(tid < vec1->width*vec1->height)
{
result->data[tid] = vec1->data[tid] + vec2->data[tid];
}
}
__global__ void MatrixSubtract(Vector2D * result, Vector2D * vec1, Vector2D * vec2)
{
if((vec1->width != vec2->width) || (vec1->height != vec2->height))
{
printf("\n\n**********Matrix Subtract diff dimension....");
return;
}
int tx = blockIdx.x*blockDim.x+ threadIdx.x;
int ty = blockIdx.y*blockDim.y + threadIdx.y;
int tid = ty*vec1->width+tx;
if(tid ==0)
{
//printf("\nMatrixSubtractvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width))
{
printf("\nMatrixSubtract\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
if(tid < vec1->width*vec1->height)
{
result->data[tid] = vec1->data[tid] - vec2->data[tid];
}
}
__global__ void TransposeVector2D(Vector2D * res, Vector2D * m1)
{
if((res->width != m1->height) || (res->height != m1->width))
{
printf("\n\n**********Matrix Transpose diff dimensionç....");
printf("\nres->width : %d res->heihgt : %d - m1->width : %d m1->height %d\n", res->width, res->height, m1->width, m1->height);
return;
}
int thx = blockIdx.x*blockDim.x+ threadIdx.x;
int thy = blockIdx.y*blockDim.y+threadIdx.y;
int tid = thx + thy*m1->width;
if(tid ==0)
{
//printf("\nTransposeVector2Dvec->width : %d vec->height : %d - x dim %d y dim %d\n", m1->width, m1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if((blockDim.y*gridDim.y < m1->height) || (blockDim.x*gridDim.x<m1->width))
{
printf("\nTransposeVector2D\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", m1->width, m1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
if(tid < m1->width*m1->height)
{
res->data[thy+thx*m1->height] = m1->data[tid] ;
//printf("idy : %d - idx : %d - blockdim x : %d - blockDim y : %d - gridDim.x - %d - gridDim.y : %d\n", thy, thx, blockDim.x, blockDim.y, gridDim.x, gridDim.y);
}
}
__global__ void DisplayVector2D(Vector2D * vector)
{
printf("[");
for(int h = 0; h < vector->height; h++)
{
printf("[");
for( int w = 0; w < vector->width-1; w++)
{
printf("%f, ", vector->data[h*vector->width+w]);
}
printf("%f], \n", vector->data[h*vector->width+vector->width-1]);
}
printf("]\n");
printf("Row : %d - Width : %d \n\n", vector->height, vector->width);
}
__global__ void MatrixProduct(Vector2D * result, Vector2D * m1, Vector2D * m2)
{
int thx = blockIdx.x*blockDim.x+ threadIdx.x;
int thy = blockIdx.y*blockDim.y+threadIdx.y;
if(thx == 0 && thy ==0){
if((m1->width != m2->height))
{
printf("\n\n**********Matrix Product error dimensionç....");
printf("\nm1->width %d m1->height %d - m2->width %d m2->height %d\n", m1->width, m1->height, m2->width, m2->height);
return;
}
//printf("\nMatrixProductvec->width : %d vec->height : %d - x dim %d y dim %d\n", result->width, result->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if(thx ==0 && thy == 0)
if((blockDim.y*gridDim.y < result->height) || (blockDim.x*gridDim.x<result->width))
{
printf("\nMatrixProduct\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", result->width, result->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
if(thx < result->width && thy < result->height)
{
float toplam = 0;
for(int h = 0; h < m1->width; h++)
{
toplam += m1->data[thy*m1->width+h] * m2->data[h*m2->width+thx];
}
result->data[thy*result->width + thx] = toplam;
//printf("idy : %d - idx : %d - blockdim x : %d - blockDim y : %d - gridDim.x - %d - gridDim.y : %d\n", thy, thx, blockDim.x, blockDim.y, gridDim.x, gridDim.y);
}
}
__global__ void ScalarMinusVector2D(Vector2D * result, float value, Vector2D * vec1)
{
if((result->width != vec1->width) || (result->height != vec1->height))
{
printf("\n\n**********Scaar Minus vectrordiff dimensionç....");
return;
}
int tx = blockIdx.x*blockDim.x+ threadIdx.x;
int ty = blockIdx.y*blockDim.y + threadIdx.y;
int tid = ty*vec1->width+tx;
if(tid ==0)
{
//printf("\nScalarMinusVector2Dvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width))
{
printf("\nScalarMinusVector2D\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
if(tid < vec1->width*vec1->height)
{
result->data[tid] = 1-vec1->data[tid];
}
}
__global__ void ScalarMatrixProduct(Vector2D * result, float scalar, Vector2D * vec1)
{
if((result->width != vec1->width) || (result->height != vec1->height))
{
printf("\n\n**********ScalarMatrixProduct dimensionç....");
return;
}
int tx = blockIdx.x*blockDim.x+ threadIdx.x;
int ty = blockIdx.y*blockDim.y + threadIdx.y;
int tid = ty*vec1->width+tx;
if(tid ==0)
{
//printf("\nScalarMatrixProductvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width))
{
printf("\nScalarMatrixProduct\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
if(tid < vec1->width*vec1->height)
{
result->data[tid] = scalar*vec1->data[tid];
}
}
__global__ void MatrixPairwiseProduct(Vector2D * result, Vector2D * vec1, Vector2D * vec2)
{
if((vec1->width != vec2->width) || (vec1->height != vec2->height))
{
printf("\n\n**********MatrixPairwiseProduct dimension....");
return;
}
int tx = blockIdx.x*blockDim.x+ threadIdx.x;
int ty = blockIdx.y*blockDim.y + threadIdx.y;
int tid = ty*vec1->width+tx;
if(tid ==0)
{
//printf("\nMatrixPairwiseProductvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width))
{
printf("\nMatrixPairwiseProduct\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec2->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
if(tid < vec1->width*vec1->height)
{
result->data[tid] = vec1->data[tid] * vec2->data[tid];
}
}
__device__ float error_sum[32];
__global__ void Sum2D(Vector2D * vec)
{
int tid = threadIdx.y;
float val = 0;
int width = vec->width;
for(int a = 0; a < width; a++)
{
val += vec->data[a+tid*width];
}
error_sum[tid] = val;
}
__global__ void ArgMax2D(Vector2D * vector)
{
}
__global__ void Log2D(Vector2D * result, Vector2D * vec1)
{
int tx = blockIdx.x*blockDim.x+ threadIdx.x;
int ty = blockIdx.y*blockDim.y + threadIdx.y;
int tid = ty*vec1->width+tx;
if(tid ==0)
{
//printf("\nLog2Dvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width))
{
printf("\nLog2D\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
float val;
if(tid < vec1->width*vec1->height)
{
val = log(vec1->data[tid]);
result->data[tid] = val;
}
}
__global__ void Exponential(Vector2D * result, Vector2D * vec1)
{
int tx = blockIdx.x*blockDim.x+ threadIdx.x;
int ty = blockIdx.y*blockDim.y + threadIdx.y;
int tid = ty*vec1->width+tx;
if(tid ==0)
{
//printf("\nExponentialvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width))
{
printf("\Exponential\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
if(tid < vec1->width*vec1->height)
{
result->data[tid] = exp(vec1->data[tid]);
}
}
__global__ void Softmax(Vector2D * result, Vector2D * vec1)
{
int tid = blockIdx.y*blockDim.y + threadIdx.y;
if(tid ==0)
{
//printf("\nSoftmaxvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if(blockDim.y*gridDim.y < vec1->height)
{
printf("\nSoftmax\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
if(tid < vec1->height)
{
float toplam = 0;
for(int a = 0; a < vec1->width;a++)
{
toplam += vec1->data[a+tid*vec1->width];
}
for(int a = 0; a < vec1->width;a++)
{
result->data[a+tid*vec1->width] = vec1->data[a+tid*vec1->width]/toplam;
}
}
}
__global__ void Sigmoid(Vector2D * result, Vector2D * vec1)
{
int tx = blockIdx.x*blockDim.x+ threadIdx.x;
int ty = blockIdx.y*blockDim.y + threadIdx.y;
int tid = ty*vec1->width+tx;
if(tid ==0)
{
//printf("\nSigmoidvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width))
{
printf("\nSigmoid\n");
printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y);
}
}
if(tid < vec1->width*vec1->height)
{
result->data[tid] = 1.0/(1.0 + exp(-(vec1->data[tid])));
}
}
__global__ void PointerSet(Vector2D * f1, Vector2D * f2, int shift, int batch_size)
{
f1->width = f2->width;
f1->height = batch_size;
f1->data = f2->data + f2->width*shift;
}
/*
*
* MLP functions....
*
*
*
*
*
*/
float generate_uniform(float a, float b)
{
return rand() / (RAND_MAX + 1.0) * (b - a) + a;
}
Vector2D ** mlp_structure_information;
Vector2D ** mlp_layer_output_structure;
Vector2D ** mlp_layer_bias_structure;
Vector2D ** weight_array;
int * layer_structure = NULL;
int mlp_layer_count = 0;
Vector2D ** layer_results;
Vector2D ** layer_updates;
Vector2D ** bias_array;
Vector2D ** bias_results;
Vector2D ** device_weight_array;
Vector2D ** layer_error_array;
Vector2D ** scalar_minus_array;
Vector2D ** bias_updates;
float learning_rate = 0.000001;
Vector2D * CreateWeightMatrix(int input_count, int output_count);
void CreateMLP(int layer_count, ...)
{
mlp_layer_count = layer_count;
weight_array = (Vector2D **)malloc((layer_count-1)*sizeof(Vector2D *));
bias_array = (Vector2D **)malloc((layer_count - 1 )*sizeof(Vector2D *));
bias_results = (Vector2D **)malloc((layer_count - 1 )*sizeof(Vector2D *));
bias_updates = (Vector2D **)malloc((layer_count-1)*sizeof(Vector2D));
layer_error_array = (Vector2D **)malloc((layer_count-1)*sizeof(Vector2D));
mlp_structure_information = (Vector2D **)malloc((layer_count-1)*sizeof(Vector2D));
mlp_layer_output_structure = (Vector2D **)malloc((layer_count-1)*sizeof(Vector2D));
//This will hold the layer values after the forward pass, to be used in backpropagation...
layer_results = (Vector2D **)malloc((layer_count-1)*sizeof(Vector2D*));
layer_updates = (Vector2D **)malloc((layer_count-1)*sizeof(Vector2D*));
layer_structure = (int *)malloc(layer_count*sizeof(int));
va_list ap;
va_start(ap, layer_count);
for(int a=0; a<layer_count;a++)
{
layer_structure[a] = va_arg(ap, int);
}
va_end(ap);
printf("\nMLP structure\n");
for(int a=0; a<mlp_layer_count;a++)
printf("%d ", layer_structure[a]);
printf("\n\n");
for(int i=0; i<layer_count-1;i++)
{
//printf("\n\nLayer %d\n", i);
weight_array[i] = CreateWeightMatrix(layer_structure[i], layer_structure[i+1]);
layer_updates[i] = CreateVector2D(NULL, layer_structure[i], layer_structure[i+1], false);
bias_array[i] = CreateWeightMatrix(1, layer_structure[i+1]);
}
}
/*
He (Kaiming) initialization will be used...
*/
Vector2D * CreateWeightMatrix(int input_count, int output_count)
{
float init_range = 0;
Vector2D * temp = (Vector2D *)malloc(sizeof(Vector2D));
Vector2D * device_temp;
CHECK(cudaMalloc(&device_temp, sizeof(Vector2D)));
temp->height = input_count; //For bias...
temp->width = output_count;
temp->data = (float * )malloc(sizeof(float)*(input_count)*output_count);
init_range = sqrt(2.0 / input_count);
for(int a=0; a<(input_count)*output_count; a++)
{
temp->data[a] = generate_uniform(-init_range, init_range);
}
float * temp2;
CHECK(cudaMalloc(&temp2, sizeof(float)*temp->height*temp->width));
CHECK(cudaMemcpy(temp2, temp->data, sizeof(float)*temp->height*temp->width, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(&device_temp->data, &temp2, sizeof(float *), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(&device_temp->height, &(temp->height), sizeof(int), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(&device_temp->width, &(temp->width), sizeof(int), cudaMemcpyHostToDevice));
return device_temp;
}
Vector2D * device_ones = NULL, * device_transpose_ones;
int res_height;
Vector2D * device_input;
Vector2D * device_input_temp;
Vector2D * final_output;
Vector2D * device_input_transpose;
dim3 grid;
void FeedForward(Vector2D * input, int input_width, int input_height)
{
int thread_block_x = 32, thread_block_y = 32;
//printf("\n\n**************Feedforward*********** width : %d height : %d\n", input_width, input_height);
// If there is more than one sample in the input, the output of every layer is a matrix
// But the bias vector is just one vector... It should be added to every row of the layer output
// To be able to perform this operation, every bias vector is multiplied with a vector consisting of ones...
// By doing so a matrix form is obtained and this matrix is added to the result
/*
Suppose a(0) = [1 2 3] and input = [1, 5] b(0)= [2 3] matmul(input, a(0)) = [23 35]
[4 5 6] [5, 6] [73 86]
[4, 6]
So for each sample the b(0) should be added to every row of matmul result
So we can do it by matrix multiplied by [1] column vector matmul([1], b(0)) = [2 3] by so it can be added..
[1] [1] [2 3]
1 is used because every layer 0th node is assumed to be 1 while adding bias...
*/
if(device_ones == NULL)
{
float * ones_ = (float *)malloc(sizeof(float)*input_height);
for(int i=0;i<input_height;i++)ones_[i] = 1.0;
device_ones = CreateVector2D(ones_, 1, input_height, true);
device_transpose_ones = CreateVector2D(ones_, input_height, 1, true);
res_height = input_height;
for(int current_layer = 1; current_layer < mlp_layer_count; current_layer++)
{
Vector2D * res = CreateVector2D(NULL, input_height, layer_structure[current_layer], false);
layer_results[current_layer-1] = res;
Vector2D * res2 = CreateVector2D(NULL, input_height, layer_structure[current_layer], false);
bias_results[current_layer-1] = res2;
}
device_input_temp = input;
device_input_transpose = CreateVector2D(NULL, input_width, input_height, false);
device_input = device_input_temp;
}
else
{
device_input = device_input_temp;
}
//printf("\nInput data : \n");
//DisplayVector2D<<<1, 1>>>(device_input);
//cudaDeviceSynchronize();
// By starting from the first layer we are performing the forward pass iteratively...
for(int current_layer=1; current_layer < mlp_layer_count; current_layer++)
{
{
// weight matrix of the layer and the previous input is multiplied
dim3 b(thread_block_x, thread_block_y);
dim3 grid( (layer_structure[current_layer]+b.x-1)/b.x, (input_height+b.y-1)/b.y);
/*
printf("\n\n\nLayer : %d\ndevice input : \n", current_layer-1);
DisplayVector2D<<<1, 1>>>(device_input);
cudaDeviceSynchronize();
printf("\weight input : \n");
DisplayVector2D<<<1, 1>>>(weight_array[current_layer-1]);
cudaDeviceSynchronize();
printf("\nresult vector .x %d .y %d\n", layer_structure[current_layer], res_height);
printf("\ngrid.x : %d - grid.y : %d\n", grid.x, grid.y);
*/
MatrixProduct<<<grid, b>>>(layer_results[current_layer-1], device_input, weight_array[current_layer-1]);
cudaDeviceSynchronize();
// Bias matrix is obtained...
MatrixProduct<<<grid, b>>>(bias_results[current_layer-1], device_transpose_ones, bias_array[current_layer-1]);
cudaDeviceSynchronize();
// The bias is added to matmul operation...
MatrixAdd<<<grid, b>>>(layer_results[current_layer-1], layer_results[current_layer-1], bias_results[current_layer-1]);
cudaDeviceSynchronize();
// If we are not at the output layer yet, the layer output is passed through the sigmoid function...
if(current_layer < mlp_layer_count -1){
Sigmoid<<<grid, b>>>(layer_results[current_layer-1], layer_results[current_layer-1]);
//input = sigmoid(temp);
cudaDeviceSynchronize();
}
// If at output we are softmaxing the last hidden layer output
else
{
Exponential<<<grid, b>>>(layer_results[current_layer-1], layer_results[current_layer-1]);
cudaDeviceSynchronize();
grid.x = 1; b.x = 1;
Softmax<<<grid, b>>>(layer_results[current_layer-1], layer_results[current_layer-1]);
cudaDeviceSynchronize();
//input = softmax(temp);
}
device_input = layer_results[current_layer-1];
}
}
final_output = device_input;
}
Vector2D * error;
Vector2D * label_data;
bool first_call = true;
Vector2D ** layer_weights_transpose;
Vector2D ** layer_results_transpose;
Vector2D * error_result;
Vector2D * batch_data;
Vector2D * batch_label;
Vector2D * whole_input_data;
void BackPropagate(Vector2D * input, Vector2D * labels, int input_width, int output_width, int batch_size)
{
//printf("\n\nBackPropagation\n\n");
// Firstly we are getting the outputs of each layer...
int thread_block_x = 32, thread_block_y = 32;
if(first_call == true)
{
first_call = false;
layer_weights_transpose = (Vector2D **)malloc(sizeof(Vector2D*)*(mlp_layer_count-1));
layer_results_transpose = (Vector2D **)malloc(sizeof(Vector2D*)*(mlp_layer_count-1));
scalar_minus_array = (Vector2D **)malloc(sizeof(Vector2D*)*(mlp_layer_count-1));
for(int a=0; a<mlp_layer_count-1;a++)
{
layer_weights_transpose[a] = CreateVector2D(NULL, layer_structure[a+1], layer_structure[a], false);
}
for(int current_layer = 1; current_layer < mlp_layer_count; current_layer++)
{
Vector2D * res = CreateVector2D(NULL, layer_structure[current_layer], batch_size, false);
layer_results_transpose[current_layer-1] = res;
Vector2D * res2 = CreateVector2D(NULL, batch_size, layer_structure[current_layer], false);
scalar_minus_array[current_layer-1] = res2;
}
for(int a = 1; a<mlp_layer_count;a++)
{
layer_error_array[a-1] = CreateVector2D(NULL, batch_size, layer_structure[a],false);
bias_updates[a-1] = CreateVector2D(NULL, 1, layer_structure[a], false);
}
error_result = CreateVector2D(NULL, batch_size, layer_structure[mlp_layer_count-1], false);
}
FeedForward(input, input_width, batch_size);
// We are calculating the error at the output....
dim3 block(thread_block_x, thread_block_y);
dim3 grid((output_width+block.x-1)/block.x, (batch_size+block.y-1)/block.y);
MatrixSubtract<<<grid, block>>>(layer_error_array[mlp_layer_count-2], labels, final_output);
cudaDeviceSynchronize();
Vector2D * temp1;
// We are starting from output layer...
for( int current_layer = mlp_layer_count-2; current_layer>0;current_layer--)
{
// if we are at the output layer its weights should be adjusted by simply
// performing a matrix multiplication of the previous layer output with the output error...
if(current_layer == mlp_layer_count-2)
{
// The previous layer's output is transposed...
dim3 block(thread_block_x, thread_block_y);
dim3 grid((layer_structure[current_layer]+block.x-1)/block.x, (batch_size+block.y-1)/block.y);
TransposeVector2D<<<grid, block>>>(layer_results_transpose[current_layer-1], layer_results[current_layer-1]);
cudaDeviceSynchronize();
// The bias also must be updated similarly to the normal weights
/*
We could have inserted the bias weights into the normal layer weight matrix, but in the intermediate
layers a column of 1s would then have to be added while performing the calculation, and this brings some extra work in memory..
If it weren't separated, the output of the previous layer would be X(2) = [1 2 3]
[1 4 5]
[1 6 7]
While updating the weights we take the transpose of the previous layer output, which is why for the separate
bias update we perform a matrix multiplication with the ones vector. If we transpose the output above
we would have obtained [1 1 1] and this ones row vector comes from this fact...
[2 4 6]
[3 5 7]
*/
// We are storing the updates in an array of pointer later we will update our actual weights...
dim3 block2(layer_structure[mlp_layer_count-1] ,layer_structure[current_layer]);
dim3 grid2((layer_structure[mlp_layer_count-1]+block.x-1)/block.x, (layer_structure[current_layer]+block.y-1)/block.y);
MatrixProduct<<<grid2, block>>>(layer_updates[current_layer], layer_results_transpose[current_layer-1], layer_error_array[current_layer]);
cudaDeviceSynchronize();
dim3 temp(layer_structure[current_layer+1] ,1);
dim3 grid3((layer_structure[current_layer+1]+block.x-1)/block.x, (1+block.y-1)/block.y);
MatrixProduct<<<grid3, block>>>(bias_updates[current_layer], device_ones, layer_error_array[current_layer]);
cudaDeviceSynchronize();
continue;
}
temp1 = layer_error_array[current_layer+1] ;
dim3 block3(layer_structure[current_layer+2], layer_structure[current_layer+1]);
dim3 grid3((layer_structure[current_layer+2]+block.x-1)/block.x, (layer_structure[current_layer+1]+block.y-1)/block.y);
TransposeVector2D<<<grid3, block>>>(layer_weights_transpose[current_layer+1], weight_array[current_layer+1]);
cudaDeviceSynchronize();
// The error propagated to the current layer is obtained by multiplying the next layer's error with the transpose of the next layer's weight matrix,
// and then pairwise-multiplying with the current layer's output and with (1 - current layer's output)....
// i.e. the error is propagated back by one layer
dim3 block4(layer_structure[current_layer+1], batch_size );
dim3 grid4((layer_structure[current_layer+1]+block.x-1)/block.x, ( batch_size+block.y-1)/block.y);
MatrixProduct<<<grid4, block>>>(layer_error_array[current_layer], temp1, layer_weights_transpose[current_layer+1]);
cudaDeviceSynchronize();
dim3 block5(layer_structure[current_layer+1], batch_size);
dim3 grid5((layer_structure[current_layer+1]+block.x-1)/block.x, (batch_size+block.y-1)/block.y);
ScalarMinusVector2D<<<grid5, block>>>(scalar_minus_array[current_layer], 1, layer_results[current_layer]);
cudaDeviceSynchronize();
// When we multiply the propagated error with the current layer's output
/*
The layer output was obtained via the sigmoid, and the derivative of the sigmoid is (sigmoid*(1-sigmoid)).
While applying the chain rule this term takes its place in the backpropagation...
*/
// So by multiplying we are exactly calculating this layer's error...
MatrixPairwiseProduct<<<grid5, block>>>(layer_error_array[current_layer], layer_error_array[current_layer], layer_results[current_layer]);
cudaDeviceSynchronize();
MatrixPairwiseProduct<<<grid5, block>>>(layer_error_array[current_layer], layer_error_array[current_layer], scalar_minus_array[current_layer]);
cudaDeviceSynchronize();
// We are transposing the prev layer output to calculate current layer's weight change...
dim3 block6(layer_structure[current_layer], batch_size);
dim3 grid6((layer_structure[current_layer]+block.x-1)/block.x, (batch_size+block.y-1)/block.y);
TransposeVector2D<<<grid6, block>>>(layer_results_transpose[current_layer-1], layer_results[current_layer - 1 ]);
cudaDeviceSynchronize();
dim3 block7(layer_structure[current_layer+1], layer_structure[current_layer]);
dim3 grid7((layer_structure[current_layer+1]+block.x-1)/block.x, (layer_structure[current_layer]+block.y-1)/block.y);
MatrixProduct<<<grid7, block>>>(layer_updates[current_layer], layer_results_transpose[current_layer-1], layer_error_array[current_layer]);
cudaDeviceSynchronize();
// Bias is also updated according to explanation above....
dim3 block8(layer_structure[current_layer+1], 1);
dim3 grid8((layer_structure[current_layer+1]+block.x-1)/block.x, (1+block.y-1)/block.y);
MatrixProduct<<<grid8, block>>>( bias_updates[current_layer], device_ones, layer_error_array[current_layer]);
cudaDeviceSynchronize();
}
temp1 = layer_error_array[1];
dim3 block9(layer_structure[2], layer_structure[1]);
dim3 grid9((layer_structure[2]+block.x-1)/block.x, (layer_structure[1]+block.y-1)/block.y);
TransposeVector2D<<<grid9, block>>>(layer_weights_transpose[1], weight_array[1]);
cudaDeviceSynchronize();
dim3 block10(layer_structure[1], batch_size );
dim3 grid10((layer_structure[1]+block.x-1)/block.x, (batch_size+block.y-1)/block.y);
MatrixProduct<<<grid10, block>>>(layer_error_array[0], temp1, layer_weights_transpose[1]);
cudaDeviceSynchronize();
dim3 block11(layer_structure[0+1], batch_size);
dim3 grid11((layer_structure[0+1]+block.x-1)/block.x, (batch_size+block.y-1)/block.y);
ScalarMinusVector2D<<<grid11, block>>>(scalar_minus_array[0], 1, layer_results[0]);
cudaDeviceSynchronize();
MatrixPairwiseProduct<<<grid11, block>>>(layer_error_array[0], layer_error_array[0], layer_results[0]);
cudaDeviceSynchronize();
MatrixPairwiseProduct<<<grid11, block>>>(layer_error_array[0], layer_error_array[0], scalar_minus_array[0]);
cudaDeviceSynchronize();
dim3 block12(layer_structure[0], batch_size);
dim3 grid12((layer_structure[0]+block.x-1)/block.x, (batch_size+block.y-1)/block.y);
TransposeVector2D<<<grid12, block>>>(device_input_transpose , batch_data);
cudaDeviceSynchronize();
dim3 block13(layer_structure[1], layer_structure[0]);
dim3 grid13((layer_structure[1]+block.x-1)/block.x, (layer_structure[0]+block.y-1)/block.y);
MatrixProduct<<<grid13, block>>>(layer_updates[0], device_input_transpose, layer_error_array[0]);
cudaDeviceSynchronize();
dim3 block14(layer_structure[0+1], 1);
dim3 grid14((layer_structure[0+1]+block.x-1)/block.x, (1+block.y-1)/block.y);
MatrixProduct<<<grid14, block>>>( bias_updates[0], device_ones, layer_error_array[0]);
cudaDeviceSynchronize();
for(int weight_index=0; weight_index < mlp_layer_count - 1;weight_index++)
{
dim3 grid((layer_structure[weight_index+1]+block.x-1)/block.x, (layer_structure[weight_index]+block.y-1)/block.y);
ScalarMatrixProduct<<<grid, block>>>(layer_updates[weight_index], learning_rate, layer_updates[weight_index]);
cudaDeviceSynchronize();
MatrixAdd<<<grid, block>>>(weight_array[weight_index], weight_array[weight_index], layer_updates[weight_index]);
cudaDeviceSynchronize();
dim3 grid2((layer_structure[weight_index+1]+block.x-1)/block.x ,1);
ScalarMatrixProduct<<<grid2, block>>>(bias_updates[weight_index], learning_rate, bias_updates[weight_index]);
cudaDeviceSynchronize();
MatrixAdd<<<grid2, block>>>(bias_array[weight_index], bias_array[weight_index], bias_updates[weight_index]);
cudaDeviceSynchronize();
}
}
Vector2D * CreateVector2DCPU(float * data, int height, int width)
{
// A new structure is allocated in memory for matrix/vector...
Vector2D * temp = (Vector2D *)malloc(sizeof(struct Vector2D));
temp->data = data;
temp->height = height;
temp->width = width;
return temp;
};
Vector2D * CreateOneHot(Vector2D * indexes, int vector_length)
{
Vector2D * one_hot_vector = (Vector2D*)malloc(sizeof(Vector2D));
one_hot_vector->height = indexes->height;
one_hot_vector->width = vector_length;
one_hot_vector->size = one_hot_vector->height;
one_hot_vector->data = (float *)malloc(sizeof(float)*indexes->height*vector_length);
memset(one_hot_vector->data, 0, sizeof(float)*indexes->height*vector_length);
for(int i=0; i<one_hot_vector->height;i++)
{
one_hot_vector->data[i*vector_length+(int)indexes->data[i*indexes->width]] = 1.0;
}
return one_hot_vector;
}
#define kk 10
void read_mnist_(const char *file_path, int num_data, int len_info, int arr_n, unsigned char data_char[][kk], int info_arr[])
{
}
void DisplayVector2DCPU(Vector2D * vector)
{
printf("[");
for(int h = 0; h < vector->height; h++)
{
printf("[");
for( int w = 0; w < vector->width-1; w++)
{
printf("%f, ", vector->data[h*vector->width+w]);
}
printf("%f], \n", vector->data[h*vector->width+vector->width-1]);
}
printf("\b\b\b]");
}
#define EMPTY printf("\n\n");
Vector2D * load_text_data()
{
FILE * dosya = fopen("text_data.dat", "rb");
int width, height;
fread(&width, sizeof(int), 1, dosya);
fread(&height, sizeof(int), 1, dosya);
float * loaded_data = (float *)malloc(width*height*sizeof(float));
for(int a =0; a< width*height; a++)
fread(&loaded_data[a], sizeof(float), 1, dosya);
fclose(dosya);
printf("Width : %d - Height : %d\n", width, height);
Vector2D * vec = CreateVector2DCPU(loaded_data, height, width);
return vec;
}
Vector2D * load_label_data()
{
FILE * dosya = fopen("label_data.dat", "rb");
int width, height;
fread(&width, sizeof(int), 1, dosya);
fread(&height, sizeof(int), 1, dosya);
float * loaded_data = (float *)malloc(width*height*sizeof(float));
int value;
for(int a =0; a< width*height; a++)
{
fread(&value, sizeof(int), 1, dosya);
loaded_data[a] = value;
}
fclose(dosya);
printf("Width : %d - Height : %d\n", width, height);
Vector2D * vec = CreateVector2DCPU(loaded_data, height, width);
return vec;
}
int main()
{
srand(time(0));
printf("MLP is being created....\n");
CreateMLP(3, 32754, 160, 4);
float error_val[32];
int batch_size = 32;
learning_rate = 0.000001;
Vector2D * data_set = load_text_data();
Vector2D * labels_ = load_label_data();
printf("\nData loaded...\n");
Vector2D * one_hot_labels = CreateOneHot(labels_, 4);
#define ITERATION_COUNT 15
double toplam= 0.0;
label_data = CreateVector2D(one_hot_labels->data, one_hot_labels->height, one_hot_labels->width, true);
batch_data = CreateVector2D(NULL, batch_size, data_set->width, false);
whole_input_data = CreateVector2D(data_set->data, data_set->height, data_set->width, true);
batch_label = CreateVector2D(NULL, batch_size, one_hot_labels->width, false);
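// Training loop: each epoch first runs a forward pass per mini-batch and accumulates
// the cross-entropy term (sum of log(output) * one-hot label, negated), then makes a
// second pass over the batches running BackPropagate.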
for(int iteration = 1; iteration < ITERATION_COUNT; iteration++)
{
toplam = 0.0;
for(int j = 0; j < data_set->height/batch_size; j++)
{
PointerSet<<<1, 1>>>(batch_data, whole_input_data, j*batch_size, batch_size);
cudaDeviceSynchronize();
PointerSet<<<1, 1>>>(batch_label, label_data, j*batch_size, batch_size);
cudaDeviceSynchronize();
FeedForward(batch_data, data_set->width, batch_size);
cudaDeviceSynchronize();
dim3 block(32, 32);
dim3 grids((one_hot_labels->width+block.x-1)/block.x, (batch_size+block.y-1)/block.y);
Log2D<<<grids, block>>>(final_output, final_output);
cudaDeviceSynchronize();
MatrixPairwiseProduct<<<grids, block>>>(final_output, batch_label, final_output);
cudaDeviceSynchronize();
dim3 k(1, 32);
Sum2D<<<1, k>>>(final_output);
cudaDeviceSynchronize();
cudaMemcpyFromSymbol( &error_val, error_sum, sizeof(float)*32);
//printf("\n\nIteration %d - Error : %f\n", iteration, error_val);
cudaDeviceSynchronize();
for(int a=0;a<32;a++)
toplam += -error_val[a];
}
printf("\n\nIteration %d - Error : %f\n", iteration, toplam);
for(int j = 0; j < data_set->height/batch_size; j++)
{
PointerSet<<<1, 1>>>(batch_data, whole_input_data, j*batch_size, batch_size);
cudaDeviceSynchronize();
PointerSet<<<1, 1>>>(batch_label, label_data, j*batch_size, batch_size);
cudaDeviceSynchronize();
BackPropagate(batch_data, batch_label, data_set->width, one_hot_labels->width, batch_size);
cudaDeviceSynchronize();
}
}
cudaDeviceReset();
}
|
4d20be32d6077ff73b74b78d2948ccaede206f1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void ComputeDistanceKernel( float *symbolVectors, float *inputVector, float *distance, int symbolSize, int symbols )
{
int symbolId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
+ blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if(symbolId < symbols)
{
float sum = 0.00f;
for(int i = 0; i < symbolSize; i++)
{
sum += symbolVectors[symbolId * symbolSize + i] * inputVector[i];
}
distance[symbolId] = sum;
}
} | 4d20be32d6077ff73b74b78d2948ccaede206f1e.cu | #include "includes.h"
__global__ void ComputeDistanceKernel( float *symbolVectors, float *inputVector, float *distance, int symbolSize, int symbols )
{
int symbolId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
+ blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if(symbolId < symbols)
{
float sum = 0.00f;
for(int i = 0; i < symbolSize; i++)
{
sum += symbolVectors[symbolId * symbolSize + i] * inputVector[i];
}
distance[symbolId] = sum;
}
} |
9bbc552f657c5e01483aac4bf4f58dcbf620de61.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "uplo_fmax.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
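// Auto-generated timing harness: for each matrix size (count taken from argv[1]) and
// each of the 20 block shapes above, pad the grid to cover the matrix, run 10 warm-up
// launches of uplo_fmax, then time 1000 launches and print [microseconds,(block),(matrix)].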
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int unit = 1;
const int bottom = 1;
const REAL *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
const REAL *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
REAL *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
const int offset_c = 1;
const int ld_c = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(uplo_fmax, dim3(gridBlock), dim3(threadBlock), 0, 0, sd, unit, bottom, a, offset_a, ld_a, b, offset_b, ld_b, c, offset_c, ld_c);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(uplo_fmax, dim3(gridBlock), dim3(threadBlock), 0, 0, sd, unit, bottom, a, offset_a, ld_a, b, offset_b, ld_b, c, offset_c, ld_c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(uplo_fmax, dim3(gridBlock), dim3(threadBlock), 0, 0, sd, unit, bottom, a, offset_a, ld_a, b, offset_b, ld_b, c, offset_c, ld_c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 9bbc552f657c5e01483aac4bf4f58dcbf620de61.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "uplo_fmax.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int unit = 1;
const int bottom = 1;
const REAL *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
const REAL *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
REAL *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
const int offset_c = 1;
const int ld_c = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
uplo_fmax<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b,c,offset_c,ld_c);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
uplo_fmax<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b,c,offset_c,ld_c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
uplo_fmax<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b,c,offset_c,ld_c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
c10d1878c5d90d00d2fbfa9a423756478681ff0a.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#define SIZE 10
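/* The reduction kernels below work in place using interleaved pairs: at each step a
 * thread combines elements first = tid*2*step and second = first + step, the step
 * doubles while the active thread count halves, and the result ends up in input[0].
 * They are written for a single small block (SIZE == 10) and use no __syncthreads()
 * between steps. */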
__global__ void min(int *input)
{
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0){
if(tid<number_of_threads){
int first = tid*step_size*2;
int second = first+step_size;
if(input[second]<input[first])
input[first]=input[second];
}
step_size*= 2;
number_of_threads/=2;
}
}
__global__ void max(int *input)
{
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0){
if(tid<number_of_threads){
int first = tid*step_size*2;
int second = first+step_size;
if(input[second]>input[first])
input[first]=input[second];
}
step_size*= 2;
number_of_threads/=2;
}
}
__global__ void summation(int *input)
{
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0){
if(tid<number_of_threads){
const int first = tid*step_size*2;
const int second = first+step_size;
input[first] += input[second];
}
step_size*=2;
number_of_threads/=2;
}
}
__global__ void average(int *input)
{
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0){
if(tid<number_of_threads){
const int first = tid*step_size*2;
const int second = first+step_size;
input[first] += input[second];
}
step_size*=2;
number_of_threads/=2;
}
input[0] = input[0]/10;
}
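/* standardDeviation is expected to leave the sum of squared deviations from `mean`
 * in input[0]; the host later divides by SIZE and takes the square root. */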
__global__ void standardDeviation(int *input,int mean)
{
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
int std = 0;
while(number_of_threads>0){
if(tid<number_of_threads){
const int first = tid*step_size*2;
const int second = first+step_size;
std = ((input[first]-mean)*(input[first]-mean))+((input[second]-mean)*(input[second]-mean));
}
step_size*=2;
number_of_threads/=2;
}
input[0] = std;
}
int main()
{
int input[SIZE],i;
for( i = 0 ; i < SIZE ; i++)
{
input[i] = rand()% 100;
}
for( i = 0 ; i < SIZE ; i++)
{
printf("%d ",input[i]);
}
printf("\n");
int byte_size = SIZE*sizeof(int);
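// Each statistic below copies the original input to the device again, because the
// kernels reduce in place and overwrite the array they are given.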
//Allocate mem for min
//<<<blocksPerGrid,threadsPerBlock>>>
int *arr_min, result_min;
hipMalloc(&arr_min,byte_size);
hipMemcpy(arr_min,input,byte_size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( min), dim3(1),dim3(SIZE/2), 0, 0, arr_min);
hipMemcpy(&result_min,arr_min,sizeof(int),hipMemcpyDeviceToHost);
printf("Minimun: %d\n",result_min);
//Allocate mem for max
int *arr_max, result_max;
hipMalloc(&arr_max,byte_size);
hipMemcpy(arr_max,input,byte_size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( max), dim3(1),dim3(SIZE/2), 0, 0, arr_max);
hipMemcpy(&result_max,arr_max,sizeof(int),hipMemcpyDeviceToHost);
printf("Maximum: %d\n",result_max);
//Allocate mem for sum
int *arr_sum, sum;
hipMalloc(&arr_sum,byte_size);
hipMemcpy(arr_sum,input,byte_size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( summation), dim3(1),dim3(SIZE), 0, 0, arr_sum);
hipMemcpy(&sum,arr_sum,sizeof(int),hipMemcpyDeviceToHost);
printf("Sum: %d\n",sum);
//Allocate mem for avg
int *arr_avg, avg;
hipMalloc(&arr_avg,byte_size);
hipMemcpy(arr_avg,input,byte_size,hipMemcpyHostToDevice);
//<<<blocksPerGrid,threadsPerBlock>>>
hipLaunchKernelGGL(( average), dim3(1),dim3(SIZE), 0, 0, arr_avg);
hipMemcpy(&avg,arr_avg,sizeof(int),hipMemcpyDeviceToHost);
printf("Average: %d\n",avg);
printf("CPUAVG: %d\n",(sum/SIZE));
//Allocate mem for std
int *arr_std, std;
const int mean = avg;
hipMalloc(&arr_std,byte_size);
hipMemcpy(arr_std,input,byte_size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( standardDeviation), dim3(1),dim3(SIZE), 0, 0, arr_std,mean);
hipMemcpy(&std,arr_std,sizeof(int),hipMemcpyDeviceToHost);
std = sqrt(std/10);
printf("Standard Deviation: %d\n",std);
return 0;
}
| c10d1878c5d90d00d2fbfa9a423756478681ff0a.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <time.h>
#define SIZE 10
__global__ void min(int *input)
{
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0){
if(tid<number_of_threads){
int first = tid*step_size*2;
int second = first+step_size;
if(input[second]<input[first])
input[first]=input[second];
}
step_size*= 2;
number_of_threads/=2;
}
}
__global__ void max(int *input)
{
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0){
if(tid<number_of_threads){
int first = tid*step_size*2;
int second = first+step_size;
if(input[second]>input[first])
input[first]=input[second];
}
step_size*= 2;
number_of_threads/=2;
}
}
__global__ void summation(int *input)
{
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0){
if(tid<number_of_threads){
const int first = tid*step_size*2;
const int second = first+step_size;
input[first] += input[second];
}
step_size*=2;
number_of_threads/=2;
}
}
__global__ void average(int *input)
{
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads>0){
if(tid<number_of_threads){
const int first = tid*step_size*2;
const int second = first+step_size;
input[first] += input[second];
}
step_size*=2;
number_of_threads/=2;
}
input[0] = input[0]/10;
}
__global__ void standardDeviation(int *input,int mean)
{
const int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
int std = 0;
while(number_of_threads>0){
if(tid<number_of_threads){
const int first = tid*step_size*2;
const int second = first+step_size;
std = ((input[first]-mean)*(input[first]-mean))+((input[second]-mean)*(input[second]-mean));
}
step_size*=2;
number_of_threads/=2;
}
input[0] = std;
}
int main()
{
int input[SIZE],i;
for( i = 0 ; i < SIZE ; i++)
{
input[i] = rand()% 100;
}
for( i = 0 ; i < SIZE ; i++)
{
printf("%d ",input[i]);
}
printf("\n");
int byte_size = SIZE*sizeof(int);
//Allocate mem for min
//<<<blocksPerGrid,threadsPerBlock>>>
int *arr_min, result_min;
cudaMalloc(&arr_min,byte_size);
cudaMemcpy(arr_min,input,byte_size,cudaMemcpyHostToDevice);
min<<<1,SIZE/2>>>(arr_min);
cudaMemcpy(&result_min,arr_min,sizeof(int),cudaMemcpyDeviceToHost);
printf("Minimun: %d\n",result_min);
//Allocate mem for max
int *arr_max, result_max;
cudaMalloc(&arr_max,byte_size);
cudaMemcpy(arr_max,input,byte_size,cudaMemcpyHostToDevice);
max<<<1,SIZE/2>>>(arr_max);
cudaMemcpy(&result_max,arr_max,sizeof(int),cudaMemcpyDeviceToHost);
printf("Maximum: %d\n",result_max);
//Allocate mem for sum
int *arr_sum, sum;
cudaMalloc(&arr_sum,byte_size);
cudaMemcpy(arr_sum,input,byte_size,cudaMemcpyHostToDevice);
summation<<<1,SIZE>>>(arr_sum);
cudaMemcpy(&sum,arr_sum,sizeof(int),cudaMemcpyDeviceToHost);
printf("Sum: %d\n",sum);
//Allocate mem for avg
int *arr_avg, avg;
cudaMalloc(&arr_avg,byte_size);
cudaMemcpy(arr_avg,input,byte_size,cudaMemcpyHostToDevice);
//<<<blocksPerGrid,threadsPerBlock>>>
average<<<1,SIZE>>>(arr_avg);
cudaMemcpy(&avg,arr_avg,sizeof(int),cudaMemcpyDeviceToHost);
printf("Average: %d\n",avg);
printf("CPUAVG: %d\n",(sum/SIZE));
//Allocate mem for std
int *arr_std, std;
const int mean = avg;
cudaMalloc(&arr_std,byte_size);
cudaMemcpy(arr_std,input,byte_size,cudaMemcpyHostToDevice);
standardDeviation<<<1,SIZE>>>(arr_std,mean);
cudaMemcpy(&std,arr_std,sizeof(int),cudaMemcpyDeviceToHost);
std = sqrt(std/10);
printf("Standard Deviation: %d\n",std);
return 0;
}
|
c3c215a70f86d94403eeb3c351e8b46008b0363e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#define BLOCK_X 16
#define BLOCK_Y 16
#define PI 3.1415926535897932
const int threads_per_block = 128;
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
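// LCG recurrence used throughout: seed[i] = (A*seed[i] + C) mod M, mapped to [0, 1)
// by dividing by M.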
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) +tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
double elapsed_time(long long start_time, long long end_time) {
return (double) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
void check_error(hipError_t e) {
if (e != hipSuccess) {
printf("\nCUDA error: %s\n", hipGetErrorString(e));
exit(1);
}
}
void cuda_print_double_array(double *array_GPU, size_t size) {
//allocate temporary array for printing
double* mem = (double*) malloc(sizeof (double) *size);
//transfer data from device
hipMemcpy(mem, array_GPU, sizeof (double) *size, hipMemcpyDeviceToHost);
printf("PRINTING ARRAY VALUES\n");
//print values in memory
for (size_t i = 0; i < size; ++i) {
printf("[%d]:%0.6f\n", i, mem[i]);
}
printf("FINISHED PRINTING ARRAY VALUES\n");
//clean up memory
free(mem);
mem = NULL;
}
/********************************
* CALC LIKELIHOOD SUM
* DETERMINES THE LIKELIHOOD SUM BASED ON THE FORMULA: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 100
* param 1 I 3D matrix
* param 2 current ind array
* param 3 length of ind array
* returns a double representing the sum
********************************/
__device__ double calcLikelihoodSum(unsigned char * I, int * ind, int numOnes, int index) {
double likelihoodSum = 0.0;
int x;
for (x = 0; x < numOnes; x++)
likelihoodSum += (pow((double) (I[ind[index * numOnes + x]] - 100), 2) - pow((double) (I[ind[index * numOnes + x]] - 228), 2)) / 50.0;
return likelihoodSum;
}
/****************************
CDF CALCULATE
CALCULATES CDF
param1 CDF
param2 weights
param3 Nparticles
*****************************/
__device__ void cdfCalc(double * CDF, double * weights, int Nparticles) {
int x;
CDF[0] = weights[0];
for (x = 1; x < Nparticles; x++) {
CDF[x] = weights[x] + CDF[x - 1];
}
}
/*****************************
* RANDU
* GENERATES A UNIFORM DISTRIBUTION
* returns a double representing a randomly generated number from a uniform distribution with range [0, 1)
******************************/
__device__ double d_randu(int * seed, int index) {
int M = INT_MAX;
int A = 1103515245;
int C = 12345;
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((double) M));
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
double randu(int * seed, int index) {
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((double) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
double randn(int * seed, int index) {
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2 * PI * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
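/* Example use on the host, assuming a small seed array (illustrative only):
 *   int s[1] = { (int) time(0) };
 *   double z = randn(s, 0);   // approximately standard normal
 */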
double test_randn(int * seed, int index) {
//Box-Muller algorithm
double pi = 3.14159265358979323846;
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2 * pi * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
__device__ double d_randn(int * seed, int index) {
//Box-Muller algorithm
double pi = 3.14159265358979323846;
double u = d_randu(seed, index);
double v = d_randu(seed, index);
double cosine = cos(2 * pi * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
/****************************
UPDATE WEIGHTS
UPDATES WEIGHTS
param1 weights
param2 likelihood
param3 Nparticles
****************************/
__device__ double updateWeights(double * weights, double * likelihood, int Nparticles) {
int x;
double sum = 0;
for (x = 0; x < Nparticles; x++) {
weights[x] = weights[x] * exp(likelihood[x]);
sum += weights[x];
}
return sum;
}
__device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value) {
if (endIndex < beginIndex)
return -1;
int middleIndex;
while (endIndex > beginIndex) {
middleIndex = beginIndex + ((endIndex - beginIndex) / 2);
if (CDF[middleIndex] >= value) {
if (middleIndex == 0)
return middleIndex;
else if (CDF[middleIndex - 1] < value)
return middleIndex;
else if (CDF[middleIndex - 1] == value) {
while (CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if (CDF[middleIndex] > value)
endIndex = middleIndex - 1;
else
beginIndex = middleIndex + 1;
}
return -1;
}
/** added this function. was missing in original double version.
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
__device__ double dev_round_double(double value) {
int newValue = (int) (value);
if (value - newValue < .5f)
return newValue;
else
return newValue + 1;
}
/*****************************
* CUDA Find Index Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: weights
* param8: Nparticles
*****************************/
__global__ void find_index_kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, double * weights, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i < Nparticles) {
int index = -1;
int x;
for (x = 0; x < Nparticles; x++) {
if (CDF[x] >= u[i]) {
index = x;
break;
}
}
if (index == -1) {
index = Nparticles - 1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
//weights[i] = 1 / ((double) (Nparticles)); //moved this code to the beginning of likelihood kernel
}
__syncthreads();
}
__global__ void normalize_weights_kernel(double * weights, int Nparticles, double* partial_sums, double * CDF, double * u, int * seed) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
__shared__ double u1, sumWeights;
if(0 == threadIdx.x)
sumWeights = partial_sums[0];
__syncthreads();
if (i < Nparticles) {
weights[i] = weights[i] / sumWeights;
}
__syncthreads();
if (i == 0) {
cdfCalc(CDF, weights, Nparticles);
u[0] = (1 / ((double) (Nparticles))) * d_randu(seed, i); // do this to allow all threads in all blocks to use the same u1
}
__syncthreads();
if(0 == threadIdx.x)
u1 = u[0];
__syncthreads();
if (i < Nparticles) {
u[i] = u1 + i / ((double) (Nparticles));
}
}
__global__ void sum_kernel(double* partial_sums, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i == 0) {
int x;
double sum = 0.0;
int num_blocks = ceil((double) Nparticles / (double) threads_per_block);
for (x = 0; x < num_blocks; x++) {
sum += partial_sums[x];
}
partial_sums[0] = sum;
}
}
/*****************************
* CUDA Likelihood Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param2.5: CDF
* param3: ind
* param4: objxy
* param5: likelihood
* param6: I
* param6.5: u
* param6.75: weights
* param7: Nparticles
* param8: countOnes
* param9: max_size
* param10: k
* param11: IszY
* param12: Nfr
*****************************/
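// Summary: copy the resampled state (xj, yj) back into arrayX/arrayY, perturb it with
// Gaussian noise, gather the pixel indices covered by the disk template, score them
// against the known foreground/background intensities (228/100), scale the weights by
// exp(likelihood), and block-reduce the weights into partial_sums.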
__global__ void likelihood_kernel(double * arrayX, double * arrayY, double * xj, double * yj, double * CDF, int * ind, int * objxy, double * likelihood, unsigned char * I, double * u, double * weights, int Nparticles, int countOnes, int max_size, int k, int IszY, int Nfr, int *seed, double* partial_sums) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
int y;
int indX, indY;
__shared__ double buffer[512];
if (i < Nparticles) {
arrayX[i] = xj[i];
arrayY[i] = yj[i];
weights[i] = 1 / ((double) (Nparticles)); //Donnie - moved this line from end of find_index_kernel to prevent all weights from being reset before calculating position on final iteration.
arrayX[i] = arrayX[i] + 1.0 + 5.0 * d_randn(seed, i);
arrayY[i] = arrayY[i] - 2.0 + 2.0 * d_randn(seed, i);
}
__syncthreads();
if (i < Nparticles) {
for (y = 0; y < countOnes; y++) {
//added dev_round_double() to be consistent with roundDouble
indX = dev_round_double(arrayX[i]) + objxy[y * 2 + 1];
indY = dev_round_double(arrayY[i]) + objxy[y * 2];
ind[i * countOnes + y] = abs(indX * IszY * Nfr + indY * Nfr + k);
if (ind[i * countOnes + y] >= max_size)
ind[i * countOnes + y] = 0;
}
likelihood[i] = calcLikelihoodSum(I, ind, countOnes, i);
likelihood[i] = likelihood[i] / countOnes;
weights[i] = weights[i] * exp(likelihood[i]); //Donnie Newell - added the missing exponential function call
}
buffer[threadIdx.x] = 0.0;
__syncthreads();
if (i < Nparticles) {
buffer[threadIdx.x] = weights[i];
}
__syncthreads();
//this doesn't account for the last block that isn't full
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
buffer[threadIdx.x] += buffer[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
partial_sums[blockIdx.x] = buffer[0];
}
__syncthreads();
}
/**
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
double roundDouble(double value) {
int newValue = (int) (value);
if (value - newValue < .5)
return newValue;
else
return newValue + 1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, unsigned char * array3D, int * dimX, int * dimY, int * dimZ) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
if (array3D[x * *dimY * *dimZ + y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(unsigned char * array3D, int * dimX, int * dimY, int * dimZ, int * seed) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (unsigned char) (5 * randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius) {
int diameter = radius * 2 - 1;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
double distance = sqrt(pow((double) (x - radius + 1), 2) + pow((double) (y - radius + 1), 2));
if (distance < radius)
disk[x * diameter + y] = 1;
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(unsigned char * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error) {
int startX = posX - error;
while (startX < 0)
startX++;
int startY = posY - error;
while (startY < 0)
startY++;
int endX = posX + error;
while (endX > dimX)
endX--;
int endY = posY + error;
while (endY > dimY)
endY--;
int x, y;
for (x = startX; x < endX; x++) {
for (y = startY; y < endY; y++) {
double distance = sqrt(pow((double) (x - posX), 2) + pow((double) (y - posY), 2));
if (distance < error)
matrix[x * dimY * dimZ + y * dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(unsigned char * matrix, int dimX, int dimY, int dimZ, int error, unsigned char * newMatrix) {
int x, y, z;
for (z = 0; z < dimZ; z++) {
for (x = 0; x < dimX; x++) {
for (y = 0; y < dimY; y++) {
if (matrix[x * dimY * dimZ + y * dimZ + z] == 1) {
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, int * neighbors, int radius) {
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius * 2 - 1;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (se[x * diameter + y]) {
neighbors[neighY * 2] = (int) (y - center);
neighbors[neighY * 2 + 1] = (int) (x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity is known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(unsigned char * I, int IszX, int IszY, int Nfr, int * seed) {
int k;
int max_size = IszX * IszY * Nfr;
/*get object centers*/
int x0 = (int) roundDouble(IszY / 2.0);
int y0 = (int) roundDouble(IszX / 2.0);
I[x0 * IszY * Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for (k = 1; k < Nfr; k++) {
xk = abs(x0 + (k-1));
yk = abs(y0 - 2 * (k-1));
pos = yk * IszY * Nfr + xk * Nfr + k;
if (pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
unsigned char * newMatrix = (unsigned char *) malloc(sizeof (unsigned char) * IszX * IszY * Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for (x = 0; x < IszX; x++) {
for (y = 0; y < IszY; y++) {
for (k = 0; k < Nfr; k++) {
I[x * IszY * Nfr + y * Nfr + k] = newMatrix[x * IszY * Nfr + y * Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(double * CDF, int lengthCDF, double value) {
int index = -1;
int x;
for (x = 0; x < lengthCDF; x++) {
if (CDF[x] >= value) {
index = x;
break;
}
}
if (index == -1) {
return lengthCDF - 1;
}
return index;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(unsigned char * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles) {
int max_size = IszX * IszY*Nfr;
//original particle centroid
double xe = roundDouble(IszY / 2.0);
double ye = roundDouble(IszX / 2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius * 2 - 1;
int * disk = (int*) malloc(diameter * diameter * sizeof (int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (disk[x * diameter + y] == 1)
countOnes++;
}
}
int * objxy = (int *) malloc(countOnes * 2 * sizeof (int));
getneighbors(disk, countOnes, objxy, radius);
//initial weights are all equal (1/Nparticles)
double * weights = (double *) malloc(sizeof (double) *Nparticles);
for (x = 0; x < Nparticles; x++) {
weights[x] = 1 / ((double) (Nparticles));
}
//initial likelihood to 0.0
double * likelihood = (double *) malloc(sizeof (double) *Nparticles);
double * arrayX = (double *) malloc(sizeof (double) *Nparticles);
double * arrayY = (double *) malloc(sizeof (double) *Nparticles);
double * xj = (double *) malloc(sizeof (double) *Nparticles);
double * yj = (double *) malloc(sizeof (double) *Nparticles);
double * CDF = (double *) malloc(sizeof (double) *Nparticles);
//GPU copies of arrays
double * arrayX_GPU;
double * arrayY_GPU;
double * xj_GPU;
double * yj_GPU;
double * CDF_GPU;
double * likelihood_GPU;
unsigned char * I_GPU;
double * weights_GPU;
int * objxy_GPU;
int * ind = (int*) malloc(sizeof (int) *countOnes * Nparticles);
int * ind_GPU;
double * u = (double *) malloc(sizeof (double) *Nparticles);
double * u_GPU;
int * seed_GPU;
double* partial_sums;
//CUDA memory allocation
check_error(hipMalloc((void **) &arrayX_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &arrayY_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &xj_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &yj_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &CDF_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &u_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &likelihood_GPU, sizeof (double) *Nparticles));
//set likelihood to zero
check_error(hipMemset((void *) likelihood_GPU, 0, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &weights_GPU, sizeof (double) *Nparticles));
check_error(hipMalloc((void **) &I_GPU, sizeof (unsigned char) *IszX * IszY * Nfr));
check_error(hipMalloc((void **) &objxy_GPU, sizeof (int) *2 * countOnes));
check_error(hipMalloc((void **) &ind_GPU, sizeof (int) *countOnes * Nparticles));
check_error(hipMalloc((void **) &seed_GPU, sizeof (int) *Nparticles));
check_error(hipMalloc((void **) &partial_sums, sizeof (double) *Nparticles));
//Donnie - this loop is different because in this kernel, arrayX and arrayY
// are set equal to xj before every iteration, so effectively, arrayX and
// arrayY will be set to xe and ye before the first iteration.
for (x = 0; x < Nparticles; x++) {
xj[x] = xe;
yj[x] = ye;
}
int k;
int indX, indY;
//start send
long long send_start = get_time();
check_error(hipMemcpy(I_GPU, I, sizeof (unsigned char) *IszX * IszY*Nfr, hipMemcpyHostToDevice));
check_error(hipMemcpy(objxy_GPU, objxy, sizeof (int) *2 * countOnes, hipMemcpyHostToDevice));
check_error(hipMemcpy(weights_GPU, weights, sizeof (double) *Nparticles, hipMemcpyHostToDevice));
check_error(hipMemcpy(xj_GPU, xj, sizeof (double) *Nparticles, hipMemcpyHostToDevice));
check_error(hipMemcpy(yj_GPU, yj, sizeof (double) *Nparticles, hipMemcpyHostToDevice));
check_error(hipMemcpy(seed_GPU, seed, sizeof (int) *Nparticles, hipMemcpyHostToDevice));
long long send_end = get_time();
printf("TIME TO SEND TO GPU: %f\n", elapsed_time(send_start, send_end));
int num_blocks = ceil((double) Nparticles / (double) threads_per_block);
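// Per-frame pipeline: likelihood_kernel (propagate particles and produce per-block
// weight sums), sum_kernel (combine the partial sums), normalize_weights_kernel
// (normalize weights, build the CDF and the resampling offsets u), find_index_kernel
// (systematic resampling into xj/yj).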
for (k = 1; k < Nfr; k++) {
likelihood_kernel << < num_blocks, threads_per_block >> > (arrayX_GPU, arrayY_GPU, xj_GPU, yj_GPU, CDF_GPU, ind_GPU, objxy_GPU, likelihood_GPU, I_GPU, u_GPU, weights_GPU, Nparticles, countOnes, max_size, k, IszY, Nfr, seed_GPU, partial_sums);
sum_kernel << < num_blocks, threads_per_block >> > (partial_sums, Nparticles);
normalize_weights_kernel << < num_blocks, threads_per_block >> > (weights_GPU, Nparticles, partial_sums, CDF_GPU, u_GPU, seed_GPU);
find_index_kernel << < num_blocks, threads_per_block >> > (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, weights_GPU, Nparticles);
}//end loop
//block till kernels are finished
hipDeviceSynchronize();
long long back_time = get_time();
hipFree(xj_GPU);
hipFree(yj_GPU);
hipFree(CDF_GPU);
hipFree(u_GPU);
hipFree(likelihood_GPU);
hipFree(I_GPU);
hipFree(objxy_GPU);
hipFree(ind_GPU);
hipFree(seed_GPU);
hipFree(partial_sums);
long long free_time = get_time();
check_error(hipMemcpy(arrayX, arrayX_GPU, sizeof (double) *Nparticles, hipMemcpyDeviceToHost));
long long arrayX_time = get_time();
check_error(hipMemcpy(arrayY, arrayY_GPU, sizeof (double) *Nparticles, hipMemcpyDeviceToHost));
long long arrayY_time = get_time();
check_error(hipMemcpy(weights, weights_GPU, sizeof (double) *Nparticles, hipMemcpyDeviceToHost));
long long back_end_time = get_time();
printf("GPU Execution: %lf\n", elapsed_time(send_end, back_time));
printf("FREE TIME: %lf\n", elapsed_time(back_time, free_time));
printf("TIME TO SEND BACK: %lf\n", elapsed_time(back_time, back_end_time));
printf("SEND ARRAY X BACK: %lf\n", elapsed_time(free_time, arrayX_time));
printf("SEND ARRAY Y BACK: %lf\n", elapsed_time(arrayX_time, arrayY_time));
printf("SEND WEIGHTS BACK: %lf\n", elapsed_time(arrayY_time, back_end_time));
xe = 0;
ye = 0;
// estimate the object location by expected values
for (x = 0; x < Nparticles; x++) {
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt(pow((double) (xe - (int) roundDouble(IszY / 2.0)), 2) + pow((double) (ye - (int) roundDouble(IszX / 2.0)), 2));
printf("%lf\n", distance);
//CUDA freeing of memory
hipFree(weights_GPU);
hipFree(arrayY_GPU);
hipFree(arrayX_GPU);
//free regular memory
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(ind);
free(u);
}
int main(int argc, char * argv[]) {
char* usage = "double.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
//check number of arguments
if (argc != 9) {
printf("%s\n", usage);
return 0;
}
//check args delimiters
if (strcmp(argv[1], "-x") || strcmp(argv[3], "-y") || strcmp(argv[5], "-z") || strcmp(argv[7], "-np")) {
printf("%s\n", usage);
return 0;
}
int IszX, IszY, Nfr, Nparticles;
//converting a string to an integer
if (sscanf(argv[2], "%d", &IszX) == EOF) {
printf("ERROR: dimX input is incorrect");
return 0;
}
if (IszX <= 0) {
printf("dimX must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[4], "%d", &IszY) == EOF) {
printf("ERROR: dimY input is incorrect");
return 0;
}
if (IszY <= 0) {
printf("dimY must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[6], "%d", &Nfr) == EOF) {
printf("ERROR: Number of frames input is incorrect");
return 0;
}
if (Nfr <= 0) {
printf("number of frames must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[8], "%d", &Nparticles) == EOF) {
printf("ERROR: Number of particles input is incorrect");
return 0;
}
if (Nparticles <= 0) {
printf("Number of particles must be > 0\n");
return 0;
}
//establish seed
int * seed = (int *) malloc(sizeof (int) *Nparticles);
int i;
for (i = 0; i < Nparticles; i++)
seed[i] = time(0) * i;
//malloc matrix
unsigned char * I = (unsigned char *) malloc(sizeof (unsigned char) *IszX * IszY * Nfr);
long long start = get_time();
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
free(seed);
free(I);
return 0;
}
| c3c215a70f86d94403eeb3c351e8b46008b0363e.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#define BLOCK_X 16
#define BLOCK_Y 16
#define PI 3.1415926535897932
const int threads_per_block = 128;
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) +tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
double elapsed_time(long long start_time, long long end_time) {
return (double) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
void check_error(cudaError e) {
if (e != cudaSuccess) {
printf("\nCUDA error: %s\n", cudaGetErrorString(e));
exit(1);
}
}
void cuda_print_double_array(double *array_GPU, size_t size) {
//allocate temporary array for printing
double* mem = (double*) malloc(sizeof (double) *size);
//transfer data from device
cudaMemcpy(mem, array_GPU, sizeof (double) *size, cudaMemcpyDeviceToHost);
printf("PRINTING ARRAY VALUES\n");
//print values in memory
for (size_t i = 0; i < size; ++i) {
printf("[%d]:%0.6f\n", i, mem[i]);
}
printf("FINISHED PRINTING ARRAY VALUES\n");
//clean up memory
free(mem);
mem = NULL;
}
/********************************
* CALC LIKELIHOOD SUM
* DETERMINES THE LIKELIHOOD SUM BASED ON THE FORMULA: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 100
* param 1 I 3D matrix
* param 2 current ind array
* param 3 length of ind array
* returns a double representing the sum
********************************/
__device__ double calcLikelihoodSum(unsigned char * I, int * ind, int numOnes, int index) {
double likelihoodSum = 0.0;
int x;
for (x = 0; x < numOnes; x++)
likelihoodSum += (pow((double) (I[ind[index * numOnes + x]] - 100), 2) - pow((double) (I[ind[index * numOnes + x]] - 228), 2)) / 50.0;
return likelihoodSum;
}
/****************************
CDF CALCULATE
CALCULATES CDF
param1 CDF
param2 weights
param3 Nparticles
*****************************/
__device__ void cdfCalc(double * CDF, double * weights, int Nparticles) {
int x;
CDF[0] = weights[0];
for (x = 1; x < Nparticles; x++) {
CDF[x] = weights[x] + CDF[x - 1];
}
}
/*****************************
* RANDU
* GENERATES A UNIFORM DISTRIBUTION
* returns a double representing a randomly generated number from a uniform distribution with range [0, 1)
******************************/
__device__ double d_randu(int * seed, int index) {
int M = INT_MAX;
int A = 1103515245;
int C = 12345;
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((double) M));
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
double randu(int * seed, int index) {
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((double) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
double randn(int * seed, int index) {
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2 * PI * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
double test_randn(int * seed, int index) {
//Box-Muller algorithm
double pi = 3.14159265358979323846;
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2 * pi * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
__device__ double d_randn(int * seed, int index) {
//Box-Muller algorithm
double pi = 3.14159265358979323846;
double u = d_randu(seed, index);
double v = d_randu(seed, index);
double cosine = cos(2 * pi * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
/****************************
UPDATE WEIGHTS
UPDATES WEIGHTS
param1 weights
param2 likelihood
param3 Nparticles
****************************/
__device__ double updateWeights(double * weights, double * likelihood, int Nparticles) {
int x;
double sum = 0;
for (x = 0; x < Nparticles; x++) {
weights[x] = weights[x] * exp(likelihood[x]);
sum += weights[x];
}
return sum;
}
__device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value) {
if (endIndex < beginIndex)
return -1;
int middleIndex;
while (endIndex > beginIndex) {
middleIndex = beginIndex + ((endIndex - beginIndex) / 2);
if (CDF[middleIndex] >= value) {
if (middleIndex == 0)
return middleIndex;
else if (CDF[middleIndex - 1] < value)
return middleIndex;
else if (CDF[middleIndex - 1] == value) {
while (CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if (CDF[middleIndex] > value)
endIndex = middleIndex - 1;
else
beginIndex = middleIndex + 1;
}
return -1;
}
/** added this function. was missing in original double version.
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
__device__ double dev_round_double(double value) {
int newValue = (int) (value);
if (value - newValue < .5f)
return newValue;
else
return newValue + 1;
}
/*****************************
* CUDA Find Index Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: weights
* param8: Nparticles
*****************************/
__global__ void find_index_kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, double * weights, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i < Nparticles) {
int index = -1;
int x;
for (x = 0; x < Nparticles; x++) {
if (CDF[x] >= u[i]) {
index = x;
break;
}
}
if (index == -1) {
index = Nparticles - 1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
//weights[i] = 1 / ((double) (Nparticles)); //moved this code to the beginning of likelihood kernel
}
__syncthreads();
}
__global__ void normalize_weights_kernel(double * weights, int Nparticles, double* partial_sums, double * CDF, double * u, int * seed) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
__shared__ double u1, sumWeights;
if(0 == threadIdx.x)
sumWeights = partial_sums[0];
__syncthreads();
if (i < Nparticles) {
weights[i] = weights[i] / sumWeights;
}
__syncthreads();
if (i == 0) {
cdfCalc(CDF, weights, Nparticles);
u[0] = (1 / ((double) (Nparticles))) * d_randu(seed, i); // do this to allow all threads in all blocks to use the same u1
}
__syncthreads();
if(0 == threadIdx.x)
u1 = u[0];
__syncthreads();
if (i < Nparticles) {
u[i] = u1 + i / ((double) (Nparticles));
}
}
__global__ void sum_kernel(double* partial_sums, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i == 0) {
int x;
double sum = 0.0;
int num_blocks = ceil((double) Nparticles / (double) threads_per_block);
for (x = 0; x < num_blocks; x++) {
sum += partial_sums[x];
}
partial_sums[0] = sum;
}
}
/*****************************
* CUDA Likelihood Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param2.5: CDF
* param3: ind
* param4: objxy
* param5: likelihood
* param6: I
* param6.5: u
* param6.75: weights
* param7: Nparticles
* param8: countOnes
* param9: max_size
* param10: k
* param11: IszY
* param12: Nfr
*****************************/
__global__ void likelihood_kernel(double * arrayX, double * arrayY, double * xj, double * yj, double * CDF, int * ind, int * objxy, double * likelihood, unsigned char * I, double * u, double * weights, int Nparticles, int countOnes, int max_size, int k, int IszY, int Nfr, int *seed, double* partial_sums) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
int y;
int indX, indY;
__shared__ double buffer[512];
if (i < Nparticles) {
arrayX[i] = xj[i];
arrayY[i] = yj[i];
weights[i] = 1 / ((double) (Nparticles)); //Donnie - moved this line from end of find_index_kernel to prevent all weights from being reset before calculating position on final iteration.
arrayX[i] = arrayX[i] + 1.0 + 5.0 * d_randn(seed, i);
arrayY[i] = arrayY[i] - 2.0 + 2.0 * d_randn(seed, i);
}
__syncthreads();
if (i < Nparticles) {
for (y = 0; y < countOnes; y++) {
//added dev_round_double() to be consistent with roundDouble
indX = dev_round_double(arrayX[i]) + objxy[y * 2 + 1];
indY = dev_round_double(arrayY[i]) + objxy[y * 2];
ind[i * countOnes + y] = abs(indX * IszY * Nfr + indY * Nfr + k);
if (ind[i * countOnes + y] >= max_size)
ind[i * countOnes + y] = 0;
}
likelihood[i] = calcLikelihoodSum(I, ind, countOnes, i);
likelihood[i] = likelihood[i] / countOnes;
weights[i] = weights[i] * exp(likelihood[i]); //Donnie Newell - added the missing exponential function call
}
buffer[threadIdx.x] = 0.0;
__syncthreads();
if (i < Nparticles) {
buffer[threadIdx.x] = weights[i];
}
__syncthreads();
//this doesn't account for the last block that isn't full
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
buffer[threadIdx.x] += buffer[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
partial_sums[blockIdx.x] = buffer[0];
}
__syncthreads();
}
/**
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
double roundDouble(double value) {
int newValue = (int) (value);
if (value - newValue < .5)
return newValue;
else
return newValue + 1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, unsigned char * array3D, int * dimX, int * dimY, int * dimZ) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
if (array3D[x * *dimY * *dimZ + y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(unsigned char * array3D, int * dimX, int * dimY, int * dimZ, int * seed) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (unsigned char) (5 * randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius) {
int diameter = radius * 2 - 1;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
double distance = sqrt(pow((double) (x - radius + 1), 2) + pow((double) (y - radius + 1), 2));
if (distance < radius)
disk[x * diameter + y] = 1;
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(unsigned char * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error) {
int startX = posX - error;
while (startX < 0)
startX++;
int startY = posY - error;
while (startY < 0)
startY++;
int endX = posX + error;
while (endX > dimX)
endX--;
int endY = posY + error;
while (endY > dimY)
endY--;
int x, y;
for (x = startX; x < endX; x++) {
for (y = startY; y < endY; y++) {
double distance = sqrt(pow((double) (x - posX), 2) + pow((double) (y - posY), 2));
if (distance < error)
matrix[x * dimY * dimZ + y * dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(unsigned char * matrix, int dimX, int dimY, int dimZ, int error, unsigned char * newMatrix) {
int x, y, z;
for (z = 0; z < dimZ; z++) {
for (x = 0; x < dimX; x++) {
for (y = 0; y < dimY; y++) {
if (matrix[x * dimY * dimZ + y * dimZ + z] == 1) {
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, int * neighbors, int radius) {
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius * 2 - 1;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (se[x * diameter + y]) {
neighbors[neighY * 2] = (int) (y - center);
neighbors[neighY * 2 + 1] = (int) (x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity is known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(unsigned char * I, int IszX, int IszY, int Nfr, int * seed) {
int k;
int max_size = IszX * IszY * Nfr;
/*get object centers*/
int x0 = (int) roundDouble(IszY / 2.0);
int y0 = (int) roundDouble(IszX / 2.0);
I[x0 * IszY * Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for (k = 1; k < Nfr; k++) {
xk = abs(x0 + (k-1));
yk = abs(y0 - 2 * (k-1));
pos = yk * IszY * Nfr + xk * Nfr + k;
if (pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
unsigned char * newMatrix = (unsigned char *) malloc(sizeof (unsigned char) * IszX * IszY * Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for (x = 0; x < IszX; x++) {
for (y = 0; y < IszY; y++) {
for (k = 0; k < Nfr; k++) {
I[x * IszY * Nfr + y * Nfr + k] = newMatrix[x * IszY * Nfr + y * Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(double * CDF, int lengthCDF, double value) {
int index = -1;
int x;
for (x = 0; x < lengthCDF; x++) {
if (CDF[x] >= value) {
index = x;
break;
}
}
if (index == -1) {
return lengthCDF - 1;
}
return index;
}
/**
 * The implementation of the particle filter using CUDA for many frames
 * @see https://docs.nvidia.com/cuda/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(unsigned char * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles) {
int max_size = IszX * IszY*Nfr;
//original particle centroid
double xe = roundDouble(IszY / 2.0);
double ye = roundDouble(IszX / 2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius * 2 - 1;
int * disk = (int*) malloc(diameter * diameter * sizeof (int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (disk[x * diameter + y] == 1)
countOnes++;
}
}
int * objxy = (int *) malloc(countOnes * 2 * sizeof (int));
getneighbors(disk, countOnes, objxy, radius);
//initial weights are all equal (1/Nparticles)
double * weights = (double *) malloc(sizeof (double) *Nparticles);
for (x = 0; x < Nparticles; x++) {
weights[x] = 1 / ((double) (Nparticles));
}
//initial likelihood to 0.0
double * likelihood = (double *) malloc(sizeof (double) *Nparticles);
double * arrayX = (double *) malloc(sizeof (double) *Nparticles);
double * arrayY = (double *) malloc(sizeof (double) *Nparticles);
double * xj = (double *) malloc(sizeof (double) *Nparticles);
double * yj = (double *) malloc(sizeof (double) *Nparticles);
double * CDF = (double *) malloc(sizeof (double) *Nparticles);
//GPU copies of arrays
double * arrayX_GPU;
double * arrayY_GPU;
double * xj_GPU;
double * yj_GPU;
double * CDF_GPU;
double * likelihood_GPU;
unsigned char * I_GPU;
double * weights_GPU;
int * objxy_GPU;
int * ind = (int*) malloc(sizeof (int) *countOnes * Nparticles);
int * ind_GPU;
double * u = (double *) malloc(sizeof (double) *Nparticles);
double * u_GPU;
int * seed_GPU;
double* partial_sums;
//CUDA memory allocation
check_error(cudaMalloc((void **) &arrayX_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &arrayY_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &xj_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &yj_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &CDF_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &u_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &likelihood_GPU, sizeof (double) *Nparticles));
//set likelihood to zero
check_error(cudaMemset((void *) likelihood_GPU, 0, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &weights_GPU, sizeof (double) *Nparticles));
check_error(cudaMalloc((void **) &I_GPU, sizeof (unsigned char) *IszX * IszY * Nfr));
check_error(cudaMalloc((void **) &objxy_GPU, sizeof (int) *2 * countOnes));
check_error(cudaMalloc((void **) &ind_GPU, sizeof (int) *countOnes * Nparticles));
check_error(cudaMalloc((void **) &seed_GPU, sizeof (int) *Nparticles));
check_error(cudaMalloc((void **) &partial_sums, sizeof (double) *Nparticles));
//Donnie - this loop is different because in this kernel, arrayX and arrayY
// are set equal to xj before every iteration, so effectively, arrayX and
// arrayY will be set to xe and ye before the first iteration.
for (x = 0; x < Nparticles; x++) {
xj[x] = xe;
yj[x] = ye;
}
int k;
int indX, indY;
//start send
long long send_start = get_time();
check_error(cudaMemcpy(I_GPU, I, sizeof (unsigned char) *IszX * IszY*Nfr, cudaMemcpyHostToDevice));
check_error(cudaMemcpy(objxy_GPU, objxy, sizeof (int) *2 * countOnes, cudaMemcpyHostToDevice));
check_error(cudaMemcpy(weights_GPU, weights, sizeof (double) *Nparticles, cudaMemcpyHostToDevice));
check_error(cudaMemcpy(xj_GPU, xj, sizeof (double) *Nparticles, cudaMemcpyHostToDevice));
check_error(cudaMemcpy(yj_GPU, yj, sizeof (double) *Nparticles, cudaMemcpyHostToDevice));
check_error(cudaMemcpy(seed_GPU, seed, sizeof (int) *Nparticles, cudaMemcpyHostToDevice));
long long send_end = get_time();
printf("TIME TO SEND TO GPU: %f\n", elapsed_time(send_start, send_end));
int num_blocks = ceil((double) Nparticles / (double) threads_per_block);
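	// Each frame runs four kernels in sequence: likelihood_kernel updates the
	// particles and writes per-block partial weight sums into partial_sums,
	// sum_kernel reduces those partial sums, normalize_weights_kernel
	// normalizes the weights and fills CDF_GPU / u_GPU, and find_index_kernel
	// resamples the particles (xj/yj) from the CDF.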
for (k = 1; k < Nfr; k++) {
likelihood_kernel << < num_blocks, threads_per_block >> > (arrayX_GPU, arrayY_GPU, xj_GPU, yj_GPU, CDF_GPU, ind_GPU, objxy_GPU, likelihood_GPU, I_GPU, u_GPU, weights_GPU, Nparticles, countOnes, max_size, k, IszY, Nfr, seed_GPU, partial_sums);
sum_kernel << < num_blocks, threads_per_block >> > (partial_sums, Nparticles);
normalize_weights_kernel << < num_blocks, threads_per_block >> > (weights_GPU, Nparticles, partial_sums, CDF_GPU, u_GPU, seed_GPU);
find_index_kernel << < num_blocks, threads_per_block >> > (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, weights_GPU, Nparticles);
}//end loop
//block till kernels are finished
cudaThreadSynchronize();
long long back_time = get_time();
cudaFree(xj_GPU);
cudaFree(yj_GPU);
cudaFree(CDF_GPU);
cudaFree(u_GPU);
cudaFree(likelihood_GPU);
cudaFree(I_GPU);
cudaFree(objxy_GPU);
cudaFree(ind_GPU);
cudaFree(seed_GPU);
cudaFree(partial_sums);
long long free_time = get_time();
check_error(cudaMemcpy(arrayX, arrayX_GPU, sizeof (double) *Nparticles, cudaMemcpyDeviceToHost));
long long arrayX_time = get_time();
check_error(cudaMemcpy(arrayY, arrayY_GPU, sizeof (double) *Nparticles, cudaMemcpyDeviceToHost));
long long arrayY_time = get_time();
check_error(cudaMemcpy(weights, weights_GPU, sizeof (double) *Nparticles, cudaMemcpyDeviceToHost));
long long back_end_time = get_time();
printf("GPU Execution: %lf\n", elapsed_time(send_end, back_time));
printf("FREE TIME: %lf\n", elapsed_time(back_time, free_time));
printf("TIME TO SEND BACK: %lf\n", elapsed_time(back_time, back_end_time));
printf("SEND ARRAY X BACK: %lf\n", elapsed_time(free_time, arrayX_time));
printf("SEND ARRAY Y BACK: %lf\n", elapsed_time(arrayX_time, arrayY_time));
printf("SEND WEIGHTS BACK: %lf\n", elapsed_time(arrayY_time, back_end_time));
xe = 0;
ye = 0;
// estimate the object location by expected values
for (x = 0; x < Nparticles; x++) {
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt(pow((double) (xe - (int) roundDouble(IszY / 2.0)), 2) + pow((double) (ye - (int) roundDouble(IszX / 2.0)), 2));
printf("%lf\n", distance);
//CUDA freeing of memory
cudaFree(weights_GPU);
cudaFree(arrayY_GPU);
cudaFree(arrayX_GPU);
//free regular memory
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(ind);
free(u);
}
int main(int argc, char * argv[]) {
char* usage = "double.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
//check number of arguments
if (argc != 9) {
printf("%s\n", usage);
return 0;
}
	//check args delimiters
if (strcmp(argv[1], "-x") || strcmp(argv[3], "-y") || strcmp(argv[5], "-z") || strcmp(argv[7], "-np")) {
printf("%s\n", usage);
return 0;
}
int IszX, IszY, Nfr, Nparticles;
	//converting a string to an integer
if (sscanf(argv[2], "%d", &IszX) == EOF) {
printf("ERROR: dimX input is incorrect");
return 0;
}
if (IszX <= 0) {
printf("dimX must be > 0\n");
return 0;
}
	//converting a string to an integer
if (sscanf(argv[4], "%d", &IszY) == EOF) {
printf("ERROR: dimY input is incorrect");
return 0;
}
if (IszY <= 0) {
printf("dimY must be > 0\n");
return 0;
}
	//converting a string to an integer
if (sscanf(argv[6], "%d", &Nfr) == EOF) {
printf("ERROR: Number of frames input is incorrect");
return 0;
}
if (Nfr <= 0) {
printf("number of frames must be > 0\n");
return 0;
}
	//converting a string to an integer
if (sscanf(argv[8], "%d", &Nparticles) == EOF) {
printf("ERROR: Number of particles input is incorrect");
return 0;
}
if (Nparticles <= 0) {
printf("Number of particles must be > 0\n");
return 0;
}
//establish seed
int * seed = (int *) malloc(sizeof (int) *Nparticles);
int i;
for (i = 0; i < Nparticles; i++)
seed[i] = time(0) * i;
//malloc matrix
unsigned char * I = (unsigned char *) malloc(sizeof (unsigned char) *IszX * IszY * Nfr);
long long start = get_time();
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
free(seed);
free(I);
return 0;
}
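// Example invocation (argument values are illustrative):
//   ./double.out -x 128 -y 128 -z 10 -np 1000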
|
29850a6d1658d9b829a867d5cd543568358c7289.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "separableconv_cuda_kernel.cuh"
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#define min(a,b) ((a<b)?(a):(b))
#define max(a,b) ((a>b)?(a):(b))
#define DEBUG (0)
#ifndef BLOCKDIMX
#define BLOCKDIMX (32)
#endif
#ifndef BLOCKDIMY
#define BLOCKDIMY (16)
#endif
using at::Half;
//forward path of our layer
template <typename scalar_t>
__global__ void SeparableConvLayer_gpu_forward_kernelfunc(
const int nElement,
const int w, const int h, const int channel, const int filter_size,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride,
const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride,
const scalar_t* __restrict__ input1, const scalar_t* __restrict__ input2, const scalar_t* __restrict__ input3, scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
	//threadIdx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
	//only use one dimension of the grid and block
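	// Each thread produces one output pixel. For its (w_i, h_i) location it computes,
	// for every channel c:
	//   output[b, c, h_i, w_i] = sum over (fy, fx) of
	//     input1[b, c, h_i+fy, w_i+fx] * input2[b, fy, h_i, w_i] * input3[b, fx, h_i, w_i]
	// i.e. a per-pixel separable filter with a vertical kernel (input2) and a
	// horizontal kernel (input3).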
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w - filter_size + 1;
const bool withinYbounds = h_i < h - filter_size + 1;
const int batch_i = blockIdx.z;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
for ( int c_i = 0 ; c_i < channel ; c_i ++){
float out = 0.0f;
for (int intFilterY = 0; intFilterY < filter_size; intFilterY += 1) {
for (int intFilterX = 0; intFilterX < filter_size; intFilterX += 1) {
float temp1 = input1[batch_i * input1_b_stride + c_i * input1_c_stride + (h_i + intFilterY )* input1_h_stride + (w_i + intFilterX)];
float temp2 = input2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ];
float temp3 = input3[batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ];
out += temp1* temp2 * temp3;
}
}
output[batch_i * output_b_stride + c_i* output_c_stride + h_i * output_h_stride + w_i ] = out;
}
}
return ;
}
template <typename scalar_t>
__global__ void SeparableConvLayer_gpu_backward_kernelfunc(
const int nElement, const int w, const int h, const int channel, const int filter_size,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride,
const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride,
const scalar_t* __restrict__ input1, const scalar_t* __restrict__ input2, const scalar_t* __restrict__ input3,
const scalar_t* __restrict__ gradoutput, scalar_t* gradinput1, scalar_t* gradinput2, scalar_t* gradinput3
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
	//threadIdx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
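	// Backward pass: each thread revisits the same (w_i, h_i) output window and
	// accumulates gradients for input1, input2 (vertical weights) and input3
	// (horizontal weights) with atomicAdd, since neighbouring output pixels
	// touch overlapping input/filter elements.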
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w - filter_size + 1;
const bool withinYbounds = h_i < h - filter_size + 1;
const int batch_i = blockIdx.z;
if(withinXbounds && withinYbounds){
for (int c_i = 0 ; c_i < channel ; c_i ++){
for (int intFilterY = 0; intFilterY < filter_size; intFilterY += 1) {
for ( int intFilterX = 0; intFilterX < filter_size; intFilterX += 1) {
float temp1 = input1[batch_i * input1_b_stride + c_i * input1_c_stride + (h_i + intFilterY )* input1_h_stride + (w_i + intFilterX)];
float temp2 = input2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ];
float temp3 = input3[batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ];
float gradout = gradoutput[batch_i * output_b_stride + c_i* output_c_stride + h_i * output_h_stride + w_i ];
atomicAdd(&gradinput1[batch_i * input1_b_stride + c_i * input1_c_stride + (h_i + intFilterY )* input1_h_stride + (w_i + intFilterX)],
gradout * temp2 * temp3);
atomicAdd(&gradinput2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ],
gradout * temp1 * temp3);
atomicAdd(&gradinput3 [batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ] ,
gradout * temp1 * temp2);
}
}
}
}
return ;
}
int SeparableConvLayer_gpu_forward_kernel(
hipStream_t stream,
const int nElement,
const int w, const int h, const int channel, const int batch,const int filter_size,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride,
const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride,
at::Tensor& input1, at::Tensor& input2, at::Tensor& input3, at::Tensor& output
)
{
int error = 1 ;
dim3 grid;
dim3 block;
// blockthread = 128;
	//the threadIdx.x is scheduled first, then threadIdx.y, threadIdx.z
	//the three channels are processed in one kernel
block = dim3(BLOCKDIMX,BLOCKDIMY,1);
grid = dim3( (w - filter_size + 1 + BLOCKDIMX - 1)/ BLOCKDIMX, (h - filter_size + 1 + BLOCKDIMY - 1) / BLOCKDIMY, batch);
if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);
//extract the data of CudaTensor and use kernel to calculate.
AT_DISPATCH_FLOATING_TYPES(input1.type(), "DepthFlowProjection_gpu_backward", ([&] {hipLaunchKernelGGL((
SeparableConvLayer_gpu_forward_kernelfunc), dim3(grid),dim3(block),0, stream ,
		nElement, //number of elements
w,h,channel, filter_size,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride,
input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride,
output_b_stride,output_c_stride,output_h_stride,output_w_stride,
input1.data<scalar_t>(),input2.data<scalar_t>(),input3.data<scalar_t>(), output.data<scalar_t>()
);
}));
// THCudaCheck(hipGetLastError());
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("gpuerror in BilinearSampler.updateOutput: %s\n", hipGetErrorString(err));
//THError("aborting");
return error;
}
error = 0;
return error;
}
int SeparableConvLayer_gpu_backward_kernel(
hipStream_t stream,
const int nElement,
const int w, const int h, const int channel, const int batch, const int filter_size,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride,
const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride,
at::Tensor& input1, at::Tensor& input2, at::Tensor& input3,
at::Tensor& gradoutput, at::Tensor& gradinput1, at::Tensor& gradinput2, at::Tensor& gradinput3
)
{
int error = 1 ;
dim3 grid;
dim3 block;
//blockthread = 128;
	//the threadIdx.x is scheduled first, then threadIdx.y, threadIdx.z
	//the three channels are processed in one kernel
block = dim3(BLOCKDIMX,BLOCKDIMY,1);
grid = dim3( (w - filter_size + 1 + BLOCKDIMX - 1)/ BLOCKDIMX, (h - filter_size + 1+ BLOCKDIMY - 1) / BLOCKDIMY, batch);
if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);
// hipMemset((void*)gradinput1, 0, input1_b_stride * batch * sizeof(float));
// hipMemset((void*)gradinput2, 0, input2_b_stride * batch * sizeof(float));
// hipMemset((void*)gradinput3, 0, input3_b_stride * batch * sizeof(float));
AT_DISPATCH_FLOATING_TYPES(input1.type(), "DepthFlowProjection_gpu_backward", ([&] {hipLaunchKernelGGL((
SeparableConvLayer_gpu_backward_kernelfunc) , dim3(grid),dim3(block),0, stream,
		nElement, //number of elements
w,h,channel, filter_size,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride,
input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride,
output_b_stride,output_c_stride,output_h_stride,output_w_stride,
input1.data<scalar_t>(), input2.data<scalar_t>(), input3.data<scalar_t>(), gradoutput.data<scalar_t>(),
gradinput1.data<scalar_t>(), gradinput2.data<scalar_t>(), gradinput3.data<scalar_t>()
);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("gpuerror in BilinearSampler.updateGradInput %s\n", hipGetErrorString(err));
//THError("aborting");
return error;
}
error = 0;
return error;
} | 29850a6d1658d9b829a867d5cd543568358c7289.cu | #include <stdio.h>
#include "separableconv_cuda_kernel.cuh"
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#define min(a,b) ((a<b)?(a):(b))
#define max(a,b) ((a>b)?(a):(b))
#define DEBUG (0)
#ifndef BLOCKDIMX
#define BLOCKDIMX (32)
#endif
#ifndef BLOCKDIMY
#define BLOCKDIMY (16)
#endif
using at::Half;
//forward path of our layer
template <typename scalar_t>
__global__ void SeparableConvLayer_gpu_forward_kernelfunc(
const int nElement,
const int w, const int h, const int channel, const int filter_size,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride,
const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride,
const scalar_t* __restrict__ input1, const scalar_t* __restrict__ input2, const scalar_t* __restrict__ input3, scalar_t* output
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
	//threadIdx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
	//only use one dimension of the grid and block
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w - filter_size + 1;
const bool withinYbounds = h_i < h - filter_size + 1;
const int batch_i = blockIdx.z;
// __syncthreads();
// const float fillvalue =0.0f;
if( withinXbounds && withinYbounds) {
for ( int c_i = 0 ; c_i < channel ; c_i ++){
float out = 0.0f;
for (int intFilterY = 0; intFilterY < filter_size; intFilterY += 1) {
for (int intFilterX = 0; intFilterX < filter_size; intFilterX += 1) {
float temp1 = input1[batch_i * input1_b_stride + c_i * input1_c_stride + (h_i + intFilterY )* input1_h_stride + (w_i + intFilterX)];
float temp2 = input2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ];
float temp3 = input3[batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ];
out += temp1* temp2 * temp3;
}
}
output[batch_i * output_b_stride + c_i* output_c_stride + h_i * output_h_stride + w_i ] = out;
}
}
return ;
}
template <typename scalar_t>
__global__ void SeparableConvLayer_gpu_backward_kernelfunc(
const int nElement, const int w, const int h, const int channel, const int filter_size,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride,
const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride,
const scalar_t* __restrict__ input1, const scalar_t* __restrict__ input2, const scalar_t* __restrict__ input3,
const scalar_t* __restrict__ gradoutput, scalar_t* gradinput1, scalar_t* gradinput2, scalar_t* gradinput3
)
{
//blockIdx.z : batch index from 0~B-1
//blockIdx.y : height patch index from ceil(h/16)
//blockIdx.x : width patch index from ceil(w/32)
//threadidx.x: width index 0~31
//threadIdx.y: height index 0~15
//threadIdx.z: Not used
const int w_i = blockIdx.x * blockDim.x + threadIdx.x;
const int h_i = blockIdx.y * blockDim.y + threadIdx.y;
const bool withinXbounds = w_i < w - filter_size + 1;
const bool withinYbounds = h_i < h - filter_size + 1;
const int batch_i = blockIdx.z;
if(withinXbounds && withinYbounds){
for (int c_i = 0 ; c_i < channel ; c_i ++){
for (int intFilterY = 0; intFilterY < filter_size; intFilterY += 1) {
for ( int intFilterX = 0; intFilterX < filter_size; intFilterX += 1) {
float temp1 = input1[batch_i * input1_b_stride + c_i * input1_c_stride + (h_i + intFilterY )* input1_h_stride + (w_i + intFilterX)];
float temp2 = input2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ];
float temp3 = input3[batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ];
float gradout = gradoutput[batch_i * output_b_stride + c_i* output_c_stride + h_i * output_h_stride + w_i ];
atomicAdd(&gradinput1[batch_i * input1_b_stride + c_i * input1_c_stride + (h_i + intFilterY )* input1_h_stride + (w_i + intFilterX)],
gradout * temp2 * temp3);
atomicAdd(&gradinput2[batch_i * input2_b_stride + intFilterY * input2_c_stride + h_i * input2_h_stride + w_i ],
gradout * temp1 * temp3);
atomicAdd(&gradinput3 [batch_i * input3_b_stride + intFilterX * input3_c_stride + h_i * input3_h_stride + w_i ] ,
gradout * temp1 * temp2);
}
}
}
}
return ;
}
int SeparableConvLayer_gpu_forward_kernel(
cudaStream_t stream,
const int nElement,
const int w, const int h, const int channel, const int batch,const int filter_size,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride,
const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride,
at::Tensor& input1, at::Tensor& input2, at::Tensor& input3, at::Tensor& output
)
{
int error = 1 ;
dim3 grid;
dim3 block;
// blockthread = 128;
	//the threadIdx.x is scheduled first, then threadIdx.y, threadIdx.z
	//the three channels are processed in one kernel
block = dim3(BLOCKDIMX,BLOCKDIMY,1);
grid = dim3( (w - filter_size + 1 + BLOCKDIMX - 1)/ BLOCKDIMX, (h - filter_size + 1 + BLOCKDIMY - 1) / BLOCKDIMY, batch);
if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);
//extract the data of CudaTensor and use kernel to calculate.
AT_DISPATCH_FLOATING_TYPES(input1.type(), "DepthFlowProjection_gpu_backward", ([&] {
SeparableConvLayer_gpu_forward_kernelfunc<<<grid,block,0, stream >>>(
		nElement, //number of elements
w,h,channel, filter_size,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride,
input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride,
output_b_stride,output_c_stride,output_h_stride,output_w_stride,
input1.data<scalar_t>(),input2.data<scalar_t>(),input3.data<scalar_t>(), output.data<scalar_t>()
);
}));
// THCudaCheck(cudaGetLastError());
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpuerror in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err));
//THError("aborting");
return error;
}
error = 0;
return error;
}
int SeparableConvLayer_gpu_backward_kernel(
cudaStream_t stream,
const int nElement,
const int w, const int h, const int channel, const int batch, const int filter_size,
const int input1_b_stride, const int input1_c_stride, const int input1_h_stride, const int input1_w_stride,
const int input2_b_stride, const int input2_c_stride, const int input2_h_stride, const int input2_w_stride,
const int input3_b_stride, const int input3_c_stride, const int input3_h_stride, const int input3_w_stride,
const int output_b_stride, const int output_c_stride, const int output_h_stride, const int output_w_stride,
at::Tensor& input1, at::Tensor& input2, at::Tensor& input3,
at::Tensor& gradoutput, at::Tensor& gradinput1, at::Tensor& gradinput2, at::Tensor& gradinput3
)
{
int error = 1 ;
dim3 grid;
dim3 block;
//blockthread = 128;
	//the threadIdx.x is scheduled first, then threadIdx.y, threadIdx.z
	//the three channels are processed in one kernel
block = dim3(BLOCKDIMX,BLOCKDIMY,1);
grid = dim3( (w - filter_size + 1 + BLOCKDIMX - 1)/ BLOCKDIMX, (h - filter_size + 1+ BLOCKDIMY - 1) / BLOCKDIMY, batch);
if(BLOCKDIMX != 32 || BLOCKDIMY != 16||DEBUG)
printf("BLOCKDIMX revised to %d, BLOCKDIMY revised to %d \n", BLOCKDIMX,BLOCKDIMY);
// cudaMemset((void*)gradinput1, 0, input1_b_stride * batch * sizeof(float));
// cudaMemset((void*)gradinput2, 0, input2_b_stride * batch * sizeof(float));
// cudaMemset((void*)gradinput3, 0, input3_b_stride * batch * sizeof(float));
AT_DISPATCH_FLOATING_TYPES(input1.type(), "DepthFlowProjection_gpu_backward", ([&] {
SeparableConvLayer_gpu_backward_kernelfunc <<<grid,block,0, stream>>>(
		nElement, //number of elements
w,h,channel, filter_size,
input1_b_stride,input1_c_stride,input1_h_stride,input1_w_stride,
input2_b_stride,input2_c_stride,input2_h_stride,input2_w_stride,
input3_b_stride,input3_c_stride,input3_h_stride,input3_w_stride,
output_b_stride,output_c_stride,output_h_stride,output_w_stride,
input1.data<scalar_t>(), input2.data<scalar_t>(), input3.data<scalar_t>(), gradoutput.data<scalar_t>(),
gradinput1.data<scalar_t>(), gradinput2.data<scalar_t>(), gradinput3.data<scalar_t>()
);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("gpuerror in BilinearSampler.updateGradInput %s\n", cudaGetErrorString(err));
//THError("aborting");
return error;
}
error = 0;
return error;
} |
57530e925a68765d83e3e99c42a4657e696a2014.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpuLS.cuh"
/*
mode:
= 1 -> master -> creates shared memory
= 0 -> slave -> doesn't create the shared memory
*/
//LS
//Y = 16 x 1024
//X = 1 x 1023
//H = 16 x 1023
using namespace std;
gpuLS::gpuLS() {
//Shared Memory
std::string shm_uid = shmemID;
buffPtr = new ShMemSymBuff(shm_uid, mode);
}
gpuLS::~gpuLS() {}
//Reads in Vector X from file -> 1xcols
void gpuLS::matrix_readX(cuFloatComplex* X, int cols){
ifstream inFile;
inFile.open(fileNameForX);
if (!inFile) {
cerr << "Unable to open file "<< fileNameForX<<", filling in 1+i for x\n";
float c=1.0f;
for (int col = 0; col < cols; col++){
X[col].x = c;
X[col].y = c;
}
return;
}
inFile.read((char*)X, (cols)*sizeof(*X));
/*
float c=0;
for (int col = 0; col < cols; col++){
inFile >> c;
X[col].real=c;
inFile >> c;
X[col].imag=c;
}
*/
cuFloatComplex* temp = 0;
temp=(cuFloatComplex*)malloc ((cols-1)/2* sizeof (*temp));
//copy second half to temp
memmove(temp, &X[(cols+1)/2], (cols-1)/2* sizeof (*X));
//copy first half to second half
memmove(&X[(cols-1)/2], X, (cols+1)/2* sizeof (*X));
//copy temp to first half
memmove(X, temp, (cols-1)/2* sizeof (*X));
free(temp);
inFile.close();
}
void gpuLS::copyPilotToGPU(cuFloatComplex* dX, int rows, int cols) {
//X = 1x1023 -> later can become |H|^2
cuFloatComplex* X = 0;
int sizeX=rows*(cols-1)* sizeof(*X);
X = (cuFloatComplex*)malloc(sizeX);
//cuFloatComplex* H =0;
//H = (cuFloatComplex *)malloc(sizeX*rows);
//hipMalloc((void**)&H, size);
//Read in X vector -> 1x1023
for (int i = 0; i < rows; i++) {
//std::cout << "Here...\n";
matrix_readX(&X[i*(cols-1)], cols-1);
}
//std::cout << "Here...\n";
hipMemcpy(dX, X, rows*(cols-1)*sizeof(*dX), hipMemcpyHostToDevice);
hipDeviceSynchronize();
free(X);
}
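// shiftOneRow swaps the two halves of each length-`cols` row in place (an
// fftshift for odd-length rows), staging the row through shared memory.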
__global__ void shiftOneRow(cuFloatComplex* Y, int cols1, int rows1){
int cols = cols1;
//int rows = rows1;
int col = threadIdx.y*cols + threadIdx.x;
int tid = blockIdx.y*gridDim.x*blockDim.y*cols + blockIdx.x*blockDim.y*cols + threadIdx.y*cols + threadIdx.x;
extern __shared__ cuFloatComplex temp[];
if ((threadIdx.x + blockIdx.x*blockDim.x) < (cols+1)/2) {
temp[col] = Y[tid+((cols-1)/2)];
} else if ((threadIdx.x + blockIdx.x*blockDim.x) >= (cols+1)/2 and (threadIdx.x + blockIdx.x*blockDim.x) < cols) {
temp[col] = Y[tid-((cols+1)/2)];
}
__syncthreads();
Y[tid] = temp[col];
__syncthreads();
}
void gpuLS::shiftOneRowCPU(cuFloatComplex* Y, int cols, int row){
cuFloatComplex* Yf = &Y[row*cols];
//std::cout << "Here...\n";
cuFloatComplex* temp = 0;
temp=(cuFloatComplex*)malloc ((cols+1)/2* sizeof (*temp));
//copy second half to temp
memmove(temp, &Yf[(cols-1)/2], (cols+1)/2* sizeof (*Yf));
//copy first half to second half
memmove(&Yf[(cols+1)/2], Yf, (cols-1)/2* sizeof (*Yf));
//copy temp to first half
memmove(Yf, temp, (cols+1)/2* sizeof (*Yf));
free(temp);
}
__global__ void dropPrefix(cuFloatComplex *Y, cuFloatComplex *dY, int rows1, int cols1){
int rows = rows1;
int cols= cols1;
int tid = blockIdx.x*blockDim.x + threadIdx.x;
Y[tid] = dY[blockIdx.x*(blockDim.x+prefix) + threadIdx.x + prefix];
/*
for(int i =0; i<rows; i++){
memcpy(&Y[i*cols], &dY[i*(cols+prefix)+ prefix], cols*sizeof(*dY));
}
*/
}
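// findHs performs the least-squares channel estimate: it drops the first
// element of each FFT'd row, divides element-wise by the known pilot X, and
// stores the conjugate, so dH holds conj(H) for later matched filtering.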
__global__ void findHs(cuFloatComplex* dY, cuFloatComplex* dH, cuFloatComplex* dX, int rows1, int cols1){
int cols = cols1-1;
int rows = rows1;
int tid = (blockIdx.y*gridDim.x*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y)*cols + threadIdx.x;
int tid2 = (blockIdx.y*gridDim.x*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y)*(cols+1) + threadIdx.x + 1;
//find my work
//Drop first element and copy it into Hconj
if ((blockIdx.y + threadIdx.y)*blockDim.x + threadIdx.x < cols) {
dH[tid] = dY[tid2];
}
__syncthreads();
//complex division
//H/X where H = FFT(Y) (w/ dropped first element)
//Then take conjugate of H
if (tid < cols*rows) {
dH[tid] = cuCdivf(dH[tid], dX[tid]);
dH[tid] = cuConjf(dH[tid]);
//dX[tid].x = dH[tid].x * dH[tid].x + dH[tid].y * dH[tid].y;
}
__syncthreads();
//Now dH holds conj H
}
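// findDistSqrd reduces |H|^2 across the rows for every column, using a
// shared-memory tree reduction, leaving one float per column in Hsqrd.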
__global__ void findDistSqrd(cuFloatComplex* H, float* Hsqrd, int rows1, int cols1){
int cols = cols1;
int rows = rows1;
//int tid = blockIdx.x*cols + threadIdx.x;
extern __shared__ cuFloatComplex temp[];
int sid = threadIdx.x*cols + blockIdx.x*blockDim.y + threadIdx.y;
int tempID = threadIdx.y*rows + threadIdx.x;
if (sid < rows*cols) {
temp[tempID] = H[sid];
}
temp[tempID].x = temp[tempID].x*temp[tempID].x + temp[tempID].y*temp[tempID].y;
__syncthreads();
for (int i = 1; i < rows; i = i*2) {
if (threadIdx.x%(2*i) == 0 and (blockIdx.x*blockDim.y + threadIdx.y) < cols) {
temp[tempID].x += temp[tempID+i].x;
}
__syncthreads();
}
if(threadIdx.x == 0 and (blockIdx.x*blockDim.y + threadIdx.y) < cols) {
Hsqrd[blockIdx.x*blockDim.y + threadIdx.y] = temp[tempID].x;
}
}
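// multiplyWithChannelConj drops the first element of each received row and
// multiplies the remaining samples element-wise by conj(H), producing the
// matched-filtered symbols in Yf.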
__global__ void multiplyWithChannelConj(cuFloatComplex* Y, cuFloatComplex* Hconj, cuFloatComplex* Yf, int rows1, int cols1,int syms1 = 1){
int rows = rows1;
int cols = cols1-1;
int syms = syms1;
//find my work
//Y x conj(H) -> then sum all rows into elements in Hsqrd
//Y = 16x1024+prefix
//conjH = 16x1023
int tid = (blockIdx.z*gridDim.y*gridDim.x*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y)*cols + blockIdx.y*blockDim.x + threadIdx.x;
int tid2 = (blockIdx.z*gridDim.y*gridDim.x*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y)*(cols+1) + blockIdx.y*blockDim.x + threadIdx.x + 1;
int hid = blockIdx.x*blockDim.y*cols + blockIdx.y*blockDim.x + threadIdx.y*cols + threadIdx.x;
if (blockIdx.y*blockDim.x + threadIdx.x < cols) {
Yf[tid] = Y[tid2];
}
__syncthreads();
if (tid < rows*cols*syms) {
Yf[tid] = cuCmulf(Yf[tid],Hconj[hid]);
}
__syncthreads();
}
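// combineForMRC sums the matched-filtered rows across receive chains and
// divides by |H|^2 (maximal-ratio combining), writing one combined value per
// column and symbol back into Y.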
__global__ void combineForMRC(cuFloatComplex* Y, float* Hsqrd, int rows1, int cols1) {
int rows = rows1;
int cols = cols1;
int row = blockIdx.x*blockDim.y + threadIdx.y;
int col = threadIdx.x;
//int tid = blockIdx.x*blockDim.x + threadIdx.x;
extern __shared__ cuFloatComplex temp[];
int tempID = threadIdx.y*rows + threadIdx.x;
int sid = blockIdx.y*rows*cols + threadIdx.x*cols + blockIdx.x*blockDim.y + threadIdx.y;
temp[tempID] = Y[sid];
for (int i = 1; i < rows; i = i*2) {
if (threadIdx.x%(2*i) == 0 and row < cols) {
temp[tempID] = cuCaddf(temp[tempID],temp[tempID+i]);
}
__syncthreads();
}
if (threadIdx.x == 0 and row < cols) {
Y[row + cols*blockIdx.y].x = temp[tempID].x/Hsqrd[row];
Y[row + cols*blockIdx.y].y = temp[tempID].y/Hsqrd[row];
__syncthreads();
}
}
/*-----------------------------------GPU kernel calling functions--------------------------------------*/
void gpuLS::ShiftOneRow(cuFloatComplex* Y, int cols1, int rows1, dim3 blockDim, dim3 gridDim, hipStream_t* stream) {
hipStream_t localStreamVar = *stream;
shiftOneRow<< <gridDim, blockDim, 0, localStreamVar>> >(Y, cols1, rows1);
}
void gpuLS::DropPrefix(cuFloatComplex *Y, cuFloatComplex *dY, int rows1, int cols1, dim3 blockDim, dim3 gridDim, hipStream_t* stream) {
hipStream_t localStreamVar = *stream;
dropPrefix<< <gridDim, blockDim, 0, localStreamVar>> >(Y, dY, rows1, cols1);
}
void gpuLS::FindLeastSquaresGPU(cuFloatComplex* dY, cuFloatComplex* dH, cuFloatComplex* dX, int rows1, int cols1, dim3 blockDim, dim3 gridDim, hipStream_t* stream) {
hipStream_t localStreamVar = *stream;
findHs<< <gridDim, blockDim, 0, localStreamVar>> >(dY, dH, dX, rows1, cols1);
}
void gpuLS::FindHsqrdforMRC(cuFloatComplex* H, float* Hsqrd, int rows1, int cols1, dim3 blockDim, dim3 gridDim, hipStream_t* stream) {
hipStream_t localStreamVar = *stream;
size_t sharedMemSize = blockDim.x*blockDim.y*blockDim.z;
findDistSqrd<< <gridDim, blockDim, sharedMemSize, localStreamVar>> >(H, Hsqrd, rows1, cols1);
}
void gpuLS::MultiplyWithChannelConj(cuFloatComplex* Y, cuFloatComplex* Hconj, cuFloatComplex* Yf, int rows1, int cols1, int syms1, dim3 blockDim, dim3 gridDim, hipStream_t* stream) {
hipStream_t localStreamVar = *stream;
multiplyWithChannelConj<< <gridDim, blockDim, 0, localStreamVar>> >(Y, Hconj, Yf, rows1, cols1, syms1);
}
void gpuLS::CombineForMRC(cuFloatComplex* Y, float* Hsqrd, int rows1, int cols1, dim3 blockDim, dim3 gridDim, hipStream_t* stream) {
hipStream_t localStreamVar = *stream;
size_t sharedMemSize = blockDim.x*blockDim.y*blockDim.z;
combineForMRC<< <gridDim, blockDim, sharedMemSize, localStreamVar>> >(Y, Hsqrd, rows1, cols1);
}
/*-----------------------------------CuBlas based functions--------------------------------------*/
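// Note: these variants create a hipBLAS handle per thread and call hipBLAS
// routines from inside the kernels (device-side BLAS), unlike the hand-written
// reductions above.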
__global__ void findDistSqrdCuBlas(cuFloatComplex* H, float* Hsqrd, int rows1, int cols1) {
int rows = rows1;
int cols = cols1;
int tid = blockIdx.x*blockDim.x + threadIdx.x;
hipblasHandle_t handle;
hipblasCreate(&handle);
if (tid < cols) {
hipblasScnrm2(handle, rows, &H[tid], cols, &Hsqrd[tid]);
}
hipblasDestroy(handle);
}
__global__ void multiplyWithChanEstCuBlas(cuFloatComplex* Y, cuFloatComplex* Hconj, cuFloatComplex* Yf, float* Hsqrd, int rows1, int cols1, int syms1 = 1) {
int rows = rows1, cols = cols1-1, syms = syms1;
int tid = blockIdx.z*gridDim.y*blockDim.y*cols + blockIdx.y*blockDim.x + threadIdx.y*cols + threadIdx.x;
int tid2 = blockIdx.z*gridDim.y*blockDim.y*(cols+1)*rows + blockIdx.y*blockDim.x + threadIdx.y*(cols+1) + threadIdx.x + 1;
int hid = blockIdx.y*blockDim.x + threadIdx.y*cols + threadIdx.x;
hipblasHandle_t handle;
hipblasCreate(&handle);
/*
if ((blockIdx.y + threadIdx.y)*cols + threadIdx.x < cols) {
Yf[tid] = Y[tid2];
}
__syncthreads();
*/
if (tid2 < rows*(cols+1)*syms) {
		hipblasCdotc(handle, rows, &Hconj[hid], cols, &Y[tid2], cols + 1, &Yf[tid]);
}
__syncthreads();
if (tid < cols*syms) {
Yf[tid].x = Yf[tid].x/Hsqrd[hid];
Yf[tid].y = Yf[tid].y/Hsqrd[hid];
}
hipblasDestroy(handle);
}
/*-----------------------------------Host Functions--------------------------------------*/
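// Host-side flow: read a symbol (or a whole frame) from the shared-memory
// buffer, run a batched FFT across the rows, estimate the channel from the
// pilot symbol (findHs / findDistSqrd), then equalize the data symbols
// (multiplyWithChannelConj / combineForMRC) and fftshift the result before
// copying it back to the host.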
void gpuLS::batchedFFT(cuFloatComplex* Y, int rows, int cols, hipStream_t* stream) {
hipStream_t localStreamVar = *stream;
hipfftHandle plan;
hipfftPlan1d(&plan, cols, HIPFFT_C2C, rows);
	hipfftSetStream(plan, localStreamVar);
	hipfftExecC2C(plan, (hipfftComplex *)Y, (hipfftComplex *)Y, HIPFFT_FORWARD);
	hipfftDestroy(plan);
}
void gpuLS::firstVector(cuFloatComplex* dY, cuFloatComplex* Y, cuFloatComplex* dH, cuFloatComplex* dX, float* Hsqrd, int rows, int cols, int it){
clock_t start, finish;
//std::cout << "Here...\n";
// CUFFT plan -> do it one time before?
//Read in Y with prefix
buffPtr->readNextSymbolCUDA(dY, it);
if(timerEn){
start = clock();
}
hipMemcpy(Y, dY, rows*cols*sizeof(*Y), hipMemcpyHostToDevice);
hipDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setReadT(((float)(finish - start))/(float)CLOCKS_PER_SEC, it);
}
if(timerEn){
start = clock();
}
//FFT(Y)
hipfftHandle plan;
hipfftPlan1d(&plan, cols, HIPFFT_C2C, rows);
hipfftExecC2C(plan, (hipfftComplex *)Y, (hipfftComplex *)Y, HIPFFT_FORWARD);
hipfftDestroy(plan);
hipDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setFft(((float)(finish - start))/(float)CLOCKS_PER_SEC, it);
}
//find Hconj and Hsqrd
if(timerEn){
start = clock();
}
// dim3 dimBlock(numOfBlocks, threadsPerBlock-1);
findHs<< <numOfBlocks, threadsPerBlock-1>> >(Y, dH, dX, rows, cols);
hipDeviceSynchronize();
//Save |H|^2 into Hsqrd
findDistSqrd<< <threadsPerBlock-1, numOfBlocks, numOfBlocks*sizeof(cuFloatComplex)>> >(dH, Hsqrd, rows, cols-1);
hipDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setDecode(((float)(finish - start))/(float)CLOCKS_PER_SEC, it);
}
//free(X);
//hipFree(H);
//dH holds H conj
//dX holds {H^2)
}
void gpuLS::demodOneSymbol(cuFloatComplex *dY, cuFloatComplex* Y, cuFloatComplex *Hconj, float *Hsqrd,int rows1, int cols1, int it) {
int rows = rows1;
int cols= cols1;
clock_t start, finish;
//Y x conj(H) -> then sum all rows into elements in Hsqrd
//Y = 16x1024+prefix
//conjH = 16x1024
if(it==numberOfSymbolsToTest-1){
//if last one
buffPtr->readLastSymbolCUDA(dY);
} else {
buffPtr->readNextSymbolCUDA(dY, it);
}
if(timerEn){
start = clock();
}
hipMemcpy(Y, dY, rows*cols*sizeof(*Y), hipMemcpyHostToDevice);
hipDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setReadT(((float)(finish - start))/(float)CLOCKS_PER_SEC, it);
}
if(timerEn){
start = clock();
}
//FFT(Y)
hipfftHandle plan;
hipfftPlan1d(&plan, threadsPerBlock, HIPFFT_C2C, numOfBlocks);
hipfftExecC2C(plan, (hipfftComplex *)Y, (hipfftComplex *)Y, HIPFFT_FORWARD);
hipfftDestroy(plan);
hipDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setFft(((float)(finish - start))/(float)CLOCKS_PER_SEC, it);
}
cuFloatComplex* Yf = 0;
hipMalloc((void**)&Yf, rows*(cols-1)* sizeof (*Yf));
if(timerEn){
start = clock();
}
multiplyWithChannelConj<< <numOfBlocks, threadsPerBlock-1>> >(Y, Hconj, Yf, rows, cols);
hipDeviceSynchronize();
combineForMRC<< <threadsPerBlock-1, numOfBlocks, numOfBlocks*sizeof(cuFloatComplex)>> >(Yf, Hsqrd, rows, cols-1);
hipDeviceSynchronize();
hipMemcpy(dY, Yf, (cols-1)*sizeof(*dY), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
shiftOneRowCPU(dY,cols-1,0);
if(timerEn){
finish = clock();
buffPtr->setDecode(((float)(finish - start))/(float)CLOCKS_PER_SEC, it);
}
hipFree(Yf);
hipDeviceSynchronize();
}
void gpuLS::demodOneFrame(cuFloatComplex *dY, cuFloatComplex* Y, cuFloatComplex* dX, cuFloatComplex *Hconj, float *Hsqrd, int rows1, int cols1) {
int rows = rows1;
int cols = cols1;
int maxThreads = devProp.maxThreadsPerBlock;
clock_t start, finish;
//Y x conj(H) -> then sum all rows into elements in Hsqrd
//Y = 16x1024+prefix
//conjH = 16x1024
for (int it = 0; it < numberOfSymbolsToTest; it++) {
if(it==numberOfSymbolsToTest-1){
//if last one
buffPtr->readLastSymbol(&dY[rows*cols*it]);
} else {
buffPtr->readNextSymbol(&dY[rows*cols*it], it);
}
}
if(timerEn){
start = clock();
}
hipMemcpy(Y, dY, rows*cols*(lenOfBuffer)*sizeof(*Y), hipMemcpyHostToDevice);
hipDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setReadT(((float)(finish - start))/(float)CLOCKS_PER_SEC, 1);
}
if(timerEn){
start = clock();
}
//FFT(Y)
hipfftHandle plan;
hipfftPlan1d(&plan, cols, HIPFFT_C2C, rows*(lenOfBuffer));
hipfftExecC2C(plan, (hipfftComplex *)Y, (hipfftComplex *)Y, HIPFFT_FORWARD);
hipfftDestroy(plan);
hipDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setFft(((float)(finish - start))/(float)CLOCKS_PER_SEC, 1);
}
if(timerEn){
start = clock();
}
// dim3 dimBlock(numOfBlocks, threadsPerBlock-1);
if (threadsPerBlock <= maxThreads) {
findHs<< <numOfBlocks, threadsPerBlock-1>> >(Y, Hconj, dX, rows, cols);
hipDeviceSynchronize();
//Save |H|^2 into Hsqrd
} else {
dim3 chanEstDim(numOfBlocks,ceil(threadsPerBlock/maxThreads));
findHs<< <chanEstDim, maxThreads>> >(Y, Hconj, dX, rows, cols);
hipDeviceSynchronize();
//Save |H|^2 into Hsqrd
}
findDistSqrd<< <threadsPerBlock-1, numOfBlocks, numOfBlocks*sizeof(cuFloatComplex)>> >(Hconj, Hsqrd, rows, cols-1);
hipDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setDecode(((float)(finish - start))/(float)CLOCKS_PER_SEC, 0);
}
if(timerEn){
start = clock();
}
cuFloatComplex* Yf = 0;
hipMalloc((void**)&Yf, rows*(cols-1)*(lenOfBuffer-1)* sizeof (*Yf));
if (threadsPerBlock <= maxThreads) {
dim3 gridDims1(numOfBlocks, 1, lenOfBuffer-1);
multiplyWithChannelConj<< <gridDims1, threadsPerBlock-1>> >(&Y[rows*cols], Hconj, Yf, rows, cols, numberOfSymbolsToTest-1);
hipDeviceSynchronize();
} else {
dim3 gridDims1(numOfBlocks, ceil(threadsPerBlock/maxThreads), lenOfBuffer-1);
multiplyWithChannelConj<< <gridDims1, maxThreads>> >(&Y[rows*cols], Hconj, Yf, rows, cols, numberOfSymbolsToTest-1);
hipDeviceSynchronize();
}
dim3 gridDims2(threadsPerBlock-1, lenOfBuffer-1);
combineForMRC<< <gridDims2, numOfBlocks, numOfBlocks*sizeof(cuFloatComplex)>> >(Yf, Hsqrd, rows, cols-1);
hipDeviceSynchronize();
shiftOneRow<< <lenOfBuffer-1, threadsPerBlock-1, (threadsPerBlock-1)*sizeof(cuFloatComplex)>> >(Yf, cols-1, rows);
hipDeviceSynchronize();
hipMemcpy(dY, Yf, (cols-1)*(lenOfBuffer-1)*sizeof(*dY), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
/*
for (int it = 1; it < lenOfBuffer-1; it++) {
shiftOneRowCPU(dY,cols-1,0);
}
*/
if(timerEn){
finish = clock();
buffPtr->setDecode(((float)(finish - start))/(float)CLOCKS_PER_SEC, 1);
}
hipFree(Yf);
hipDeviceSynchronize();
}
void gpuLS::demodOneFrameCUDA(cuFloatComplex* dY, cuFloatComplex* Y, cuFloatComplex* dX, cuFloatComplex *Hconj, float *Hsqrd, int rows1, int cols1) {
int rows = rows1;
int cols = cols1;
int maxThreads = devProp.maxThreadsPerBlock;
clock_t start, finish;
//Y x conj(H) -> then sum all rows into elements in Hsqrd
//Y = 16x1024+prefix
//conjH = 16x1024
/*
for (int it = 0; it < numberOfSymbolsToTest; it++) {
if(it==numberOfSymbolsToTest-1){
//if last one
buffPtr->readLastSymbolCUDA(&Y[rows*cols*it]);
} else {
buffPtr->readNextSymbolCUDA(&Y[rows*cols*it], it);
}
}
// hipDeviceSynchronize();
*/
if(timerEn){
start = clock();
}
//FFT(Y)
hipfftHandle plan;
hipfftPlan1d(&plan, cols, HIPFFT_C2C, rows*(lenOfBuffer));
hipfftExecC2C(plan, (hipfftComplex *)Y, (hipfftComplex *)Y, HIPFFT_FORWARD);
hipfftDestroy(plan);
hipDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setFft(((float)(finish - start))/(float)CLOCKS_PER_SEC, 1);
}
if(timerEn){
start = clock();
}
// dim3 dimBlock(numOfBlocks, threadsPerBlock-1);
if (threadsPerBlock <= maxThreads) {
findHs<< <numOfBlocks, threadsPerBlock>> >(Y, Hconj, dX, rows, cols);
hipDeviceSynchronize();
//Save |H|^2 into Hsqrd
} else {
dim3 chanEstBlockDim1(maxThreads);
dim3 chanEstGridDim1(numOfBlocks,ceil((float)threadsPerBlock/(float)maxThreads));
findHs<< <chanEstGridDim1, chanEstBlockDim1>> >(Y, Hconj, dX, rows, cols);
// hipDeviceSynchronize();
//Save |H|^2 into Hsqrd
}
// dim3 chanEstBlockDim2(rows,ceil((float)maxThreads/(float)rows));
// dim3 chanEstGridDim2(ceil((float)(cols)/(ceil((float)maxThreads/(float)rows))),ceil((float)rows/(float)maxThreads));
findDistSqrd<< <threadsPerBlock-1,numOfBlocks, numOfBlocks*sizeof(cuFloatComplex)>> >(Hconj, Hsqrd, rows, cols-1);
hipDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setDecode(((float)(finish - start))/(float)CLOCKS_PER_SEC, 0);
}
if(timerEn){
start = clock();
}
cuFloatComplex* Yf = 0;
hipMalloc((void**)&Yf, rows*(cols-1)*(lenOfBuffer-1)* sizeof (*Yf));
if (threadsPerBlock <= maxThreads) {
dim3 gridDims1(numOfBlocks, 1, lenOfBuffer-1);
multiplyWithChannelConj<< <gridDims1, threadsPerBlock-1>> >(&Y[rows*cols], Hconj, Yf, rows, cols, numberOfSymbolsToTest-1);
hipDeviceSynchronize();
} else {
dim3 gridDims1(numOfBlocks, ceil((float)threadsPerBlock/(float)maxThreads), lenOfBuffer-1);
multiplyWithChannelConj<< <gridDims1, maxThreads>> >(&Y[rows*cols], Hconj, Yf, rows, cols, numberOfSymbolsToTest-1);
hipDeviceSynchronize();
}
dim3 gridDims2(threadsPerBlock-1, lenOfBuffer-1);
combineForMRC<< <gridDims2, numOfBlocks, numOfBlocks*sizeof(cuFloatComplex)>> >(Yf, Hsqrd, rows, cols-1);
hipDeviceSynchronize();
if (threadsPerBlock <= maxThreads) {
dim3 gridDims3(1,lenOfBuffer-1);
shiftOneRow<< <gridDims3, threadsPerBlock-1, (threadsPerBlock-1)*sizeof(cuFloatComplex)>> >(Yf, cols-1, rows);
hipDeviceSynchronize();
} else {
dim3 gridDims3(ceil((float)threadsPerBlock/(float)maxThreads), lenOfBuffer-1);
shiftOneRow<< <gridDims3, maxThreads, (threadsPerBlock-1)*sizeof(cuFloatComplex)>> >(Yf, cols-1, rows);
hipDeviceSynchronize();
}
hipMemcpy(dY, Yf, (cols-1)*(lenOfBuffer-1)*sizeof(*dY), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
/*
for (int it = 1; it < lenOfBuffer-1; it++) {
shiftOneRowCPU(dY,cols-1,0);
}
*/
if(timerEn){
finish = clock();
buffPtr->setDecode(((float)(finish - start))/(float)CLOCKS_PER_SEC, 1);
}
hipFree(Yf);
}
void gpuLS::demodOptimized(cuFloatComplex* dY, cuFloatComplex* Y, cuFloatComplex* dX, cuFloatComplex *Hconj, float *Hsqrd, int rows1, int cols1) {
// hipblasHandle_t handle;
// hipblasCreate(&handle);
int rows = rows1;
int cols = cols1;
int maxThreads = devProp.maxThreadsPerBlock;
clock_t start, finish;
//Y x conj(H) -> then sum all rows into elements in Hsqrd
//Y = 16x1024+prefix
//conjH = 16x1024
// hipDeviceSynchronize();
if(timerEn){
start = clock();
}
//FFT(Y)
hipfftHandle plan;
hipfftPlan1d(&plan, cols, HIPFFT_C2C, rows*(lenOfBuffer));
hipfftExecC2C(plan, (hipfftComplex *)Y, (hipfftComplex *)Y, HIPFFT_FORWARD);
hipfftDestroy(plan);
// hipDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setFft(((float)(finish - start))/(float)CLOCKS_PER_SEC, 1);
}
if(timerEn){
start = clock();
}
if (threadsPerBlock <= maxThreads) {
dim3 chanEstBlockDim(cols,ceil((float)maxThreads/(float)cols));
dim3 chanEstGridDim(ceil((float)rows/ceil((float)maxThreads/(float)cols)),1);
findHs<< <chanEstGridDim, chanEstBlockDim>> >(Y, Hconj, dX, rows, cols);
hipDeviceSynchronize();
//Save |H|^2 into Hsqrd
} else {
dim3 chanEstBlockDim1(maxThreads);
dim3 chanEstGridDim1(rows,ceil((float)cols/(float)maxThreads));
findHs<< <chanEstGridDim1, chanEstBlockDim1>> >(Y, Hconj, dX, rows, cols);
hipDeviceSynchronize();
//Save |H|^2 into Hsqrd
}
dim3 distSqrdBlockDim(rows,ceil((float)maxThreads/(float)rows));
dim3 distSqrdGridDim(ceil((float)(cols-1)/ceil((float)maxThreads/(float)rows)),1);
findDistSqrd<< <distSqrdGridDim, distSqrdBlockDim, maxThreads*sizeof(cuFloatComplex)>> >(Hconj, Hsqrd, rows, cols-1);
if(timerEn){
finish = clock();
buffPtr->setDecode(((float)(finish - start))/(float)CLOCKS_PER_SEC, 0);
}
if(timerEn){
start = clock();
}
cuFloatComplex* Yf = 0;
hipMalloc((void**)&Yf, rows*(cols-1)*(lenOfBuffer-1)* sizeof (*Yf));
if (threadsPerBlock <= maxThreads) {
dim3 blockDims1(cols,ceil((float)maxThreads/(float)cols));
dim3 gridDims1(ceil((float)rows/ceil((float)maxThreads/(float)cols)), 1, lenOfBuffer-1);
multiplyWithChannelConj<< <gridDims1, blockDims1>> >(&Y[rows*cols], Hconj, Yf, rows, cols, numberOfSymbolsToTest-1);
hipDeviceSynchronize();
} else {
dim3 gridDims1(rows, ceil((float)cols/(float)maxThreads), lenOfBuffer-1);
multiplyWithChannelConj<< <gridDims1, maxThreads>> >(&Y[rows*cols], Hconj, Yf, rows, cols, numberOfSymbolsToTest-1);
hipDeviceSynchronize();
}
dim3 blockDims2(rows,ceil((float)maxThreads/(float)rows));
dim3 gridDims2(ceil((float)(cols-1)/ceil((float)maxThreads/(float)rows)), lenOfBuffer-1);
combineForMRC<< <gridDims2, blockDims2, maxThreads*sizeof(cuFloatComplex)>> >(Yf, Hsqrd, rows, cols-1);
hipDeviceSynchronize();
if (cols <= maxThreads) {
dim3 gridDims3(1,lenOfBuffer-1);
shiftOneRow<< <gridDims3, cols-1, (cols-1)*sizeof(cuFloatComplex)>> >(Yf, cols-1, rows);
hipDeviceSynchronize();
} else {
dim3 gridDims3(ceil((float)cols/(float)maxThreads), lenOfBuffer-1);
shiftOneRow<< <gridDims3, maxThreads, (cols-1)*sizeof(cuFloatComplex)>> >(Yf, cols-1, rows);
hipDeviceSynchronize();
}
hipMemcpy(dY, Yf, (cols-1)*(lenOfBuffer-1)*sizeof(*dY), hipMemcpyDeviceToHost);
if(timerEn){
finish = clock();
buffPtr->setDecode(((float)(finish - start))/(float)CLOCKS_PER_SEC, 1);
}
hipFree(Yf);
}
void gpuLS::demodCuBlas(cuFloatComplex* dY, cuFloatComplex* Y, cuFloatComplex* dX, cuFloatComplex *Hconj, float *Hsqrd, int rows1, int cols1) {
// hipblasHandle_t handle;
// hipblasCreate(&handle);
int rows = rows1;
int cols = cols1;
int maxThreads = devProp.maxThreadsPerBlock;
clock_t start, finish;
//Y x conj(H) -> then sum all rows into elements in Hsqrd
//Y = 16x1024+prefix
//conjH = 16x1024
// hipDeviceSynchronize();
if(timerEn){
start = clock();
}
//FFT(Y)
hipfftHandle plan;
hipfftPlan1d(&plan, cols, HIPFFT_C2C, rows*(lenOfBuffer));
hipfftExecC2C(plan, (hipfftComplex *)Y, (hipfftComplex *)Y, HIPFFT_FORWARD);
hipfftDestroy(plan);
// hipDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setFft(((float)(finish - start))/(float)CLOCKS_PER_SEC, 1);
}
if(timerEn){
start = clock();
}
if (cols <= maxThreads) {
dim3 chanEstBlockDim(cols,ceil((float)maxThreads/(float)cols));
dim3 chanEstGridDim(ceil((float)rows/ceil((float)maxThreads/(float)cols)),1);
findHs<< <chanEstGridDim, chanEstBlockDim>> >(Y, Hconj, dX, rows, cols);
findDistSqrdCuBlas<< <1, cols-1>> >(Hconj, Hsqrd, rows, cols-1);
// hipDeviceSynchronize();
//Save |H|^2 into Hsqrd
} else {
dim3 chanEstBlockDim1(maxThreads);
dim3 chanEstGridDim1(rows,ceil((float)cols/(float)maxThreads));
findHs<< <chanEstGridDim1, chanEstBlockDim1>> >(Y, Hconj, dX, rows, cols);
findDistSqrdCuBlas<< <ceil((float)cols/(float)maxThreads), chanEstBlockDim1>> >(Hconj, Hsqrd, rows, cols-1);
// hipDeviceSynchronize();
//Save |H|^2 into Hsqrd
}
if(timerEn){
finish = clock();
buffPtr->setDecode(((float)(finish - start))/(float)CLOCKS_PER_SEC, 0);
}
if(timerEn){
start = clock();
}
cuFloatComplex* Yf = 0;
hipMalloc((void**)&Yf, rows*(cols-1)*(lenOfBuffer-1)* sizeof (*Yf));
if (cols <= maxThreads) {
dim3 blockDims1(cols,ceil((float)maxThreads/(float)cols));
dim3 gridDims1(1, 1, ceil((float)lenOfBuffer-1/(ceil((float)maxThreads/(float)cols))));
multiplyWithChanEstCuBlas<< <gridDims1, blockDims1>> >(&Y[rows*cols], Hconj, Yf, Hsqrd, rows, cols, numberOfSymbolsToTest-1);
// hipDeviceSynchronize();
} else {
dim3 gridDims1(1, ceil((float)cols/(float)maxThreads), lenOfBuffer-1);
multiplyWithChanEstCuBlas<< <gridDims1, maxThreads>> >(&Y[rows*cols], Hconj, Yf, Hsqrd, rows, cols, numberOfSymbolsToTest-1);
// hipDeviceSynchronize();
}
if (cols <= maxThreads) {
dim3 gridDims3(1,lenOfBuffer-1);
shiftOneRow<< <gridDims3, cols-1, (cols-1)*sizeof(cuFloatComplex)>> >(Yf, cols-1, rows);
hipDeviceSynchronize();
} else {
dim3 gridDims3(ceil((float)cols/(float)maxThreads), lenOfBuffer-1);
shiftOneRow<< <gridDims3, maxThreads, (cols-1)*sizeof(cuFloatComplex)>> >(Yf, cols-1, rows);
hipDeviceSynchronize();
}
hipMemcpy(dY, Yf, (cols-1)*(lenOfBuffer-1)*sizeof(*dY), hipMemcpyDeviceToHost);
if(timerEn){
finish = clock();
buffPtr->setDecode(((float)(finish - start))/(float)CLOCKS_PER_SEC, 1);
}
hipFree(Yf);
} | 57530e925a68765d83e3e99c42a4657e696a2014.cu |
#include "gpuLS.cuh"
/*
mode:
= 1 -> master -> creates shared memory
= 0 -> slave -> doesn't create the shared memory
*/
//LS
//Y = 16 x 1024
//X = 1 x 1023
//H = 16 x 1023
using namespace std;
gpuLS::gpuLS() {
//Shared Memory
std::string shm_uid = shmemID;
buffPtr = new ShMemSymBuff(shm_uid, mode);
}
gpuLS::~gpuLS() {}
//Reads in Vector X from file -> 1xcols
void gpuLS::matrix_readX(cuFloatComplex* X, int cols){
ifstream inFile;
inFile.open(fileNameForX);
if (!inFile) {
cerr << "Unable to open file "<< fileNameForX<<", filling in 1+i for x\n";
float c=1.0f;
for (int col = 0; col < cols; col++){
X[col].x = c;
X[col].y = c;
}
return;
}
inFile.read((char*)X, (cols)*sizeof(*X));
/*
float c=0;
for (int col = 0; col < cols; col++){
inFile >> c;
X[col].real=c;
inFile >> c;
X[col].imag=c;
}
*/
cuFloatComplex* temp = 0;
temp=(cuFloatComplex*)malloc ((cols-1)/2* sizeof (*temp));
//copy second half to temp
memmove(temp, &X[(cols+1)/2], (cols-1)/2* sizeof (*X));
//copy first half to second half
memmove(&X[(cols-1)/2], X, (cols+1)/2* sizeof (*X));
//copy temp to first half
memmove(X, temp, (cols-1)/2* sizeof (*X));
free(temp);
inFile.close();
}
void gpuLS::copyPilotToGPU(cuFloatComplex* dX, int rows, int cols) {
//X = 1x1023 -> later can become |H|^2
cuFloatComplex* X = 0;
int sizeX=rows*(cols-1)* sizeof(*X);
X = (cuFloatComplex*)malloc(sizeX);
//cuFloatComplex* H =0;
//H = (cuFloatComplex *)malloc(sizeX*rows);
//cudaMalloc((void**)&H, size);
//Read in X vector -> 1x1023
for (int i = 0; i < rows; i++) {
//std::cout << "Here...\n";
matrix_readX(&X[i*(cols-1)], cols-1);
}
//std::cout << "Here...\n";
cudaMemcpy(dX, X, rows*(cols-1)*sizeof(*dX), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
free(X);
}
__global__ void shiftOneRow(cuFloatComplex* Y, int cols1, int rows1){
int cols = cols1;
//int rows = rows1;
int col = threadIdx.y*cols + threadIdx.x;
int tid = blockIdx.y*gridDim.x*blockDim.y*cols + blockIdx.x*blockDim.y*cols + threadIdx.y*cols + threadIdx.x;
extern __shared__ cuFloatComplex temp[];
if ((threadIdx.x + blockIdx.x*blockDim.x) < (cols+1)/2) {
temp[col] = Y[tid+((cols-1)/2)];
} else if ((threadIdx.x + blockIdx.x*blockDim.x) >= (cols+1)/2 and (threadIdx.x + blockIdx.x*blockDim.x) < cols) {
temp[col] = Y[tid-((cols+1)/2)];
}
__syncthreads();
Y[tid] = temp[col];
__syncthreads();
}
void gpuLS::shiftOneRowCPU(cuFloatComplex* Y, int cols, int row){
cuFloatComplex* Yf = &Y[row*cols];
//std::cout << "Here...\n";
cuFloatComplex* temp = 0;
temp=(cuFloatComplex*)malloc ((cols+1)/2* sizeof (*temp));
//copy second half to temp
memmove(temp, &Yf[(cols-1)/2], (cols+1)/2* sizeof (*Yf));
//copy first half to second half
memmove(&Yf[(cols+1)/2], Yf, (cols-1)/2* sizeof (*Yf));
//copy temp to first half
memmove(Yf, temp, (cols+1)/2* sizeof (*Yf));
free(temp);
}
__global__ void dropPrefix(cuFloatComplex *Y, cuFloatComplex *dY, int rows1, int cols1){
int rows = rows1;
int cols= cols1;
int tid = blockIdx.x*blockDim.x + threadIdx.x;
Y[tid] = dY[blockIdx.x*(blockDim.x+prefix) + threadIdx.x + prefix];
/*
for(int i =0; i<rows; i++){
memcpy(&Y[i*cols], &dY[i*(cols+prefix)+ prefix], cols*sizeof(*dY));
}
*/
}
__global__ void findHs(cuFloatComplex* dY, cuFloatComplex* dH, cuFloatComplex* dX, int rows1, int cols1){
int cols = cols1-1;
int rows = rows1;
int tid = (blockIdx.y*gridDim.x*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y)*cols + threadIdx.x;
int tid2 = (blockIdx.y*gridDim.x*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y)*(cols+1) + threadIdx.x + 1;
//find my work
//Drop first element and copy it into Hconj
if ((blockIdx.y + threadIdx.y)*blockDim.x + threadIdx.x < cols) {
dH[tid] = dY[tid2];
}
__syncthreads();
//complex division
//H/X where H = FFT(Y) (w/ dropped first element)
//Then take conjugate of H
if (tid < cols*rows) {
dH[tid] = cuCdivf(dH[tid], dX[tid]);
dH[tid] = cuConjf(dH[tid]);
//dX[tid].x = dH[tid].x * dH[tid].x + dH[tid].y * dH[tid].y;
}
__syncthreads();
//Now dH holds conj H
}
__global__ void findDistSqrd(cuFloatComplex* H, float* Hsqrd, int rows1, int cols1){
int cols = cols1;
int rows = rows1;
//int tid = blockIdx.x*cols + threadIdx.x;
extern __shared__ cuFloatComplex temp[];
int sid = threadIdx.x*cols + blockIdx.x*blockDim.y + threadIdx.y;
int tempID = threadIdx.y*rows + threadIdx.x;
if (sid < rows*cols) {
temp[tempID] = H[sid];
}
temp[tempID].x = temp[tempID].x*temp[tempID].x + temp[tempID].y*temp[tempID].y;
__syncthreads();
for (int i = 1; i < rows; i = i*2) {
if (threadIdx.x%(2*i) == 0 and (blockIdx.x*blockDim.y + threadIdx.y) < cols) {
temp[tempID].x += temp[tempID+i].x;
}
__syncthreads();
}
if(threadIdx.x == 0 and (blockIdx.x*blockDim.y + threadIdx.y) < cols) {
Hsqrd[blockIdx.x*blockDim.y + threadIdx.y] = temp[tempID].x;
}
}
__global__ void multiplyWithChannelConj(cuFloatComplex* Y, cuFloatComplex* Hconj, cuFloatComplex* Yf, int rows1, int cols1,int syms1 = 1){
int rows = rows1;
int cols = cols1-1;
int syms = syms1;
//find my work
//Y x conj(H) -> then sum all rows into elements in Hsqrd
//Y = 16x1024+prefix
//conjH = 16x1023
int tid = (blockIdx.z*gridDim.y*gridDim.x*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y)*cols + blockIdx.y*blockDim.x + threadIdx.x;
int tid2 = (blockIdx.z*gridDim.y*gridDim.x*blockDim.y + blockIdx.x*blockDim.y + threadIdx.y)*(cols+1) + blockIdx.y*blockDim.x + threadIdx.x + 1;
int hid = blockIdx.x*blockDim.y*cols + blockIdx.y*blockDim.x + threadIdx.y*cols + threadIdx.x;
if (blockIdx.y*blockDim.x + threadIdx.x < cols) {
Yf[tid] = Y[tid2];
}
__syncthreads();
if (tid < rows*cols*syms) {
Yf[tid] = cuCmulf(Yf[tid],Hconj[hid]);
}
__syncthreads();
}
__global__ void combineForMRC(cuFloatComplex* Y, float* Hsqrd, int rows1, int cols1) {
int rows = rows1;
int cols = cols1;
int row = blockIdx.x*blockDim.y + threadIdx.y;
int col = threadIdx.x;
//int tid = blockIdx.x*blockDim.x + threadIdx.x;
extern __shared__ cuFloatComplex temp[];
int tempID = threadIdx.y*rows + threadIdx.x;
int sid = blockIdx.y*rows*cols + threadIdx.x*cols + blockIdx.x*blockDim.y + threadIdx.y;
temp[tempID] = Y[sid];
for (int i = 1; i < rows; i = i*2) {
if (threadIdx.x%(2*i) == 0 and row < cols) {
temp[tempID] = cuCaddf(temp[tempID],temp[tempID+i]);
}
__syncthreads();
}
if (threadIdx.x == 0 and row < cols) {
Y[row + cols*blockIdx.y].x = temp[tempID].x/Hsqrd[row];
Y[row + cols*blockIdx.y].y = temp[tempID].y/Hsqrd[row];
__syncthreads();
}
}
/*-----------------------------------GPU kernel calling functions--------------------------------------*/
void gpuLS::ShiftOneRow(cuFloatComplex* Y, int cols1, int rows1, dim3 blockDim, dim3 gridDim, cudaStream_t* stream) {
cudaStream_t localStreamVar = *stream;
shiftOneRow<< <gridDim, blockDim, 0, localStreamVar>> >(Y, cols1, rows1);
}
void gpuLS::DropPrefix(cuFloatComplex *Y, cuFloatComplex *dY, int rows1, int cols1, dim3 blockDim, dim3 gridDim, cudaStream_t* stream) {
cudaStream_t localStreamVar = *stream;
dropPrefix<< <gridDim, blockDim, 0, localStreamVar>> >(Y, dY, rows1, cols1);
}
void gpuLS::FindLeastSquaresGPU(cuFloatComplex* dY, cuFloatComplex* dH, cuFloatComplex* dX, int rows1, int cols1, dim3 blockDim, dim3 gridDim, cudaStream_t* stream) {
cudaStream_t localStreamVar = *stream;
findHs<< <gridDim, blockDim, 0, localStreamVar>> >(dY, dH, dX, rows1, cols1);
}
void gpuLS::FindHsqrdforMRC(cuFloatComplex* H, float* Hsqrd, int rows1, int cols1, dim3 blockDim, dim3 gridDim, cudaStream_t* stream) {
cudaStream_t localStreamVar = *stream;
size_t sharedMemSize = blockDim.x*blockDim.y*blockDim.z;
findDistSqrd<< <gridDim, blockDim, sharedMemSize, localStreamVar>> >(H, Hsqrd, rows1, cols1);
}
void gpuLS::MultiplyWithChannelConj(cuFloatComplex* Y, cuFloatComplex* Hconj, cuFloatComplex* Yf, int rows1, int cols1, int syms1, dim3 blockDim, dim3 gridDim, cudaStream_t* stream) {
cudaStream_t localStreamVar = *stream;
multiplyWithChannelConj<< <gridDim, blockDim, 0, localStreamVar>> >(Y, Hconj, Yf, rows1, cols1, syms1);
}
void gpuLS::CombineForMRC(cuFloatComplex* Y, float* Hsqrd, int rows1, int cols1, dim3 blockDim, dim3 gridDim, cudaStream_t* stream) {
cudaStream_t localStreamVar = *stream;
size_t sharedMemSize = blockDim.x*blockDim.y*blockDim.z;
combineForMRC<< <gridDim, blockDim, sharedMemSize, localStreamVar>> >(Y, Hsqrd, rows1, cols1);
}
/*-----------------------------------CuBlas based functions--------------------------------------*/
__global__ void findDistSqrdCuBlas(cuFloatComplex* H, float* Hsqrd, int rows1, int cols1) {
int rows = rows1;
int cols = cols1;
int tid = blockIdx.x*blockDim.x + threadIdx.x;
cublasHandle_t handle;
cublasCreate(&handle);
if (tid < cols) {
cublasScnrm2(handle, rows, &H[tid], cols, &Hsqrd[tid]);
}
cublasDestroy(handle);
}
__global__ void multiplyWithChanEstCuBlas(cuFloatComplex* Y, cuFloatComplex* Hconj, cuFloatComplex* Yf, float* Hsqrd, int rows1, int cols1, int syms1 = 1) {
int rows = rows1, cols = cols1-1, syms = syms1;
int tid = blockIdx.z*gridDim.y*blockDim.y*cols + blockIdx.y*blockDim.x + threadIdx.y*cols + threadIdx.x;
int tid2 = blockIdx.z*gridDim.y*blockDim.y*(cols+1)*rows + blockIdx.y*blockDim.x + threadIdx.y*(cols+1) + threadIdx.x + 1;
int hid = blockIdx.y*blockDim.x + threadIdx.y*cols + threadIdx.x;
cublasHandle_t handle;
cublasCreate(&handle);
/*
if ((blockIdx.y + threadIdx.y)*cols + threadIdx.x < cols) {
Yf[tid] = Y[tid2];
}
__syncthreads();
*/
if (tid2 < rows*(cols+1)*syms) {
cublasCdotc(handle, rows, &Hconj[hid], cols, &Y[tid2], cols + 1, &Yf[tid]);
}
__syncthreads();
if (tid < cols*syms) {
Yf[tid].x = Yf[tid].x/Hsqrd[hid];
Yf[tid].y = Yf[tid].y/Hsqrd[hid];
}
cublasDestroy(handle);
}
/*-----------------------------------Host Functions--------------------------------------*/
void gpuLS::batchedFFT(cuFloatComplex* Y, int rows, int cols, cudaStream_t* stream) {
cudaStream_t localStreamVar = *stream;
cufftHandle plan;
cufftPlan1d(&plan, cols, CUFFT_C2C, rows);
cufftSetStream(plan, localStreamVar);
cufftExecC2C(plan, (cufftComplex *)Y, (cufftComplex *)Y, CUFFT_FORWARD);
}
void gpuLS::firstVector(cuFloatComplex* dY, cuFloatComplex* Y, cuFloatComplex* dH, cuFloatComplex* dX, float* Hsqrd, int rows, int cols, int it){
clock_t start, finish;
//std::cout << "Here...\n";
// CUFFT plan -> do it one time before?
//Read in Y with prefix
buffPtr->readNextSymbolCUDA(dY, it);
if(timerEn){
start = clock();
}
cudaMemcpy(Y, dY, rows*cols*sizeof(*Y), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setReadT(((float)(finish - start))/(float)CLOCKS_PER_SEC, it);
}
if(timerEn){
start = clock();
}
//FFT(Y)
cufftHandle plan;
cufftPlan1d(&plan, cols, CUFFT_C2C, rows);
cufftExecC2C(plan, (cufftComplex *)Y, (cufftComplex *)Y, CUFFT_FORWARD);
cufftDestroy(plan);
cudaDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setFft(((float)(finish - start))/(float)CLOCKS_PER_SEC, it);
}
//find Hconj and Hsqrd
if(timerEn){
start = clock();
}
// dim3 dimBlock(numOfBlocks, threadsPerBlock-1);
findHs<< <numOfBlocks, threadsPerBlock-1>> >(Y, dH, dX, rows, cols);
cudaDeviceSynchronize();
//Save |H|^2 into Hsqrd
findDistSqrd<< <threadsPerBlock-1, numOfBlocks, numOfBlocks*sizeof(cuFloatComplex)>> >(dH, Hsqrd, rows, cols-1);
cudaDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setDecode(((float)(finish - start))/(float)CLOCKS_PER_SEC, it);
}
//free(X);
//cudaFree(H);
//dH holds H conj
//dX holds {H^2)
}
void gpuLS::demodOneSymbol(cuFloatComplex *dY, cuFloatComplex* Y, cuFloatComplex *Hconj, float *Hsqrd,int rows1, int cols1, int it) {
int rows = rows1;
int cols= cols1;
clock_t start, finish;
//Y x conj(H) -> then sum all rows into elements in Hsqrd
//Y = 16x1024+prefix
//conjH = 16x1024
if(it==numberOfSymbolsToTest-1){
//if last one
buffPtr->readLastSymbolCUDA(dY);
} else {
buffPtr->readNextSymbolCUDA(dY, it);
}
if(timerEn){
start = clock();
}
cudaMemcpy(Y, dY, rows*cols*sizeof(*Y), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setReadT(((float)(finish - start))/(float)CLOCKS_PER_SEC, it);
}
if(timerEn){
start = clock();
}
//FFT(Y)
cufftHandle plan;
cufftPlan1d(&plan, threadsPerBlock, CUFFT_C2C, numOfBlocks);
cufftExecC2C(plan, (cufftComplex *)Y, (cufftComplex *)Y, CUFFT_FORWARD);
cufftDestroy(plan);
cudaDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setFft(((float)(finish - start))/(float)CLOCKS_PER_SEC, it);
}
cuFloatComplex* Yf = 0;
cudaMalloc((void**)&Yf, rows*(cols-1)* sizeof (*Yf));
if(timerEn){
start = clock();
}
multiplyWithChannelConj<< <numOfBlocks, threadsPerBlock-1>> >(Y, Hconj, Yf, rows, cols);
cudaDeviceSynchronize();
combineForMRC<< <threadsPerBlock-1, numOfBlocks, numOfBlocks*sizeof(cuFloatComplex)>> >(Yf, Hsqrd, rows, cols-1);
cudaDeviceSynchronize();
cudaMemcpy(dY, Yf, (cols-1)*sizeof(*dY), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
shiftOneRowCPU(dY,cols-1,0);
if(timerEn){
finish = clock();
buffPtr->setDecode(((float)(finish - start))/(float)CLOCKS_PER_SEC, it);
}
cudaFree(Yf);
cudaDeviceSynchronize();
}
void gpuLS::demodOneFrame(cuFloatComplex *dY, cuFloatComplex* Y, cuFloatComplex* dX, cuFloatComplex *Hconj, float *Hsqrd, int rows1, int cols1) {
int rows = rows1;
int cols = cols1;
int maxThreads = devProp.maxThreadsPerBlock;
clock_t start, finish;
//Y x conj(H) -> then sum all rows into elements in Hsqrd
//Y = 16x1024+prefix
//conjH = 16x1024
for (int it = 0; it < numberOfSymbolsToTest; it++) {
if(it==numberOfSymbolsToTest-1){
//if last one
buffPtr->readLastSymbol(&dY[rows*cols*it]);
} else {
buffPtr->readNextSymbol(&dY[rows*cols*it], it);
}
}
if(timerEn){
start = clock();
}
cudaMemcpy(Y, dY, rows*cols*(lenOfBuffer)*sizeof(*Y), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setReadT(((float)(finish - start))/(float)CLOCKS_PER_SEC, 1);
}
if(timerEn){
start = clock();
}
//FFT(Y)
cufftHandle plan;
cufftPlan1d(&plan, cols, CUFFT_C2C, rows*(lenOfBuffer));
cufftExecC2C(plan, (cufftComplex *)Y, (cufftComplex *)Y, CUFFT_FORWARD);
cufftDestroy(plan);
cudaDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setFft(((float)(finish - start))/(float)CLOCKS_PER_SEC, 1);
}
if(timerEn){
start = clock();
}
// dim3 dimBlock(numOfBlocks, threadsPerBlock-1);
if (threadsPerBlock <= maxThreads) {
findHs<< <numOfBlocks, threadsPerBlock-1>> >(Y, Hconj, dX, rows, cols);
cudaDeviceSynchronize();
//Save |H|^2 into Hsqrd
} else {
dim3 chanEstDim(numOfBlocks,ceil(threadsPerBlock/maxThreads));
findHs<< <chanEstDim, maxThreads>> >(Y, Hconj, dX, rows, cols);
cudaDeviceSynchronize();
//Save |H|^2 into Hsqrd
}
findDistSqrd<< <threadsPerBlock-1, numOfBlocks, numOfBlocks*sizeof(cuFloatComplex)>> >(Hconj, Hsqrd, rows, cols-1);
cudaDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setDecode(((float)(finish - start))/(float)CLOCKS_PER_SEC, 0);
}
if(timerEn){
start = clock();
}
cuFloatComplex* Yf = 0;
cudaMalloc((void**)&Yf, rows*(cols-1)*(lenOfBuffer-1)* sizeof (*Yf));
if (threadsPerBlock <= maxThreads) {
dim3 gridDims1(numOfBlocks, 1, lenOfBuffer-1);
multiplyWithChannelConj<< <gridDims1, threadsPerBlock-1>> >(&Y[rows*cols], Hconj, Yf, rows, cols, numberOfSymbolsToTest-1);
cudaDeviceSynchronize();
} else {
dim3 gridDims1(numOfBlocks, ceil(threadsPerBlock/maxThreads), lenOfBuffer-1);
multiplyWithChannelConj<< <gridDims1, maxThreads>> >(&Y[rows*cols], Hconj, Yf, rows, cols, numberOfSymbolsToTest-1);
cudaDeviceSynchronize();
}
dim3 gridDims2(threadsPerBlock-1, lenOfBuffer-1);
combineForMRC<< <gridDims2, numOfBlocks, numOfBlocks*sizeof(cuFloatComplex)>> >(Yf, Hsqrd, rows, cols-1);
cudaDeviceSynchronize();
shiftOneRow<< <lenOfBuffer-1, threadsPerBlock-1, (threadsPerBlock-1)*sizeof(cuFloatComplex)>> >(Yf, cols-1, rows);
cudaDeviceSynchronize();
cudaMemcpy(dY, Yf, (cols-1)*(lenOfBuffer-1)*sizeof(*dY), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
/*
for (int it = 1; it < lenOfBuffer-1; it++) {
shiftOneRowCPU(dY,cols-1,0);
}
*/
if(timerEn){
finish = clock();
buffPtr->setDecode(((float)(finish - start))/(float)CLOCKS_PER_SEC, 1);
}
cudaFree(Yf);
cudaDeviceSynchronize();
}
void gpuLS::demodOneFrameCUDA(cuFloatComplex* dY, cuFloatComplex* Y, cuFloatComplex* dX, cuFloatComplex *Hconj, float *Hsqrd, int rows1, int cols1) {
int rows = rows1;
int cols = cols1;
int maxThreads = devProp.maxThreadsPerBlock;
clock_t start, finish;
//Y x conj(H) -> then sum all rows into elements in Hsqrd
//Y = 16x1024+prefix
//conjH = 16x1024
/*
for (int it = 0; it < numberOfSymbolsToTest; it++) {
if(it==numberOfSymbolsToTest-1){
//if last one
buffPtr->readLastSymbolCUDA(&Y[rows*cols*it]);
} else {
buffPtr->readNextSymbolCUDA(&Y[rows*cols*it], it);
}
}
// cudaDeviceSynchronize();
*/
if(timerEn){
start = clock();
}
//FFT(Y)
cufftHandle plan;
cufftPlan1d(&plan, cols, CUFFT_C2C, rows*(lenOfBuffer));
cufftExecC2C(plan, (cufftComplex *)Y, (cufftComplex *)Y, CUFFT_FORWARD);
cufftDestroy(plan);
cudaDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setFft(((float)(finish - start))/(float)CLOCKS_PER_SEC, 1);
}
if(timerEn){
start = clock();
}
// dim3 dimBlock(numOfBlocks, threadsPerBlock-1);
if (threadsPerBlock <= maxThreads) {
findHs<< <numOfBlocks, threadsPerBlock>> >(Y, Hconj, dX, rows, cols);
cudaDeviceSynchronize();
//Save |H|^2 into Hsqrd
} else {
dim3 chanEstBlockDim1(maxThreads);
dim3 chanEstGridDim1(numOfBlocks,ceil((float)threadsPerBlock/(float)maxThreads));
findHs<< <chanEstGridDim1, chanEstBlockDim1>> >(Y, Hconj, dX, rows, cols);
// cudaDeviceSynchronize();
//Save |H|^2 into Hsqrd
}
// dim3 chanEstBlockDim2(rows,ceil((float)maxThreads/(float)rows));
// dim3 chanEstGridDim2(ceil((float)(cols)/(ceil((float)maxThreads/(float)rows))),ceil((float)rows/(float)maxThreads));
findDistSqrd<< <threadsPerBlock-1,numOfBlocks, numOfBlocks*sizeof(cuFloatComplex)>> >(Hconj, Hsqrd, rows, cols-1);
cudaDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setDecode(((float)(finish - start))/(float)CLOCKS_PER_SEC, 0);
}
if(timerEn){
start = clock();
}
cuFloatComplex* Yf = 0;
cudaMalloc((void**)&Yf, rows*(cols-1)*(lenOfBuffer-1)* sizeof (*Yf));
if (threadsPerBlock <= maxThreads) {
dim3 gridDims1(numOfBlocks, 1, lenOfBuffer-1);
multiplyWithChannelConj<< <gridDims1, threadsPerBlock-1>> >(&Y[rows*cols], Hconj, Yf, rows, cols, numberOfSymbolsToTest-1);
cudaDeviceSynchronize();
} else {
dim3 gridDims1(numOfBlocks, ceil((float)threadsPerBlock/(float)maxThreads), lenOfBuffer-1);
multiplyWithChannelConj<< <gridDims1, maxThreads>> >(&Y[rows*cols], Hconj, Yf, rows, cols, numberOfSymbolsToTest-1);
cudaDeviceSynchronize();
}
dim3 gridDims2(threadsPerBlock-1, lenOfBuffer-1);
combineForMRC<< <gridDims2, numOfBlocks, numOfBlocks*sizeof(cuFloatComplex)>> >(Yf, Hsqrd, rows, cols-1);
cudaDeviceSynchronize();
if (threadsPerBlock <= maxThreads) {
dim3 gridDims3(1,lenOfBuffer-1);
shiftOneRow<< <gridDims3, threadsPerBlock-1, (threadsPerBlock-1)*sizeof(cuFloatComplex)>> >(Yf, cols-1, rows);
cudaDeviceSynchronize();
} else {
dim3 gridDims3(ceil((float)threadsPerBlock/(float)maxThreads), lenOfBuffer-1);
shiftOneRow<< <gridDims3, maxThreads, (threadsPerBlock-1)*sizeof(cuFloatComplex)>> >(Yf, cols-1, rows);
cudaDeviceSynchronize();
}
cudaMemcpy(dY, Yf, (cols-1)*(lenOfBuffer-1)*sizeof(*dY), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
/*
for (int it = 1; it < lenOfBuffer-1; it++) {
shiftOneRowCPU(dY,cols-1,0);
}
*/
if(timerEn){
finish = clock();
buffPtr->setDecode(((float)(finish - start))/(float)CLOCKS_PER_SEC, 1);
}
cudaFree(Yf);
}
void gpuLS::demodOptimized(cuFloatComplex* dY, cuFloatComplex* Y, cuFloatComplex* dX, cuFloatComplex *Hconj, float *Hsqrd, int rows1, int cols1) {
// cublasHandle_t handle;
// cublasCreate(&handle);
int rows = rows1;
int cols = cols1;
int maxThreads = devProp.maxThreadsPerBlock;
clock_t start, finish;
//Y x conj(H) -> then sum all rows into elements in Hsqrd
//Y = 16x1024+prefix
//conjH = 16x1024
// cudaDeviceSynchronize();
if(timerEn){
start = clock();
}
//FFT(Y)
cufftHandle plan;
cufftPlan1d(&plan, cols, CUFFT_C2C, rows*(lenOfBuffer));
cufftExecC2C(plan, (cufftComplex *)Y, (cufftComplex *)Y, CUFFT_FORWARD);
cufftDestroy(plan);
// cudaDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setFft(((float)(finish - start))/(float)CLOCKS_PER_SEC, 1);
}
if(timerEn){
start = clock();
}
if (threadsPerBlock <= maxThreads) {
dim3 chanEstBlockDim(cols,ceil((float)maxThreads/(float)cols));
dim3 chanEstGridDim(ceil((float)rows/ceil((float)maxThreads/(float)cols)),1);
findHs<< <chanEstGridDim, chanEstBlockDim>> >(Y, Hconj, dX, rows, cols);
cudaDeviceSynchronize();
//Save |H|^2 into Hsqrd
} else {
dim3 chanEstBlockDim1(maxThreads);
dim3 chanEstGridDim1(rows,ceil((float)cols/(float)maxThreads));
findHs<< <chanEstGridDim1, chanEstBlockDim1>> >(Y, Hconj, dX, rows, cols);
cudaDeviceSynchronize();
//Save |H|^2 into Hsqrd
}
dim3 distSqrdBlockDim(rows,ceil((float)maxThreads/(float)rows));
dim3 distSqrdGridDim(ceil((float)(cols-1)/ceil((float)maxThreads/(float)rows)),1);
findDistSqrd<< <distSqrdGridDim, distSqrdBlockDim, maxThreads*sizeof(cuFloatComplex)>> >(Hconj, Hsqrd, rows, cols-1);
if(timerEn){
finish = clock();
buffPtr->setDecode(((float)(finish - start))/(float)CLOCKS_PER_SEC, 0);
}
if(timerEn){
start = clock();
}
cuFloatComplex* Yf = 0;
cudaMalloc((void**)&Yf, rows*(cols-1)*(lenOfBuffer-1)* sizeof (*Yf));
if (threadsPerBlock <= maxThreads) {
dim3 blockDims1(cols,ceil((float)maxThreads/(float)cols));
dim3 gridDims1(ceil((float)rows/ceil((float)maxThreads/(float)cols)), 1, lenOfBuffer-1);
multiplyWithChannelConj<< <gridDims1, blockDims1>> >(&Y[rows*cols], Hconj, Yf, rows, cols, numberOfSymbolsToTest-1);
cudaDeviceSynchronize();
} else {
dim3 gridDims1(rows, ceil((float)cols/(float)maxThreads), lenOfBuffer-1);
multiplyWithChannelConj<< <gridDims1, maxThreads>> >(&Y[rows*cols], Hconj, Yf, rows, cols, numberOfSymbolsToTest-1);
cudaDeviceSynchronize();
}
dim3 blockDims2(rows,ceil((float)maxThreads/(float)rows));
dim3 gridDims2(ceil((float)(cols-1)/ceil((float)maxThreads/(float)rows)), lenOfBuffer-1);
combineForMRC<< <gridDims2, blockDims2, maxThreads*sizeof(cuFloatComplex)>> >(Yf, Hsqrd, rows, cols-1);
cudaDeviceSynchronize();
if (cols <= maxThreads) {
dim3 gridDims3(1,lenOfBuffer-1);
shiftOneRow<< <gridDims3, cols-1, (cols-1)*sizeof(cuFloatComplex)>> >(Yf, cols-1, rows);
cudaDeviceSynchronize();
} else {
dim3 gridDims3(ceil((float)cols/(float)maxThreads), lenOfBuffer-1);
shiftOneRow<< <gridDims3, maxThreads, (cols-1)*sizeof(cuFloatComplex)>> >(Yf, cols-1, rows);
cudaDeviceSynchronize();
}
cudaMemcpy(dY, Yf, (cols-1)*(lenOfBuffer-1)*sizeof(*dY), cudaMemcpyDeviceToHost);
if(timerEn){
finish = clock();
buffPtr->setDecode(((float)(finish - start))/(float)CLOCKS_PER_SEC, 1);
}
cudaFree(Yf);
}
void gpuLS::demodCuBlas(cuFloatComplex* dY, cuFloatComplex* Y, cuFloatComplex* dX, cuFloatComplex *Hconj, float *Hsqrd, int rows1, int cols1) {
// cublasHandle_t handle;
// cublasCreate(&handle);
int rows = rows1;
int cols = cols1;
int maxThreads = devProp.maxThreadsPerBlock;
clock_t start, finish;
//Y x conj(H) -> then sum all rows into elements in Hsqrd
//Y = 16x1024+prefix
//conjH = 16x1024
// cudaDeviceSynchronize();
if(timerEn){
start = clock();
}
//FFT(Y)
cufftHandle plan;
cufftPlan1d(&plan, cols, CUFFT_C2C, rows*(lenOfBuffer));
cufftExecC2C(plan, (cufftComplex *)Y, (cufftComplex *)Y, CUFFT_FORWARD);
cufftDestroy(plan);
// cudaDeviceSynchronize();
if(timerEn){
finish = clock();
buffPtr->setFft(((float)(finish - start))/(float)CLOCKS_PER_SEC, 1);
}
if(timerEn){
start = clock();
}
if (cols <= maxThreads) {
dim3 chanEstBlockDim(cols,ceil((float)maxThreads/(float)cols));
dim3 chanEstGridDim(ceil((float)rows/ceil((float)maxThreads/(float)cols)),1);
findHs<< <chanEstGridDim, chanEstBlockDim>> >(Y, Hconj, dX, rows, cols);
findDistSqrdCuBlas<< <1, cols-1>> >(Hconj, Hsqrd, rows, cols-1);
// cudaDeviceSynchronize();
//Save |H|^2 into Hsqrd
} else {
dim3 chanEstBlockDim1(maxThreads);
dim3 chanEstGridDim1(rows,ceil((float)cols/(float)maxThreads));
findHs<< <chanEstGridDim1, chanEstBlockDim1>> >(Y, Hconj, dX, rows, cols);
findDistSqrdCuBlas<< <ceil((float)cols/(float)maxThreads), chanEstBlockDim1>> >(Hconj, Hsqrd, rows, cols-1);
// cudaDeviceSynchronize();
//Save |H|^2 into Hsqrd
}
if(timerEn){
finish = clock();
buffPtr->setDecode(((float)(finish - start))/(float)CLOCKS_PER_SEC, 0);
}
if(timerEn){
start = clock();
}
cuFloatComplex* Yf = 0;
cudaMalloc((void**)&Yf, rows*(cols-1)*(lenOfBuffer-1)* sizeof (*Yf));
if (cols <= maxThreads) {
dim3 blockDims1(cols,ceil((float)maxThreads/(float)cols));
dim3 gridDims1(1, 1, ceil((float)lenOfBuffer-1/(ceil((float)maxThreads/(float)cols))));
multiplyWithChanEstCuBlas<< <gridDims1, blockDims1>> >(&Y[rows*cols], Hconj, Yf, Hsqrd, rows, cols, numberOfSymbolsToTest-1);
// cudaDeviceSynchronize();
} else {
dim3 gridDims1(1, ceil((float)cols/(float)maxThreads), lenOfBuffer-1);
multiplyWithChanEstCuBlas<< <gridDims1, maxThreads>> >(&Y[rows*cols], Hconj, Yf, Hsqrd, rows, cols, numberOfSymbolsToTest-1);
// cudaDeviceSynchronize();
}
if (cols <= maxThreads) {
dim3 gridDims3(1,lenOfBuffer-1);
shiftOneRow<< <gridDims3, cols-1, (cols-1)*sizeof(cuFloatComplex)>> >(Yf, cols-1, rows);
cudaDeviceSynchronize();
} else {
dim3 gridDims3(ceil((float)cols/(float)maxThreads), lenOfBuffer-1);
shiftOneRow<< <gridDims3, maxThreads, (cols-1)*sizeof(cuFloatComplex)>> >(Yf, cols-1, rows);
cudaDeviceSynchronize();
}
cudaMemcpy(dY, Yf, (cols-1)*(lenOfBuffer-1)*sizeof(*dY), cudaMemcpyDeviceToHost);
if(timerEn){
finish = clock();
buffPtr->setDecode(((float)(finish - start))/(float)CLOCKS_PER_SEC, 1);
}
cudaFree(Yf);
} |
73c20fc7e917d6629c5900d96e2036b42fba3447.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrixMultiplyTiled.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
hipMalloc(&C, XSIZE*YSIZE);
int numARows = 1;
int numAColumns = 1;
int numBRows = 1;
int numBColumns = 1;
int numCRows = 1;
int numCColumns = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
matrixMultiplyTiled), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,numARows,numAColumns,numBRows,numBColumns,numCRows,numCColumns);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
matrixMultiplyTiled), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,numARows,numAColumns,numBRows,numBColumns,numCRows,numCColumns);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
matrixMultiplyTiled), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,numARows,numAColumns,numBRows,numBColumns,numCRows,numCColumns);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 73c20fc7e917d6629c5900d96e2036b42fba3447.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matrixMultiplyTiled.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
cudaMalloc(&C, XSIZE*YSIZE);
int numARows = 1;
int numAColumns = 1;
int numBRows = 1;
int numBColumns = 1;
int numCRows = 1;
int numCColumns = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
matrixMultiplyTiled<<<gridBlock,threadBlock>>>(A,B,C,numARows,numAColumns,numBRows,numBColumns,numCRows,numCColumns);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
matrixMultiplyTiled<<<gridBlock,threadBlock>>>(A,B,C,numARows,numAColumns,numBRows,numBColumns,numCRows,numCColumns);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
matrixMultiplyTiled<<<gridBlock,threadBlock>>>(A,B,C,numARows,numAColumns,numBRows,numBColumns,numCRows,numCColumns);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
51becf094b6b365facf931ac9dcba173269904f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/l1_feature_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void shostakovich_sym_no_5(const int count, const Dtype* bottom_data, Dtype* diff_data) {
CUDA_KERNEL_LOOP(index, count) {
if (bottom_data[index] == 0) {
diff_data[index] = 0;
}
}
}
template <typename Dtype>
void L1FeatureLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int count = bottom[0]->count();
//caffe_gpu_sub(
// count,
// bottom[0]->gpu_data(),
// bottom[1]->gpu_data(),
// diff_.mutable_gpu_data());
//caffe_copy(count, bottom[0]->gpu_data(), diff_.mutable_gpu_data());
//Dtype dot;
//caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot);
//Dtype loss = dot / bottom[0]->num() / Dtype(2);
//top[0]->mutable_cpu_data()[0] = loss;
caffe_gpu_abs(count, bottom[0]->gpu_data(), temp_abs_.mutable_gpu_data());
caffe_gpu_div(count, bottom[0]->gpu_data(), temp_abs_.gpu_data(), diff_.mutable_gpu_data());
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* diff_data = diff_.mutable_gpu_data();
//for (int i=0; i < bottom[0]->count(); i++) {
// if ((bottom_data[i]) == 0) {
// diff_data[i] = 0;
// }
//}
hipLaunchKernelGGL(( shostakovich_sym_no_5<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, diff_data);
CUDA_POST_KERNEL_CHECK;
Dtype sum;
//caffe_gpu_asum(count, temp_abs_.gpu_data(), &sum);
caffe_gpu_dot(count, temp_abs_.gpu_data(), uni_temp_.gpu_data(), &sum);
Dtype loss = sum / bottom[0]->num();
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
void L1FeatureLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
/*for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const Dtype sign = (i == 0) ? 1 : -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
caffe_gpu_axpby(
bottom[i]->count(), // count
alpha, // alpha
diff_.gpu_data(), // a
Dtype(0), // beta
bottom[i]->mutable_gpu_diff()); // b
}
}*/
const Dtype sign = 1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[0]->num();
caffe_gpu_axpby(
bottom[0]->count(), // count
alpha, // alpha
diff_.gpu_data(), // a
Dtype(0), // beta
bottom[0]->mutable_gpu_diff()); // b
caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff());
}
INSTANTIATE_LAYER_GPU_FUNCS(L1FeatureLossLayer);
} // namespace caffe
| 51becf094b6b365facf931ac9dcba173269904f4.cu | #include <vector>
#include "caffe/layers/l1_feature_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void shostakovich_sym_no_5(const int count, const Dtype* bottom_data, Dtype* diff_data) {
CUDA_KERNEL_LOOP(index, count) {
if (bottom_data[index] == 0) {
diff_data[index] = 0;
}
}
}
template <typename Dtype>
void L1FeatureLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int count = bottom[0]->count();
//caffe_gpu_sub(
// count,
// bottom[0]->gpu_data(),
// bottom[1]->gpu_data(),
// diff_.mutable_gpu_data());
//caffe_copy(count, bottom[0]->gpu_data(), diff_.mutable_gpu_data());
//Dtype dot;
//caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot);
//Dtype loss = dot / bottom[0]->num() / Dtype(2);
//top[0]->mutable_cpu_data()[0] = loss;
caffe_gpu_abs(count, bottom[0]->gpu_data(), temp_abs_.mutable_gpu_data());
caffe_gpu_div(count, bottom[0]->gpu_data(), temp_abs_.gpu_data(), diff_.mutable_gpu_data());
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* diff_data = diff_.mutable_gpu_data();
//for (int i=0; i < bottom[0]->count(); i++) {
// if ((bottom_data[i]) == 0) {
// diff_data[i] = 0;
// }
//}
shostakovich_sym_no_5<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, diff_data);
CUDA_POST_KERNEL_CHECK;
Dtype sum;
//caffe_gpu_asum(count, temp_abs_.gpu_data(), &sum);
caffe_gpu_dot(count, temp_abs_.gpu_data(), uni_temp_.gpu_data(), &sum);
Dtype loss = sum / bottom[0]->num();
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
void L1FeatureLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
/*for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const Dtype sign = (i == 0) ? 1 : -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
caffe_gpu_axpby(
bottom[i]->count(), // count
alpha, // alpha
diff_.gpu_data(), // a
Dtype(0), // beta
bottom[i]->mutable_gpu_diff()); // b
}
}*/
const Dtype sign = 1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[0]->num();
caffe_gpu_axpby(
bottom[0]->count(), // count
alpha, // alpha
diff_.gpu_data(), // a
Dtype(0), // beta
bottom[0]->mutable_gpu_diff()); // b
caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff());
}
INSTANTIATE_LAYER_GPU_FUNCS(L1FeatureLossLayer);
} // namespace caffe
|