hip_filename (string, 5–84 chars) | hip_content (string, 79–9.69M chars) | cuda_filename (string, 4–83 chars) | cuda_content (string, 19–9.69M chars) |
---|---|---|---|
370b22c627ef7bbcbe91c7fd4e8d816408be6ba0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "matmul/SpMat.h"
#include "matmul/GPUVector.h"
#include "lsqr_gpu.h"
#include <hipblas.h>
#include <hipsparse.h>
// method for computing the norm of a vector
double norm(GPUVector &v) {
double norm = v.norm();
return norm;
}
// reads vector from file
void read_vector(char* file_name, double** data, int &n) {
FILE *file = fopen(file_name, "rb");
if (file==NULL) {fputs ("File error\n",stderr); exit (1);}
// get array dimensions from the file name
char *token = strtok(file_name, "_");
token = strtok(NULL, "_");
n = std::stoi( token );
// read all data from file and load it to the vector memory
*data = (double*) malloc (sizeof(double) * n);
if (*data == NULL) {fputs ("Memory error",stderr); exit (2);}
fread(*data,sizeof(double),n,file);
fclose(file);
}
// reads matrix from file and parses it to csr format
void read_sparse_matrix(char* file_name, int** rowPtr, int** colInd, double** val, int& n, int& m, int& totalNnz) {
FILE *file = fopen(file_name, "rb");
if (file==NULL) {fputs ("File error\n",stderr); exit (1);}
// read matrix dimensions from file name
char *token = strtok(file_name, "_");
token = strtok(NULL, "_");
m = std::stoi( token );
token = strtok(NULL, "_");
n = std::stoi( token );
// create the temporary row buffer, per-row nonzero counts, and the CSR row pointer array
double *data = (double*) malloc (sizeof(double) * n);
int * rowNnz = (int*) malloc(sizeof(int)*m);
*rowPtr = (int*) malloc(sizeof(int)*(m+1));
totalNnz = 0;
int rowCounter = 0;
(*rowPtr)[0] = 0;
if (data == NULL) {fputs ("Memory error",stderr); exit (2);}
// read each line of the file and count the nnz elements per row
while(fread(data,sizeof(double),n,file)) {
rowNnz[rowCounter] = 0;
for(int i = 0; i < n; i++)
if(std::abs(data[i]) > ZERO)
rowNnz[rowCounter]++;
totalNnz += rowNnz[rowCounter];
rowCounter++;
(*rowPtr)[rowCounter] = totalNnz;
}
// re-read the file and fill values with according column indexes
rewind(file);
*val = (double*) malloc(sizeof(double)*totalNnz);
*colInd = (int*) malloc(sizeof(int)*totalNnz);
int counter = 0;
while(fread(data,sizeof(double),n,file)) {
for(int i = 0; i < n; i++)
if(std::abs(data[i]) > ZERO){
(*val)[counter] = data[i];
(*colInd)[counter] = i;
counter++;
}
}
fclose(file);
FREE(data);
FREE(rowNnz);
}
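// Worked example (illustrative only, not part of the original file): reading a dense
// 2x3 matrix
//   [ 1 0 2 ]
//   [ 0 0 3 ]
// row by row with the two passes above would produce the CSR arrays
//   rowPtr = {0, 2, 3}, colInd = {0, 2, 2}, val = {1.0, 2.0, 3.0}, totalNnz = 3,
// since each rowPtr entry accumulates the nonzeros counted so far.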
// expected input: mxn matrix binary file named "matrix_m_n", m vector binary file named "vector_m"
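// For example (hypothetical file and binary names, following the convention above):
// a 1000x500 matrix stored as row-major doubles in "matrix_1000_500" plus "vector_1000"
// could be run as: ./lsqr_gpu matrix_1000_500 vector_1000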
int main(int argc, char *argv[])
{
hipblasHandle_t handle;
hipblasStatus_t status = hipblasCreate(&handle);
hipsparseHandle_t cusparseH;
hipsparseStatus_t cusparseStat = hipsparseCreate(&cusparseH);
assert(cusparseStat == HIPSPARSE_STATUS_SUCCESS);
if (status != HIPBLAS_STATUS_SUCCESS) {
printf("Error creating handle\n");
exit(-1);
}
if(argc < 3) {
printf("Matrix and vector file required\n");
return 0;
}
char* matrix_file_name = argv[1];
char* vector_file_name = argv[2];
int* rowPtr = NULL;
int* colInd = NULL;
double* val = NULL;
int n;
int m;
int totalNnz;
// reads matrix in csr format from file
read_sparse_matrix(matrix_file_name, &rowPtr, &colInd, &val, n, m, totalNnz);
double *vec_data = NULL;
int vec_dim;
// reads vector from file
read_vector(vector_file_name, &vec_data, vec_dim);
if(vec_dim != m) {
printf("Vector dimension (%d) must agree with number of rows (%d) in matrix",vec_dim,m);
return 0;
}
GPUVector b(handle, vec_dim,vec_data);
GPUVector x(handle, n);
SpMat A(rowPtr, colInd, val, m, n, totalNnz, cusparseH);
printf("Starting Calculation (n = %d,m = %d)\n",n,m);
// Start GPU timing
hipEvent_t evStart, evStop;
hipEventCreate(&evStart);
hipEventCreate(&evStop);
hipEventRecord(evStart, 0);
lsqr(A,b,x);
// Stop GPU timing
hipEventRecord(evStop, 0);
hipEventSynchronize(evStop);
float elapsedTime_ms;
hipEventElapsedTime(&elapsedTime_ms, evStart, evStop);
hipEventDestroy(evStart);
hipEventDestroy(evStop);
// get resulting vector
double *x_cpu = new double[n];
hipMemcpy(x_cpu, x.elements, sizeof(double) * n, hipMemcpyDeviceToHost);
printf("elapsed time [s]: %f\n",elapsedTime_ms/1000);
GPUVector residual_vec = dot(A,x) - b;
printf("final residual = %f\n",norm(residual_vec));
// print vector
printf("x = (");
for(int i = 0; i < n; i++)
printf("%f ",x_cpu[i]);
printf(")\n");
FREE(x_cpu);
FREE(rowPtr);
FREE(colInd);
FREE(val);
FREE(vec_data);
return 0;
}
| 370b22c627ef7bbcbe91c7fd4e8d816408be6ba0.cu | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <chrono>
#include <cuda_runtime.h>
#include <cuda.h>
#include "matmul/SpMat.h"
#include "matmul/GPUVector.h"
#include "lsqr_gpu.h"
#include <cublas_v2.h>
#include <cusparse.h>
// method for computing the norm of a vector
double norm(GPUVector &v) {
double norm = v.norm();
return norm;
}
// reads vector from file
void read_vector(char* file_name, double** data, int &n) {
FILE *file = fopen(file_name, "rb");
if (file==NULL) {fputs ("File error\n",stderr); exit (1);}
// get array dimensions from the file name
char *token = strtok(file_name, "_");
token = strtok(NULL, "_");
n = std::stoi( token );
// read all data from file and load it to the vector memory
*data = (double*) malloc (sizeof(double) * n);
if (*data == NULL) {fputs ("Memory error",stderr); exit (2);}
fread(*data,sizeof(double),n,file);
fclose(file);
}
// reads matrix from file and parses it to csr format
void read_sparse_matrix(char* file_name, int** rowPtr, int** colInd, double** val, int& n, int& m, int& totalNnz) {
FILE *file = fopen(file_name, "rb");
if (file==NULL) {fputs ("File error\n",stderr); exit (1);}
// read matrix dimensions from file name
char *token = strtok(file_name, "_");
token = strtok(NULL, "_");
m = std::stoi( token );
token = strtok(NULL, "_");
n = std::stoi( token );
// create the temporary row buffer, per-row nonzero counts, and the CSR row pointer array
double *data = (double*) malloc (sizeof(double) * n);
int * rowNnz = (int*) malloc(sizeof(int)*m);
*rowPtr = (int*) malloc(sizeof(int)*(m+1));
totalNnz = 0;
int rowCounter = 0;
(*rowPtr)[0] = 0;
if (data == NULL) {fputs ("Memory error",stderr); exit (2);}
// read each line of the file and count the nnz elements per row
while(fread(data,sizeof(double),n,file)) {
rowNnz[rowCounter] = 0;
for(int i = 0; i < n; i++)
if(std::abs(data[i]) > ZERO)
rowNnz[rowCounter]++;
totalNnz += rowNnz[rowCounter];
rowCounter++;
(*rowPtr)[rowCounter] = totalNnz;
}
// re-read the file and fill values with according column indexes
rewind(file);
*val = (double*) malloc(sizeof(double)*totalNnz);
*colInd = (int*) malloc(sizeof(int)*totalNnz);
int counter = 0;
while(fread(data,sizeof(double),n,file)) {
for(int i = 0; i < n; i++)
if(std::abs(data[i]) > ZERO){
(*val)[counter] = data[i];
(*colInd)[counter] = i;
counter++;
}
}
fclose(file);
FREE(data);
FREE(rowNnz);
}
// expected input: mxn matrix binary file named "matrix_m_n", m vector binary file named "vector_m"
int main(int argc, char *argv[])
{
cublasHandle_t handle;
cublasStatus_t status = cublasCreate(&handle);
cusparseHandle_t cusparseH;
cusparseStatus_t cusparseStat = cusparseCreate(&cusparseH);
assert(cusparseStat == CUSPARSE_STATUS_SUCCESS);
if (status != CUBLAS_STATUS_SUCCESS) {
printf("Error creating handle\n");
exit(-1);
}
if(argc < 3) {
printf("Matrix and vector file required\n");
return 0;
}
char* matrix_file_name = argv[1];
char* vector_file_name = argv[2];
int* rowPtr = NULL;
int* colInd = NULL;
double* val = NULL;
int n;
int m;
int totalNnz;
// reads matrix in csr format from file
read_sparse_matrix(matrix_file_name, &rowPtr, &colInd, &val, n, m, totalNnz);
double *vec_data = NULL;
int vec_dim;
// reads vector from file
read_vector(vector_file_name, &vec_data, vec_dim);
if(vec_dim != m) {
printf("Vector dimension (%d) must agree with number of rows (%d) in matrix",vec_dim,m);
return 0;
}
GPUVector b(handle, vec_dim,vec_data);
GPUVector x(handle, n);
SpMat A(rowPtr, colInd, val, m, n, totalNnz, cusparseH);
printf("Starting Calculation (n = %d,m = %d)\n",n,m);
// Start GPU timing
cudaEvent_t evStart, evStop;
cudaEventCreate(&evStart);
cudaEventCreate(&evStop);
cudaEventRecord(evStart, 0);
lsqr(A,b,x);
// Stop GPU timing
cudaEventRecord(evStop, 0);
cudaEventSynchronize(evStop);
float elapsedTime_ms;
cudaEventElapsedTime(&elapsedTime_ms, evStart, evStop);
cudaEventDestroy(evStart);
cudaEventDestroy(evStop);
// get resulting vector
double *x_cpu = new double[n];
cudaMemcpy(x_cpu, x.elements, sizeof(double) * n, cudaMemcpyDeviceToHost);
printf("elapsed time [s]: %f\n",elapsedTime_ms/1000);
GPUVector residual_vec = dot(A,x) - b;
printf("final residual = %f\n",norm(residual_vec));
// print vector
printf("x = (");
for(int i = 0; i < n; i++)
printf("%f ",x_cpu[i]);
printf(")\n");
FREE(x_cpu);
FREE(rowPtr);
FREE(colInd);
FREE(val);
FREE(vec_data);
return 0;
}
|
9ca3ed840fc369cd07183188183b7953ae9b3fec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma once
#include "cuda_code.cuh"
#include <stdio.h>
#include<stdio.h>
#include<stdlib.h>
#include <math.h>
#include <Windows.h>
#include <time.h>
#include <assert.h>
#if USE_CPU_TIMER == 1
__int64 start, freq, end;
#define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); }
#define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); }
#else
#define CHECK_TIME_START
#define CHECK_TIME_END(a)
#endif
#if USE_GPU_TIMER == 1
hipEvent_t cuda_timer_start, cuda_timer_stop;
#define CUDA_STREAM_0 (0)
void create_device_timer()
{
CUDA_CALL(hipEventCreate(&cuda_timer_start));
CUDA_CALL(hipEventCreate(&cuda_timer_stop));
}
void destroy_device_timer()
{
CUDA_CALL(hipEventDestroy(cuda_timer_start));
CUDA_CALL(hipEventDestroy(cuda_timer_stop));
}
inline void start_device_timer()
{
hipEventRecord(cuda_timer_start, CUDA_STREAM_0);
}
inline TIMER_T stop_device_timer()
{
TIMER_T ms;
hipEventRecord(cuda_timer_stop, CUDA_STREAM_0);
hipEventSynchronize(cuda_timer_stop);
hipEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop);
return ms;
}
#define CHECK_TIME_INIT_GPU() { create_device_timer(); }
#define CHECK_TIME_START_GPU() { start_device_timer(); }
#define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); }
#define CHECK_TIME_DEST_GPU() { destroy_device_timer(); }
#else
#define CHECK_TIME_INIT_GPU()
#define CHECK_TIME_START_GPU()
#define CHECK_TIME_END_GPU(a)
#define CHECK_TIME_DEST_GPU()
#endif
TIMER_T compute_time = 0;
TIMER_T device_time = 0;
#define Window 2
__constant__ float constant_gaussian_kernel[ 25 ];
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Gaussian filtering kernel
// does not use shared memory
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void Gaussian_kernel_no_shared(IN unsigned char *d_bitmaps, OUT unsigned char *d_Gaussian, long width, long height) {
/*Todo*/
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Gaussian filtering kernel
// uses shared memory.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
extern __shared__ unsigned char sharedBuffer[];
__global__ void Gaussian_kernel_shared(INOUT unsigned char *d_bitmaps, OUT unsigned char *d_Gaussian, long width, long height) {
/*Todo*/
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Sets up the Gaussian kernel stored in the constant variable
// used later during Gaussian filtering.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void Set_Gaussain_Kernel(){
float _1 = 1.0f / 256.0f;
float _4 = _1 * 4;
float _6 = _1 * 6;
float _16 = _1 * 16;
float _24 = _1 * 24;
float _36 = _1 * 36;
float *p_gaussian_kernel = new float[25];
p_gaussian_kernel[0] = p_gaussian_kernel[4] = p_gaussian_kernel[20] = p_gaussian_kernel[24] = _1;
p_gaussian_kernel[1] = p_gaussian_kernel[3] = p_gaussian_kernel[5] = p_gaussian_kernel[9]= _4;
p_gaussian_kernel[15] = p_gaussian_kernel[19] = p_gaussian_kernel[21] = p_gaussian_kernel[23] = _4;
p_gaussian_kernel[2] = p_gaussian_kernel[10] = p_gaussian_kernel[14] = p_gaussian_kernel[22] = _6;
p_gaussian_kernel[6] = p_gaussian_kernel[8] = p_gaussian_kernel[16] = p_gaussian_kernel[18] = _16;
p_gaussian_kernel[7] = p_gaussian_kernel[11] =p_gaussian_kernel[13] = p_gaussian_kernel[17] = _24;
p_gaussian_kernel[12] = _36;
hipMemcpyToSymbol( constant_gaussian_kernel, p_gaussian_kernel, sizeof( float ) * 25 );
delete[] p_gaussian_kernel;
}
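// For reference (derived from the assignments above): the 5x5 kernel copied to constant
// memory is the binomial (1 4 6 4 1) outer product scaled by 1/256, laid out row-major as
//    1  4  6  4  1
//    4 16 24 16  4
//    6 24 36 24  6
//    4 16 24 16  4
//    1  4  6  4  1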
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Prepares the data needed before launching the kernel and selects which kernel to run
// Pass either the NO_SHARED or SHARED macro as Shared_flag;
// the kernel matching that flag value is executed
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
float Do_Gaussian_on_GPU(IN unsigned char *p_bitmaps, OUT unsigned char *p_Gaussian, long width, long height, int Shared_flag)
{
/*Todo*/
return device_time;
} | 9ca3ed840fc369cd07183188183b7953ae9b3fec.cu | #pragma once
#include "cuda_code.cuh"
#include <stdio.h>
#include<stdio.h>
#include<stdlib.h>
#include <math.h>
#include <Windows.h>
#include <time.h>
#include <assert.h>
#if USE_CPU_TIMER == 1
__int64 start, freq, end;
#define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); }
#define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); }
#else
#define CHECK_TIME_START
#define CHECK_TIME_END(a)
#endif
#if USE_GPU_TIMER == 1
cudaEvent_t cuda_timer_start, cuda_timer_stop;
#define CUDA_STREAM_0 (0)
void create_device_timer()
{
CUDA_CALL(cudaEventCreate(&cuda_timer_start));
CUDA_CALL(cudaEventCreate(&cuda_timer_stop));
}
void destroy_device_timer()
{
CUDA_CALL(cudaEventDestroy(cuda_timer_start));
CUDA_CALL(cudaEventDestroy(cuda_timer_stop));
}
inline void start_device_timer()
{
cudaEventRecord(cuda_timer_start, CUDA_STREAM_0);
}
inline TIMER_T stop_device_timer()
{
TIMER_T ms;
cudaEventRecord(cuda_timer_stop, CUDA_STREAM_0);
cudaEventSynchronize(cuda_timer_stop);
cudaEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop);
return ms;
}
#define CHECK_TIME_INIT_GPU() { create_device_timer(); }
#define CHECK_TIME_START_GPU() { start_device_timer(); }
#define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); }
#define CHECK_TIME_DEST_GPU() { destroy_device_timer(); }
#else
#define CHECK_TIME_INIT_GPU()
#define CHECK_TIME_START_GPU()
#define CHECK_TIME_END_GPU(a)
#define CHECK_TIME_DEST_GPU()
#endif
TIMER_T compute_time = 0;
TIMER_T device_time = 0;
#define Window 2
__constant__ float constant_gaussian_kernel[ 25 ];
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Gaussian filtering kernel
// does not use shared memory
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void Gaussian_kernel_no_shared(IN unsigned char *d_bitmaps, OUT unsigned char *d_Gaussian, long width, long height) {
/*Todo*/
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Gaussian filtering kernel
// uses shared memory.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
extern __shared__ unsigned char sharedBuffer[];
__global__ void Gaussian_kernel_shared(INOUT unsigned char *d_bitmaps, OUT unsigned char *d_Gaussian, long width, long height) {
/*Todo*/
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Sets up the Gaussian kernel stored in the constant variable
// used later during Gaussian filtering.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void Set_Gaussain_Kernel(){
float _1 = 1.0f / 256.0f;
float _4 = _1 * 4;
float _6 = _1 * 6;
float _16 = _1 * 16;
float _24 = _1 * 24;
float _36 = _1 * 36;
float *p_gaussian_kernel = new float[25];
p_gaussian_kernel[0] = p_gaussian_kernel[4] = p_gaussian_kernel[20] = p_gaussian_kernel[24] = _1;
p_gaussian_kernel[1] = p_gaussian_kernel[3] = p_gaussian_kernel[5] = p_gaussian_kernel[9]= _4;
p_gaussian_kernel[15] = p_gaussian_kernel[19] = p_gaussian_kernel[21] = p_gaussian_kernel[23] = _4;
p_gaussian_kernel[2] = p_gaussian_kernel[10] = p_gaussian_kernel[14] = p_gaussian_kernel[22] = _6;
p_gaussian_kernel[6] = p_gaussian_kernel[8] = p_gaussian_kernel[16] = p_gaussian_kernel[18] = _16;
p_gaussian_kernel[7] = p_gaussian_kernel[11] =p_gaussian_kernel[13] = p_gaussian_kernel[17] = _24;
p_gaussian_kernel[12] = _36;
cudaMemcpyToSymbol( constant_gaussian_kernel, p_gaussian_kernel, sizeof( float ) * 25 );
delete[] p_gaussian_kernel;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Prepares the data needed before launching the kernel and selects which kernel to run
// Pass either the NO_SHARED or SHARED macro as Shared_flag;
// the kernel matching that flag value is executed
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
float Do_Gaussian_on_GPU(IN unsigned char *p_bitmaps, OUT unsigned char *p_Gaussian, long width, long height, int Shared_flag)
{
/*Todo*/
return device_time;
} |
3b3a5296adf47c9a424324c737a7a8ba37269929.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <chrono>
#include "RadixSort.h"
#include "Scan.h"
void makeRandomUintVector(unsigned int *a, unsigned int numElements, unsigned int keybits);
bool verifySortUint(unsigned int *keysSorted,
unsigned int *valuesSorted,
unsigned int *keysUnsorted,
unsigned int len);
int main(int argc, const char **argv)
{
if (argc != 2) {
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
const int numIterations = atoi(argv[1]);
const unsigned int numElements = 128*128*128*2;
const int keybits = 32; // bit size of uint
const int batchSize = 1; // only support a batch size of 1
const unsigned int numBlocks = ((numElements % (CTA_SIZE * 4)) == 0) ?
(numElements / (CTA_SIZE * 4)) : (numElements / (CTA_SIZE * 4) + 1);
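// The ternary above is a ceiling division; an equivalent formulation (for reference only) is
//   numBlocks = (numElements + CTA_SIZE * 4 - 1) / (CTA_SIZE * 4);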
// Check power-of-two factorization before the scan operations start
unsigned int arrayLength = numElements/2/CTA_SIZE*16;
unsigned int log2L;
unsigned int factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
//Check supported size range
assert((arrayLength >= MIN_LARGE_ARRAY_SIZE) && (arrayLength <= MAX_LARGE_ARRAY_SIZE));
assert(arrayLength > MAX_WORKGROUP_INCLUSIVE_SCAN_SIZE);
//Check total batch size limit
assert((batchSize * arrayLength) <= MAX_BATCH_ELEMENTS);
// Alloc and init some data on the host, then alloc and init GPU buffer
unsigned int* h_keys = (unsigned int*)malloc(numElements * sizeof(unsigned int));
unsigned int* h_keysSorted = (unsigned int*)malloc(numElements * sizeof(unsigned int));
makeRandomUintVector(h_keys, numElements, keybits);
#ifdef DEBUG
printf("#elements: %u #blocks: %u\n", numElements, numBlocks);
for (int i = 0; i < numElements; i++) printf("init key %d: %x\n", i, h_keys[i]);
#endif
unsigned int* d_keys;
hipMalloc((void**)&d_keys, numElements*sizeof(unsigned int));
hipMemcpy(d_keys, h_keys, numElements*sizeof(unsigned int), hipMemcpyHostToDevice);
unsigned int* d_tempKeys;
hipMalloc((void**)&d_tempKeys, numElements*sizeof(unsigned int));
unsigned int* d_counters;
hipMalloc((void**)&d_counters, WARP_SIZE*numBlocks*sizeof(unsigned int));
unsigned int* d_countersSum;
hipMalloc((void**)&d_countersSum, WARP_SIZE*numBlocks*sizeof(unsigned int));
unsigned int* d_blockOffsets;
hipMalloc((void**)&d_blockOffsets, WARP_SIZE*numBlocks*sizeof(unsigned int));
// Allocate the buffer once though it is internally used by the scan operations
unsigned int* d_buffer;
hipMalloc((void**)&d_buffer, sizeof(unsigned int) *
(arrayLength / MAX_WORKGROUP_INCLUSIVE_SCAN_SIZE));
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < numIterations; i++)
{
radixSortKeys(d_keys, d_tempKeys, d_counters, d_blockOffsets, d_countersSum,
d_buffer, numElements, keybits, batchSize);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average execution time of radixsort: %f (s)\n", (time * 1e-9f) / numIterations);
// copy sorted keys to CPU
hipMemcpy(h_keysSorted, d_keys, numElements*sizeof(unsigned int), hipMemcpyDeviceToHost);
#ifdef DEBUG
for (int i = 0; i < numElements; i++) printf("sorted key %d: %x\n", i, h_keysSorted[i]);
#endif
// Check results
bool passed = true;
passed &= verifySortUint(h_keysSorted, NULL, h_keys, numElements);
free(h_keys);
free(h_keysSorted);
hipFree(d_keys);
hipFree(d_tempKeys);
hipFree(d_counters);
hipFree(d_countersSum);
hipFree(d_blockOffsets);
hipFree(d_buffer);
// finish
if (passed)
printf("PASS\n");
else
printf("FAIL\n");
return 0;
}
void makeRandomUintVector(unsigned int *a, unsigned int numElements, unsigned int keybits)
{
// Fill up with some random data
int keyshiftmask = 0;
if (keybits > 16) keyshiftmask = (1 << (keybits - 16)) - 1;
int keymask = 0xffff;
if (keybits < 16) keymask = (1 << keybits) - 1;
srand(95123);
for(unsigned int i=0; i < numElements; ++i)
{
a[i] = ((rand() & keyshiftmask)<<16) | (rand() & keymask);
}
}
// assumes the values were initially indices into the array, for simplicity of
// checking correct order of values
bool verifySortUint(unsigned int *keysSorted,
unsigned int *valuesSorted,
unsigned int *keysUnsorted,
unsigned int len)
{
bool passed = true;
for(unsigned int i=0; i<len-1; ++i)
{
if( (keysSorted[i])>(keysSorted[i+1]) )
{
printf("Unordered key[%d]: %d > key[%d]: %d\n", i, keysSorted[i], i+1, keysSorted[i+1]);
passed = false;
break;
}
}
if (valuesSorted)
{
for(unsigned int i=0; i<len; ++i)
{
if( keysUnsorted[valuesSorted[i]] != keysSorted[i] )
{
printf("Incorrectly sorted value[%u] (%u): %u != %u\n",
i, valuesSorted[i], keysUnsorted[valuesSorted[i]], keysSorted[i]);
passed = false;
break;
}
}
}
return passed;
}
| 3b3a5296adf47c9a424324c737a7a8ba37269929.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <chrono>
#include "RadixSort.h"
#include "Scan.h"
void makeRandomUintVector(unsigned int *a, unsigned int numElements, unsigned int keybits);
bool verifySortUint(unsigned int *keysSorted,
unsigned int *valuesSorted,
unsigned int *keysUnsorted,
unsigned int len);
int main(int argc, const char **argv)
{
if (argc != 2) {
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
const int numIterations = atoi(argv[1]);
const unsigned int numElements = 128*128*128*2;
const int keybits = 32; // bit size of uint
const int batchSize = 1; // only support a batch size of 1
const unsigned int numBlocks = ((numElements % (CTA_SIZE * 4)) == 0) ?
(numElements / (CTA_SIZE * 4)) : (numElements / (CTA_SIZE * 4) + 1);
// Check power-of-two factorization before the scan operations start
unsigned int arrayLength = numElements/2/CTA_SIZE*16;
unsigned int log2L;
unsigned int factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
//Check supported size range
assert((arrayLength >= MIN_LARGE_ARRAY_SIZE) && (arrayLength <= MAX_LARGE_ARRAY_SIZE));
assert(arrayLength > MAX_WORKGROUP_INCLUSIVE_SCAN_SIZE);
//Check total batch size limit
assert((batchSize * arrayLength) <= MAX_BATCH_ELEMENTS);
// Alloc and init some data on the host, then alloc and init GPU buffer
unsigned int* h_keys = (unsigned int*)malloc(numElements * sizeof(unsigned int));
unsigned int* h_keysSorted = (unsigned int*)malloc(numElements * sizeof(unsigned int));
makeRandomUintVector(h_keys, numElements, keybits);
#ifdef DEBUG
printf("#elements: %u #blocks: %u\n", numElements, numBlocks);
for (int i = 0; i < numElements; i++) printf("init key %d: %x\n", i, h_keys[i]);
#endif
unsigned int* d_keys;
cudaMalloc((void**)&d_keys, numElements*sizeof(unsigned int));
cudaMemcpy(d_keys, h_keys, numElements*sizeof(unsigned int), cudaMemcpyHostToDevice);
unsigned int* d_tempKeys;
cudaMalloc((void**)&d_tempKeys, numElements*sizeof(unsigned int));
unsigned int* d_counters;
cudaMalloc((void**)&d_counters, WARP_SIZE*numBlocks*sizeof(unsigned int));
unsigned int* d_countersSum;
cudaMalloc((void**)&d_countersSum, WARP_SIZE*numBlocks*sizeof(unsigned int));
unsigned int* d_blockOffsets;
cudaMalloc((void**)&d_blockOffsets, WARP_SIZE*numBlocks*sizeof(unsigned int));
// Allocate the buffer once though it is internally used by the scan operations
unsigned int* d_buffer;
cudaMalloc((void**)&d_buffer, sizeof(unsigned int) *
(arrayLength / MAX_WORKGROUP_INCLUSIVE_SCAN_SIZE));
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < numIterations; i++)
{
radixSortKeys(d_keys, d_tempKeys, d_counters, d_blockOffsets, d_countersSum,
d_buffer, numElements, keybits, batchSize);
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average execution time of radixsort: %f (s)\n", (time * 1e-9f) / numIterations);
// copy sorted keys to CPU
cudaMemcpy(h_keysSorted, d_keys, numElements*sizeof(unsigned int), cudaMemcpyDeviceToHost);
#ifdef DEBUG
for (int i = 0; i < numElements; i++) printf("sorted key %d: %x\n", i, h_keysSorted[i]);
#endif
// Check results
bool passed = true;
passed &= verifySortUint(h_keysSorted, NULL, h_keys, numElements);
free(h_keys);
free(h_keysSorted);
cudaFree(d_keys);
cudaFree(d_tempKeys);
cudaFree(d_counters);
cudaFree(d_countersSum);
cudaFree(d_blockOffsets);
cudaFree(d_buffer);
// finish
if (passed)
printf("PASS\n");
else
printf("FAIL\n");
return 0;
}
void makeRandomUintVector(unsigned int *a, unsigned int numElements, unsigned int keybits)
{
// Fill up with some random data
int keyshiftmask = 0;
if (keybits > 16) keyshiftmask = (1 << (keybits - 16)) - 1;
int keymask = 0xffff;
if (keybits < 16) keymask = (1 << keybits) - 1;
srand(95123);
for(unsigned int i=0; i < numElements; ++i)
{
a[i] = ((rand() & keyshiftmask)<<16) | (rand() & keymask);
}
}
// assumes the values were initially indices into the array, for simplicity of
// checking correct order of values
bool verifySortUint(unsigned int *keysSorted,
unsigned int *valuesSorted,
unsigned int *keysUnsorted,
unsigned int len)
{
bool passed = true;
for(unsigned int i=0; i<len-1; ++i)
{
if( (keysSorted[i])>(keysSorted[i+1]) )
{
printf("Unordered key[%d]: %d > key[%d]: %d\n", i, keysSorted[i], i+1, keysSorted[i+1]);
passed = false;
break;
}
}
if (valuesSorted)
{
for(unsigned int i=0; i<len; ++i)
{
if( keysUnsorted[valuesSorted[i]] != keysSorted[i] )
{
printf("Incorrectly sorted value[%u] (%u): %u != %u\n",
i, valuesSorted[i], keysUnsorted[valuesSorted[i]], keysSorted[i]);
passed = false;
break;
}
}
}
return passed;
}
|
a088522e4463453b5fa5242e1f24d697fdb34ccd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
//function declaration
unsigned int getmax(unsigned int *, unsigned int);
//unsigned int getmaxSeq(unsigned int *, unsigned int);
int main(int argc, char *argv[])
{
unsigned int size = 0; // The size of the array
unsigned int i; // loop index
unsigned int * numbers; //pointer to the array
if(argc !=2) {
printf("usage: maxseq num\n");
printf("num = size of the array\n");
exit(1);
}
size = atol(argv[1]);
numbers = (unsigned int *)malloc(size * sizeof(unsigned int));
if( !numbers ) {
printf("Unable to allocate mem for an array of size %u\n", size);
exit(1);
}
srand(time(NULL)); // setting a seed for the random number generator
// Fill-up the array with random numbers from 0 to size-1
for( i = 0; i < size; i++) {
numbers[i] = rand() % size;
}
printf("The maximum number in the array is: %u\n", getmax(numbers, size));
free(numbers);
exit(0);
}
__global__ void getmaxcu(unsigned int* num, int size, int threadCount)
{
__shared__ int localBiggest[32];
if (threadIdx.x==0) {
for (int i = 0; i < 32; i++) {
localBiggest[i] = 0;
}
}
__syncthreads();
int current = blockIdx.x *blockDim.x + threadIdx.x; //get current thread ID
int localBiggestCurrent = (current - blockIdx.x *blockDim.x)/32; //get currentID's warp number
//if current number is bigger than the biggest number so far in the warp, replace it
if ((num[current] > localBiggest[localBiggestCurrent]) && (current < size)) {
localBiggest[localBiggestCurrent] = num[current];
}
__syncthreads();
//using only one thread, loop through all the biggest numbers in each warp
//and return the biggest number out of them all
if (threadIdx.x==0) {
int biggest = localBiggest[0];
for (int i = 1; i < 32; i++) {
if (biggest < localBiggest[i]) {
biggest = localBiggest[i];
}
}
//once found the biggest number in this block, put back into global array
//num with corresponding block number
num[blockIdx.x] = biggest;
}
}
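// Note (added comment, not part of the original kernel): the updates to localBiggest above
// are plain read-modify-writes performed concurrently by all threads that map to the same
// warp slot, so the stored value is not guaranteed to be the true maximum; a deterministic
// variant would use atomicMax on the shared slots or a per-warp tree reduction.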
unsigned int getmax(unsigned int num[], unsigned int size)
{
//get max threads per block. Since the two devices on the GPU cluster are the same,
//I only got the property from one of the device
int maxThreadsPerBlock, block;
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
maxThreadsPerBlock = prop.maxThreadsPerBlock;
//get numbers of blocks needed depending on size and max threads per block
block = (size / maxThreadsPerBlock) + 1;
if (size % maxThreadsPerBlock == 0) {
block = size / maxThreadsPerBlock;
}
unsigned int* device_num;
hipSetDevice(1);
hipMalloc((void **) &device_num, size*sizeof(unsigned int));
hipMemcpy(device_num, num, size*sizeof(unsigned int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( getmaxcu), dim3(block),dim3(maxThreadsPerBlock), 32, 0, device_num, size, maxThreadsPerBlock);
hipMemcpy(num, device_num, size*sizeof(unsigned int), hipMemcpyDeviceToHost);
hipFree(device_num);
//using what we calculated, get the biggest number from each block
int answer = num[0];
for (int i = 1; i < block; i++) {
if (answer < num[i]) {
answer = num[i];
}
}
return answer;
}
| a088522e4463453b5fa5242e1f24d697fdb34ccd.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
//function declaration
unsigned int getmax(unsigned int *, unsigned int);
//unsigned int getmaxSeq(unsigned int *, unsigned int);
int main(int argc, char *argv[])
{
unsigned int size = 0; // The size of the array
unsigned int i; // loop index
unsigned int * numbers; //pointer to the array
if(argc !=2) {
printf("usage: maxseq num\n");
printf("num = size of the array\n");
exit(1);
}
size = atol(argv[1]);
numbers = (unsigned int *)malloc(size * sizeof(unsigned int));
if( !numbers ) {
printf("Unable to allocate mem for an array of size %u\n", size);
exit(1);
}
srand(time(NULL)); // setting a seed for the random number generator
// Fill-up the array with random numbers from 0 to size-1
for( i = 0; i < size; i++) {
numbers[i] = rand() % size;
}
printf("The maximum number in the array is: %u\n", getmax(numbers, size));
free(numbers);
exit(0);
}
__global__ void getmaxcu(unsigned int* num, int size, int threadCount)
{
__shared__ int localBiggest[32];
if (threadIdx.x==0) {
for (int i = 0; i < 32; i++) {
localBiggest[i] = 0;
}
}
__syncthreads();
int current = blockIdx.x *blockDim.x + threadIdx.x; //get current thread ID
int localBiggestCurrent = (current - blockIdx.x *blockDim.x)/32; //get currentID's warp number
//if current number is bigger than the biggest number so far in the warp, replace it
if ((num[current] > localBiggest[localBiggestCurrent]) && (current < size)) {
localBiggest[localBiggestCurrent] = num[current];
}
__syncthreads();
//using only one thread, loop through all the biggest numbers in each warp
//and return the biggest number out of them all
if (threadIdx.x==0) {
int biggest = localBiggest[0];
for (int i = 1; i < 32; i++) {
if (biggest < localBiggest[i]) {
biggest = localBiggest[i];
}
}
//once found the biggest number in this block, put back into global array
//num with corresponding block number
num[blockIdx.x] = biggest;
}
}
unsigned int getmax(unsigned int num[], unsigned int size)
{
//get max threads per block. Since the two devices on the GPU cluster are the same,
//I only got the property from one of the device
int maxThreadsPerBlock, block;
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
maxThreadsPerBlock = prop.maxThreadsPerBlock;
//get numbers of blocks needed depending on size and max threads per block
block = (size / maxThreadsPerBlock) + 1;
if (size % maxThreadsPerBlock == 0) {
block = size / maxThreadsPerBlock;
}
unsigned int* device_num;
cudaSetDevice(1);
cudaMalloc((void **) &device_num, size*sizeof(unsigned int));
cudaMemcpy(device_num, num, size*sizeof(unsigned int), cudaMemcpyHostToDevice);
getmaxcu<<<block,maxThreadsPerBlock, 32>>>(device_num, size, maxThreadsPerBlock);
cudaMemcpy(num, device_num, size*sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaFree(device_num);
//using what we calculated, get the biggest number from each block
int answer = num[0];
for (int i = 1; i < block; i++) {
if (answer < num[i]) {
answer = num[i];
}
}
return answer;
}
|
StreamCompaction.hip | // !!! This is a file automatically generated by hipify!!!
/***********************************************************************************
Created by Mohsen Safari.
************************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
////////////////////////////////////////////////////////////////////////////////
// Pure Functions
////////////////////////////////////////////////////////////////////////////////
/*@
requires 0 <= p;
ensures p < \result;
pure int ExpTwo(int p) = 0 < p ? 2 * ExpTwo(p - 1) : 1;
@*/
/*@
ensures |xs| == 0 ==> \result == 0;
ensures |xs| == 1 ==> \result == head(xs);
pure int intsum(seq<int> xs) =
0 < |xs| ? head(xs) + intsum(tail(xs)) : 0;
@*/
/*@
requires n <= |xs|;
ensures n < 0 ==> |Take(xs, n)| == 0;
ensures 0 <= n ==> |Take(xs, n)| == n;
ensures (\forall int i; 0 <= i && i < n; xs[i] == get(Take(xs, n), i));
pure seq<int> Take(seq<int> xs, int n) =
0 < n ? seq<int> { head(xs) } + Take(tail(xs), n - 1) : seq<int> { };
@*/
/*@
requires 0 <= i && i <= |xs|;
ensures |\result| == |xs| - i;
ensures (\forall int j; 0 <= j && j < |\result|; \result[j] == intsum(Take(xs, i+j)));
pure seq<int> psum(seq<int> xs, int i) =
i < |xs| ? seq<int> { intsum(Take(xs, i)) } + psum(xs, i + 1) : seq<int> { };
@*/
/*@
ensures |\result| == |xs|;
ensures (\forall int j; 0 <= j && j < |\result|; \result[j] == intsum(Take(xs, j)));
pure seq<int> psum2(seq<int> xs) = psum(xs, 0);
@*/
/*@
requires |xs| >= 0;
ensures |xs| == 0 ==> \result == xs;
ensures |xs| == 1 ==> \result == xs;
ensures |xs| == 2 ==> \result == seq<int> { head(xs) + head(tail(xs)) };
ensures |xs| % 2 == 0 ==> |\result| == |xs| / 2;
pure seq<int> implode(seq<int> xs) =
1 < |xs| ? seq<int> { head(xs) + head(tail(xs)) } + implode(tail(tail(xs))) : xs;
@*/
/*@
requires 0 <= p;
pure int exp(int n, int p) = 0 < p ? n * exp(n, p - 1) : 1;
@*/
/*@
requires 0 <= n;
requires n < |xs|;
pure int get(seq<int> xs, int n) = xs[n];
@*/
/*@
requires k > 0;
requires |xs| == ExpTwo(k);
requires i >= 0 && i <= |xs|;
requires 1 <= lvl && lvl <= k;
requires stride == ExpTwo(lvl-1);
requires stride > 0 && stride < |xs|;
ensures |\result| == |xs| - i;
ensures (\forall int j; j >= 0 && j < |\result|; ((i < |xs|) && ((i+j) >= stride) && (((i+j) % (2*stride)) == (2*stride-1))) ==> \result[j] == xs[i+j] + xs[i+j - stride]);
ensures (\forall int j; j >= 0 && j < |\result|; ((i < |xs|) && (((i+j) < stride) || (((i+j) % (2*stride)) != (2*stride-1)))) ==> \result[j] == xs[i+j]);
pure seq<int> up(seq<int> xs, int stride, int i, int k, int lvl) =
i < |xs| ? (
((i % (2*stride)) == (2*stride-1) && (i >= stride)?
seq<int> {xs[i] + xs[i-stride]} + up(xs, stride, i+1, k, lvl)
:
seq<int> {xs[i]} + up(xs, stride, i+1, k, lvl) ))
:
seq<int> {};
@*/
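/* Small example (illustrative, not part of the original specification):
   up({3, 1, 4, 1}, 1, 0, 2, 1) == {3, 4, 4, 5}, i.e. with stride == 1 only the
   odd indices are replaced by xs[i] + xs[i-1], which is one level of the up-sweep. */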
/*@
requires (\forall int i; 0 <= i && i < |xs|; xs[i] == 0 || xs[i] == 1);
ensures \result == intsum(xs);
ensures \result >= 0;
pure int count(seq<int> xs) =
|xs| > 0 ? ( head(xs) == 1 ? 1+count(tail(xs)) : count(tail(xs)) ) : 0;
@*/
/*@
requires |flags| == |input|;
requires (\forall int j; 0 <= j && j < |flags|; flags[j] == 0 || flags[j] == 1);
ensures |\result| == intsum(flags);
ensures 0 <= |\result| && |\result| <= |flags|;
pure seq<int> compact(seq<int> input, seq<int> flags) =
0 < |input| ?
(head(flags) == 1 ? seq<int> { head(input) } + compact(tail(input), tail(flags)) : compact(tail(input), tail(flags)))
:
seq<int> { };
@*/
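/* Worked example (illustrative, not part of the original specification):
   for input = {5, 7, 9, 3} and flags = {1, 0, 1, 1},
   psum2(flags) == {0, 1, 1, 2} and compact(input, flags) == {5, 9, 3};
   each kept element input[i] ends up at output position get(psum2(flags), i). */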
////////////////////////////////////////////////////////////////////////////////////////Lemmas
/* The sum of a list of non-negative integers is itself non-negative. */
/*@
requires (\forall int i; 0 <= i && i < |xs|; 0 <= xs[i]);
ensures \result && 0 <= intsum(xs);
pure bool lemma_sum_nonneg(seq<int> xs);
@*/
/*@
ensures \result && intsum(seq<int> { }) == 0;
pure bool lemma_intsum_zero();
@*/
/*@
ensures \result && psum2(seq<int> { }) == seq<int> { };
pure bool lemma_psum_zero();
@*/
/*@
ensures \result && intsum(seq<int> { x }) == x;
pure bool lemma_intsum_single(int x);
@*/
/*@
requires |xs| == 1;
ensures \result && psum2(xs) == seq<int> {0};
pure bool lemma_psum_single(seq<int> xs);
@*/
/*@
requires |xs| >= 0;
requires |ys| >= 0;
ensures \result && |xs| == 0 ==> intsum(xs + ys) == intsum(ys);
ensures \result && |ys| == 0 ==> intsum(xs + ys) == intsum(xs);
ensures \result && |xs + ys| == |xs| + |ys|;
ensures \result && intsum(tail(xs) + ys) == intsum(tail(xs)) + intsum(ys);
ensures \result && intsum(xs + ys) == intsum(xs) + intsum(ys);
pure bool lemma_intsum_app(seq<int> xs, seq<int> ys);
@*/
/*@
requires |xs| <= 1;
ensures \result && xs == implode(xs);
pure bool lemma_implode_base(seq<int> xs);
@*/
/*@
ensures \result && intsum(xs) == intsum(implode(xs));
pure bool lemma_implode_sum(seq<int> xs);
@*/
/*@
requires 0 < n;
ensures \result && ExpTwo(n) == 2 * ExpTwo(n - 1);
pure bool lemma_exp2_red_mult(int n);
@*/
/*@
requires 0 < n;
ensures \result && ExpTwo(n) / 2 == ExpTwo(n - 1);
pure bool lemma_exp2_red_div(int n);
@*/
/*@
requires 0 <= n;
ensures \result && 0 < ExpTwo(n);
pure bool lemma_exp2_positive(int n);
@*/
/*@
requires 0 <= i;
requires i <= j;
ensures \result && ExpTwo(i) <= ExpTwo(j);
pure bool lemma_exp2_leq(int i, int j);
@*/
/*@
requires i >= 0 && j >= 0;
requires ExpTwo(i) == ExpTwo(j);
ensures \result && i == j;
pure bool power_two_lemma(int i, int j);
@*/
/*@
requires |xs| % 2 == 0;
ensures \result && |implode(xs)| == |xs| / 2;
pure bool lemma_implode_length_mod_two(seq<int> xs);
@*/
/*@
requires 0 < n && |xs| == ExpTwo(n);
ensures \result && |implode(xs)| == ExpTwo(n - 1);
pure bool lemma_implode_red_exp2(seq<int> xs, int n);
@*/
/*@
requires 0 < i;
requires i < |xs|;
ensures \result && get(tail(xs), i - 1) == xs[i];
pure bool lemma_intseq_index_tail(seq<int> xs, int i);
@*/
/*@
requires |xs| % 2 == 0;
requires 0 <= i && i < |implode(xs)|;
requires (2 * i) < |xs|;
requires (2 * i + 1) < |xs|;
ensures \result && get(implode(xs), i) == xs[2 * i] + xs[2 * i + 1];
pure bool lemma_implode_get(seq<int> xs, int i);
@*/
/*@
requires |xs| % 2 == 0;
requires |implode(xs)| == |xs|/2;
ensures \result && (\forall int i; 0 <= i && i < |implode(xs)|; get(implode(xs), i) == xs[2 * i] + xs[2 * i + 1]);
pure bool lemma_implode_get_all(seq<int> xs);
@*/
/*@
requires |xs| == 2 * |ys|;
requires 0 <= |ys|;
requires (\forall int i; 0 <= i && i < |ys|; ys[i] == xs[2*i] + xs[2*i+1]);
ensures \result && ys == implode(xs);
pure bool lemma_implode_rel(seq<int> xs, seq<int> ys);
@*/
/*@
requires 0 <= i && i < |xs|;
ensures \result && get(psum2(xs), i) == intsum(Take(xs, i));
pure bool lemma_psum_get(seq<int> xs, int i);
@*/
/*@
ensures \result && (\forall int i; 0 <= i && i < |xs|; get(psum2(xs), i) == intsum(Take(xs, i)));
pure bool lemma_psum_get_all(seq<int> xs);
@*/
/*@
requires 0 < n && n <= |xs|;
ensures \result && Take(xs, n) == Take(xs, n - 1) + seq<int> { xs[n - 1] };
pure bool missing_lemma_2(seq<int> xs, int n);
@*/
/*@
requires |xs| % 2 == 0;
requires |ys| % 2 == 0;
ensures \result && implode(xs + ys) == implode(xs) + implode(ys);
pure bool missing_lemma_3(seq<int> xs, seq<int> ys);
@*/
/*@
ensures \result && xs + (ys + zs) == (xs + ys) + zs;
pure bool intseq_concat_assoc(seq<int> xs, seq<int> ys, seq<int> zs);
@*/
/*@
requires |xs| % 2 == 0;
requires 0 <= n && n < |implode(xs)|;
requires |implode(xs)| == |xs| / 2;
ensures \result && Take(implode(xs), n) == implode(Take(xs, 2 * n));
pure bool missing_lemma(seq<int> xs, int n);
@*/
/*@
requires |xs| % 2 == 0;
requires |implode(xs)| == |xs|/2;
requires 0 <= i && i < |implode(xs)|;
requires 2 * i < |xs|;
ensures \result && get(psum2(implode(xs)), i) == intsum(Take(xs, 2 * i));
pure bool lemma_psum_Take2(seq<int> xs, int i);
@*/
/*@
requires |xs| % 2 == 0;
requires |implode(xs)| == |xs|/2;
requires 0 <= i && i < |implode(xs)|;
requires 2 * i < |xs|;
ensures \result && get(psum2(implode(xs)), i) == get(psum2(xs), 2 * i);
pure bool lemma_get_psum_implode(seq<int> xs, int i);
@*/
/*@
requires 0 <= i;
requires 2 * i + 1 < |xs|;
ensures \result && get(psum2(xs), 2 * i + 1) == get(psum2(xs), 2 * i) + get(xs, 2 * i);
pure bool lemma_combine_psum(seq<int> xs, int i);
@*/
/*@
requires (\forall int j; 0 <= j && j < |xs|; xs[j] == 0 || xs[j] == 1);
ensures \result && intsum(xs) >= 0;
pure bool lemma_intsum_positive(seq<int> xs);
@*/
/*@
requires i >= 0;
requires i < |xs|;
requires (\forall int j; 0 <= j && j < i; xs[j] == 0 || xs[j] == 1);
ensures \result && i < |Take(xs, i)| ==> intsum(Take(xs, i)) >= 0;
pure bool lemma_intsum_flag(seq<int> xs, int i) = true;
@*/
/* assuming all elements in `xs` are non-negative, the sum of any sublist of `xs` will not be bigger than the sum of `xs`. */
/*@
requires n <= |xs|;
requires (\forall int i; 0 <= i && i < |xs|; 0 <= xs[i]);
ensures \result && 0 <= intsum(Take(xs, n)) && intsum(Take(xs, n)) <= intsum(xs);
pure bool lemma_take_sum(seq<int> xs, int n);
@*/
/*@
requires 0 <= n && n < |xs|;
requires (\forall int i; 0 <= i && i < |xs|; xs[i] == 0 || xs[i] == 1);
ensures \result && intsum(Take(xs, n)) < |xs|;
pure bool lemma_flags_take_size(seq<int> xs, int n);
@*/
/*@
requires 0 <= n && n < |xs|;
requires (\forall int i; 0 <= i && i < |xs|; xs[i] == 0 || xs[i] == 1);
requires xs[n] == 1;
ensures \result && intsum(Take(xs, n)) < intsum(xs);
pure bool lemma_sum_pos_flag(seq<int> xs, int n);
@*/
/*@
requires 0 <= n && n < |flags|;
requires flags[n] == 1;
requires |flags| == |input|;
requires (\forall int j; 0 <= j && j < |flags|; flags[j] == 0 || flags[j] == 1);
ensures \result && 0 <= intsum(Take(flags, n));
ensures \result && intsum(Take(flags, n)) < intsum(flags);
ensures \result && intsum(Take(flags, n)) < |flags|;
ensures \result && input[n] == get(compact(input, flags), intsum(Take(flags, n)));
ensures \result && input[n] == get(compact(input, flags), get(psum2(flags), n));
pure bool lemma_correctness(seq<int> input, seq<int> flags, int n);
@*/
////////////////////////////////////////////////////////////////////////////////
//Kernel
////////////////////////////////////////////////////////////////////////////////
/*@
context_everywhere flag_after_prefix != NULL;
context_everywhere flag_before_prefix != NULL;
context_everywhere input != NULL;
context_everywhere output != NULL;
context_everywhere k == 10;
context_everywhere M == 8;
context_everywhere opencl_gsize == ExpTwo(k);
context_everywhere opencl_gcount == 1;
requires \ltid < (ExpTwo(k)+2-1)/2 ==> \pointer_index(flag_after_prefix, 2*\ltid, write);
requires \ltid < (ExpTwo(k)-1+2-1)/2 ==> \pointer_index(flag_after_prefix, 2*\ltid+1, write);
requires (\ltid >= 0 && \ltid <= 0) ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % 1 != 0; \pointer_index(flag_after_prefix, i, write));
requires \pointer_index(input, \ltid, 1\2);
requires \pointer_index(flag_before_prefix, \ltid, 1\2);
requires flag_before_prefix[\ltid] == 0 || flag_before_prefix[\ltid] == 1;
requires \ltid < ExpTwo(M) ==> \pointer_index(output, \ltid, write);
@*/
__global__ void CUDA_Kernel_Stream_Compaction(int* input, int* output, int* flag_before_prefix, int* flag_after_prefix, int k, int M)
{
int tid = threadIdx.x;
//@ assert tid == \ltid;
//@ ghost seq<int> flag_seq;
//@ assume |flag_seq| == ExpTwo(k);
//@ assume (\forall int i; 0 <= i && i < ExpTwo(k); flag_seq[i] == 0 || flag_seq[i] == 1);
//@ assume (flag_seq[tid] == 0 || flag_seq[tid] == 1) && count(flag_seq) == ExpTwo(M);
//@ assume (2 * tid < ExpTwo(k)) ==> flag_after_prefix[2 * tid] == flag_seq[2 * tid];
//@ assume (2 * tid + 1 < ExpTwo(k)) ==> flag_after_prefix[2 * tid + 1] == flag_seq[2 * tid + 1];
//@ assume flag_seq[tid] == flag_before_prefix[tid];
//@ assume 2 * tid < ExpTwo(k) ==> flag_after_prefix[2 * tid] == 0 || flag_after_prefix[2 * tid] == 1;
//@ assume 2 * tid + 1 < ExpTwo(k) ==> flag_after_prefix[2 * tid + 1] == 0 || flag_after_prefix[2 * tid + 1] == 1;
//@ ghost seq<int> inp;
//@ assume |inp| == ExpTwo(k) && inp[tid] == input[tid];
int indicator = 2 * tid + 1;
int stride = 1;
int lvl = 1;
//@ ghost seq<seq<int> > Matrix_UP = seq<seq<int> > { flag_seq };
//@ assert (\forall int i; 0 < i && i < lvl; Matrix_UP[i] == up(Matrix_UP[i - 1], stride/ExpTwo(lvl-i), 0, k, i));
//@ ghost seq<seq<int> > Matrix = seq<seq<int> > { flag_seq };
/*@
loop_invariant k > 0;
loop_invariant tid >= 0 && tid < ExpTwo(k);
loop_invariant stride > 0;
loop_invariant 1 <= lvl;
loop_invariant stride == ExpTwo(lvl-1);
loop_invariant lvl <= k+1;
loop_invariant indicator + 1 == ExpTwo(lvl)*(tid+1);
loop_invariant indicator + 1 == 2*stride*(tid+1);
loop_invariant indicator > 0;
loop_invariant stride <= ExpTwo(k);
loop_invariant indicator < ExpTwo(k) ==> \pointer_index(flag_after_prefix, indicator, 1);
loop_invariant indicator < ExpTwo(k) && indicator >= stride ==> \pointer_index(flag_after_prefix, indicator - stride, 1);
loop_invariant tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % stride != 0; \pointer_index(flag_after_prefix, i, 1));
loop_invariant (tid==0 && (stride == ExpTwo(k))) ==> \pointer_index(flag_after_prefix, ExpTwo(k) - 1, 1);
loop_invariant |Matrix_UP| == lvl;
loop_invariant (\forall int i; 0 <= i && i < lvl; |Matrix_UP[i]| == ExpTwo(k));
loop_invariant lvl == 1 ==> Matrix_UP[lvl - 1] == flag_seq;
loop_invariant lvl > 1 && lvl < |Matrix_UP| ==> Matrix_UP[lvl] == up(Matrix_UP[lvl - 1], (stride/2) - 1, 0, k, lvl - 1);
loop_invariant indicator < ExpTwo(k) ==> Matrix_UP[lvl - 1][indicator] == flag_after_prefix[indicator];
loop_invariant indicator < ExpTwo(k) && indicator >= stride ==> Matrix_UP[lvl - 1][indicator - stride] == flag_after_prefix[indicator - stride];
loop_invariant lvl == k+1 ==> Matrix_UP[lvl-1][ExpTwo(k) - 1] == intsum(flag_seq);
loop_invariant lvl == k+1 ==> Matrix_UP[lvl-1][(ExpTwo(k) - 1)/2] == intsum(Take(flag_seq, |flag_seq|/2));
loop_invariant |Matrix| == lvl;
loop_invariant (\forall int i; 0 <= i && i < lvl; 0 <= |Matrix[i]| && |Matrix[i]| <= ExpTwo(k));
loop_invariant (\forall int i; 0 <= i && i < lvl; |Matrix[i]| == ExpTwo(k - i));
loop_invariant (\forall int i; 0 < i && i < lvl; Matrix[i] == implode(Matrix[i - 1]));
loop_invariant (\forall int i; 0 <= i && i < lvl; intsum(Matrix[i]) == intsum(flag_seq));
loop_invariant Matrix[0] == flag_seq;
loop_invariant indicator < ExpTwo(k) && 2 * tid + 1 < |Matrix[lvl - 1]| ==> flag_after_prefix[indicator] == Matrix[lvl - 1][2 * tid + 1];
loop_invariant indicator < ExpTwo(k) && indicator >= stride && 2 * tid < |Matrix[lvl - 1]| ==> flag_after_prefix[indicator - stride] == Matrix[lvl - 1][2 * tid];
@*/
while(stride < ExpTwo(k))
{
if(indicator < ExpTwo(k) && indicator >= stride)
{
//@ assert 2 * tid + 1 < |Matrix[lvl - 1]| ==> flag_after_prefix[indicator] == Matrix[lvl - 1][2 * tid + 1];
//@ assert 2 * tid < |Matrix[lvl - 1]| ==> flag_after_prefix[indicator - stride] == Matrix[lvl - 1][2 * tid];
flag_after_prefix[indicator] = flag_after_prefix[indicator] + flag_after_prefix[indicator - stride];
//@ assert 2 * tid + 1 < |Matrix[lvl - 1]| ==> flag_after_prefix[indicator] == Matrix[lvl - 1][2 * tid + 1] + Matrix[lvl - 1][2 * tid];
}
//@ assert lemma_implode_length_mod_two(Matrix[lvl - 1]);
//@ assert lemma_implode_sum(Matrix[lvl - 1]);
//@ assert lemma_implode_get_all(Matrix[lvl - 1]);
//@ ghost Matrix = Matrix + seq<seq<int> > { implode(Matrix[lvl - 1]) };
//@ ghost tid < |implode(Matrix[lvl - 1])| ? (lemma_implode_get(Matrix[lvl - 1], tid) && (2 * tid + 1 < |Matrix[lvl - 1]| ==> get(implode(Matrix[lvl - 1]), tid) == Matrix[lvl - 1][2 * tid] + Matrix[lvl - 1][2 * tid + 1]) && (indicator < ExpTwo(k) && indicator >= stride ==> flag_after_prefix[indicator] == Matrix[lvl - 1][2 * tid + 1] + Matrix[lvl - 1][2 * tid]) && (Matrix[lvl] == implode(Matrix[lvl - 1])) && (indicator < ExpTwo(k) && indicator >= stride ==> flag_after_prefix[indicator] == Matrix[lvl][tid])) : true;
/*if(tid < |implode(Matrix[lvl - 1])|){
lemma_implode_get(Matrix[lvl - 1], tid);
assert 2 * tid + 1 < |Matrix[lvl - 1]| ==> get(implode(Matrix[lvl - 1]), tid) == Matrix[lvl - 1][2 * tid] + Matrix[lvl - 1][2 * tid + 1];
assert indicator < flag_after_prefix.length && indicator >= stride ==> flag_after_prefix[indicator] == Matrix[lvl - 1][2 * tid + 1] + Matrix[lvl - 1][2 * tid];
assert Matrix[lvl] == implode(Matrix[lvl - 1]);
assert indicator < flag_after_prefix.length && indicator >= stride ==> flag_after_prefix[indicator] == Matrix[lvl][tid];
}*/
/*@
context_everywhere k > 0;
context_everywhere 1 <= lvl && lvl <= k;
context_everywhere |Matrix| == lvl + 1;
requires tid >= 0 && tid < ExpTwo(k);
requires stride == ExpTwo(lvl-1);
requires stride > 0 && stride < ExpTwo(k);
requires indicator + 1 == ExpTwo(lvl)*(tid+1);
requires indicator + 1 == 2*stride*(tid+1);
requires indicator > 0;
requires indicator < ExpTwo(k) ==> \pointer_index(flag_after_prefix, indicator, 1);
requires indicator < ExpTwo(k) && indicator >= stride ==> \pointer_index(flag_after_prefix, indicator - stride, 1);
requires tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % stride != 0; \pointer_index(flag_after_prefix, i, 1));
ensures tid >= 0 && tid < ExpTwo(k);
ensures 2 * stride == ExpTwo(lvl);
ensures 2 * stride > 0 && 2 * stride <= ExpTwo(k);
ensures 2 * indicator + 2 == ExpTwo(lvl+1)*(tid+1);
ensures 2 * indicator + 2 == 2*stride*(tid+1);
ensures 2 * indicator + 1 > 0;
ensures 2 * indicator + 1 < ExpTwo(lvl) ==> \pointer_index(flag_after_prefix, 2 * indicator + 1, 1);
ensures 2 * indicator + 1 < ExpTwo(lvl) && 2 * indicator + 1 >= 2 * stride ==> \pointer_index(flag_after_prefix, 2 * indicator + 1 - 2 * stride, 1);
ensures tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(lvl) && (i + 1) % (2 * stride) != 0; \pointer_index(flag_after_prefix, i, 1));
ensures (tid==0 && (2 * stride == ExpTwo(lvl))) ==> \pointer_index(flag_after_prefix, ExpTwo(k) - 1, 1);
@*/
__syncthreads();
//@ ghost Matrix_UP = Matrix_UP + seq<seq<int> > { up(Matrix_UP[lvl - 1], stride, 0, k, lvl) };
//@ assert (indicator < ExpTwo(k)) && (indicator >= stride) ==> Matrix_UP[lvl][indicator] == Matrix_UP[lvl - 1][indicator] + Matrix_UP[lvl - 1][indicator-stride];
indicator = 2 * indicator + 1;
stride = 2 * stride;
lvl = lvl + 1;
//@ assert (\forall int i; 0 < i && i < lvl; Matrix_UP[i] == up(Matrix_UP[i - 1], stride/ExpTwo(lvl-i), 0, k, i));
//@ assert stride == ExpTwo(lvl-1);
//@ assert lemma_exp2_red_mult(lvl);
//@ assert ExpTwo(lvl) == 2 * ExpTwo(lvl - 1);
//@ assert 2*stride == ExpTwo(lvl);
//@ assert indicator + 1 == ExpTwo(lvl)*(tid+1);
//@ assert indicator + 1 == 2*stride*(tid+1);
}
//@ assert stride == ExpTwo(lvl-1);
//@ assert ExpTwo(lvl-1) == ExpTwo(k);
//@ assert stride == ExpTwo(k);
//@ assert power_two_lemma(lvl-1, k);
//@ assert lvl == k + 1;
//@ assert indicator < ExpTwo(k) ==> Matrix_UP[lvl - 1][indicator] == flag_after_prefix[indicator];
//@ assert |Matrix| == lvl;
//@ assert (\forall int i; 0 <= i && i < k + 1; |Matrix[i]| == ExpTwo(k - i));
//@ assert (\forall int i; 0 < i && i < k + 1; Matrix[i] == implode(Matrix[i - 1]));
//@ assert (\forall int i; 0 <= i && i < k + 1; intsum(Matrix[i]) == intsum(flag_seq));
//@ assert |Matrix[k]| == 1;
//@ assert lemma_intsum_single(Matrix[k][0]);
//@ assert intsum(Matrix[k]) == intsum(flag_seq);
//@ assert Matrix[k] == seq<int>{intsum(flag_seq)};
//@ assert Matrix[0] == flag_seq;
//@ assert (\forall int i; 0 <= i && i < k + 1; 0 < |Matrix[i]| && |Matrix[i]| <= ExpTwo(k));
/////////////////////////////////////////////////////////////////////////////////
//@ assert indicator < ExpTwo(k) && indicator >= stride && 2 * tid < |Matrix[lvl - 1]| ==> flag_after_prefix[indicator - stride] == Matrix[lvl - 1][2 * tid];
/*@
context_everywhere k > 0;
context_everywhere |Matrix_UP| == k + 1;
context_everywhere |Matrix| == k + 1;
context_everywhere lvl == k + 1;
context stride == ExpTwo(k);
context indicator + 1 == ExpTwo(lvl)*(tid+1);
context indicator + 1 == 2*stride*(tid+1);
context indicator > 0;
context stride > 0 ;
requires indicator < ExpTwo(k) ==> \pointer_index(flag_after_prefix, indicator, 1);
requires indicator < ExpTwo(k) && indicator >= stride ==> \pointer_index(flag_after_prefix, indicator - stride, 1);
requires tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % stride != 0; \pointer_index(flag_after_prefix, i, 1));
requires (tid==0 && (stride == ExpTwo(k))) ==> \pointer_index(flag_after_prefix, ExpTwo(k) - 1, 1);
requires (\forall int i; 0 <= i && i <= k; |Matrix_UP[i]| == ExpTwo(k));
requires (\forall int i; 0 <= i && i < lvl; |Matrix[i]| == ExpTwo(k - i));
requires (\forall int i; 0 <= i && i < lvl; 0 <= |Matrix[i]| && |Matrix[i]| <= ExpTwo(k));
requires indicator < ExpTwo(k) && indicator >= stride ==> Matrix_UP[lvl - 1][indicator] == flag_after_prefix[indicator];
requires indicator < ExpTwo(k) && indicator >= stride ==> Matrix_UP[lvl - 1][indicator - stride] == flag_after_prefix[indicator - stride];
requires indicator < ExpTwo(k) && indicator >= stride && 2 * tid < |Matrix[lvl - 1]| ==> flag_after_prefix[indicator - stride] == Matrix[lvl - 1][2 * tid];
context tid >= 0 && tid < ExpTwo(k);
//ensures stride == ExpTwo(k) / 2;
//ensures indicator == ExpTwo(k) * tid + ExpTwo(k) - 1;
//ensures stride > 0 ;
//ensures indicator > 0;
ensures ExpTwo(k) * \ltid + ExpTwo(k) - 1 < ExpTwo(k) ==> \pointer_index(flag_after_prefix, ExpTwo(k) * \ltid + ExpTwo(k) - 1, 1);
ensures ExpTwo(k) * \ltid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * \ltid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> \pointer_index(flag_after_prefix, ExpTwo(k) * \ltid + ExpTwo(k) - 1 - ExpTwo(k) / 2, 1);
ensures tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % (ExpTwo(k) / 2) != 0; \pointer_index(flag_after_prefix, i, 1));
ensures (\forall int i; 0 <= i && i <= k; |Matrix_UP[i]| == ExpTwo(k));
ensures (\forall int i; 0 <= i && i < lvl; |Matrix[i]| == ExpTwo(k - i));
ensures (\forall int i; 0 <= i && i < lvl; 0 <= |Matrix[i]| && |Matrix[i]| <= ExpTwo(k));
//ensures ExpTwo(k) * \ltid + ExpTwo(k) - 1 < ExpTwo(k) ==> Matrix_UP[lvl - 1][ExpTwo(k) * \ltid + ExpTwo(k) - 1] == flag_after_prefix[ExpTwo(k) * \ltid + ExpTwo(k) - 1];
//ensures ExpTwo(k) * \ltid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * \ltid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> Matrix_UP[lvl - 1][ExpTwo(k) * \ltid + ExpTwo(k) - 1 - ExpTwo(k) / 2] == flag_after_prefix[ExpTwo(k) * \ltid + ExpTwo(k) - 1 - ExpTwo(k) / 2];
//ensures 2 * tid < |Matrix[lvl-2]| && ExpTwo(k) * \ltid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * \ltid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> flag_after_prefix[ExpTwo(k) * \ltid + ExpTwo(k) - 1 - ExpTwo(k) / 2] == get(Matrix[lvl-2], 2 * tid);
@*/
__syncthreads();
// (instability) These come from the last three postconditions in the previous barrier:
//@ assume ExpTwo(k) * tid + ExpTwo(k) - 1 < ExpTwo(k) ==> Matrix_UP[lvl - 1][ExpTwo(k) * tid + ExpTwo(k) - 1] == flag_after_prefix[ExpTwo(k) * tid + ExpTwo(k) - 1];
//@ assume ExpTwo(k) * tid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * tid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> Matrix_UP[lvl - 1][ExpTwo(k) * tid + ExpTwo(k) - 1 - ExpTwo(k) / 2] == flag_after_prefix[ExpTwo(k) * tid + ExpTwo(k) - 1 - ExpTwo(k) / 2];
//@ assume 2 * tid < |Matrix[lvl-2]| && ExpTwo(k) * tid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * tid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> flag_after_prefix[ExpTwo(k) * tid + ExpTwo(k) - 1 - ExpTwo(k) / 2] == get(Matrix[lvl-2], 2 * tid);
/////////////////////////////////////////////////////////////////////////////////////// Down
indicator = ExpTwo(k) * tid + ExpTwo(k) - 1; // flag_after_prefix.length * tid + flag_after_prefix.length - 1;
stride = ExpTwo(k) / 2; // flag_after_prefix.length / 2;
lvl = k - 1; //lvl - 2;
int temp;
//@ ghost seq<int> temp_seq = seq<int> { 0 };
//@ assert ExpTwo(k) * tid + ExpTwo(k) - 1 < ExpTwo(k) ==> Matrix_UP[lvl + 1][indicator] == flag_after_prefix[indicator];
//@ assert ExpTwo(k) * tid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * tid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> Matrix_UP[lvl + 1][indicator - stride] == flag_after_prefix[indicator - stride];
if(indicator < ExpTwo(k))
{
flag_after_prefix[indicator] = 0;
}
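// Down-sweep sketch (standard Blelloch scan): the root, which held intsum(flag_seq) after the
// up-sweep, has just been cleared to 0. In every iteration below each active thread does
// temp = a[indicator]; a[indicator] += a[indicator - stride]; a[indicator - stride] = temp,
// i.e. the old value at indicator moves down to indicator - stride and indicator becomes the
// sum of the two old values. After the last iteration flag_after_prefix[tid] holds the
// exclusive prefix sum of flag_seq up to tid, which is what the assertions after the loop state.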
/*@
loop_invariant k > 0;
loop_invariant tid >= 0 && tid < ExpTwo(k);
loop_invariant lvl <= k - 1;
loop_invariant lvl >= -1;
loop_invariant lvl >= 0 ==> stride == ExpTwo(lvl);
loop_invariant lvl == -1 ==> stride == 0;
loop_invariant stride == 0 ==> lvl == -1;
loop_invariant stride >= 0;
loop_invariant indicator >= 0;
loop_invariant indicator+1 == ExpTwo(lvl+1)*(tid+1);
loop_invariant indicator < ExpTwo(k) ==> \pointer_index(flag_after_prefix, indicator, 1);
loop_invariant lvl >= 0 && indicator < ExpTwo(k) && indicator >= stride ==> \pointer_index(flag_after_prefix, indicator - stride, 1);
loop_invariant (tid==0 && stride > 0) ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % stride != 0; \pointer_index(flag_after_prefix, i, 1));
//loop_invariant lvl == -1 ==> \pointer_index(flag_after_prefix, tid, 1);
//loop_invariant lvl == -1 ==> indicator == tid;
//loop_invariant indicator == tid ==> lvl == -1;
loop_invariant |temp_seq| == ExpTwo(k - (lvl + 1));
loop_invariant 0 < |temp_seq| && |temp_seq| <= ExpTwo(k);
loop_invariant temp_seq == psum2(Matrix[lvl + 1]);
loop_invariant (\forall int i; 0 <= i && i < k + 1; 0 < |Matrix[i]| && |Matrix[i]| <= ExpTwo(k));
loop_invariant (\forall int i; 0 <= i && i < k + 1; |Matrix[i]| == ExpTwo(k - i));
loop_invariant (\forall int i; 0 <= i && i < k + 1; intsum(Matrix[i]) == intsum(flag_seq));
loop_invariant (\forall int i; 0 < i && i < k + 1; Matrix[i] == implode(Matrix[i - 1]));
loop_invariant Matrix[0] == flag_seq;
loop_invariant Matrix[k] == seq<int>{ intsum(flag_seq) };
loop_invariant tid < |temp_seq| && indicator < ExpTwo(k) ==> temp_seq[tid] == flag_after_prefix[indicator];
loop_invariant lvl >= 0 && 2 * tid < |Matrix[lvl]| && indicator < ExpTwo(k) && indicator >= stride ==> flag_after_prefix[indicator - stride] == get(Matrix[lvl], 2 * tid);
@*/
while(stride >= 1)
{
if(indicator < ExpTwo(k) && indicator >= stride)
{
//@ assert tid < |temp_seq| ==> temp_seq[tid] == flag_after_prefix[indicator];
temp = flag_after_prefix[indicator];
//@ assert tid < |temp_seq| ==> temp == temp_seq[tid];
flag_after_prefix[indicator] = flag_after_prefix[indicator] + flag_after_prefix[indicator - stride];
//@ assert tid < |temp_seq| ==> flag_after_prefix[indicator] == temp_seq[tid] + flag_after_prefix[indicator - stride];
//@ assert 2 * tid < |Matrix[lvl]| ==> flag_after_prefix[indicator - stride] == get(Matrix[lvl], 2 * tid);
//@ assert 2 * tid < |Matrix[lvl]| && tid < |temp_seq| ==> flag_after_prefix[indicator] == temp_seq[tid] + get(Matrix[lvl], 2 * tid);
//@ assert tid < |Matrix[lvl + 1]| && tid < |temp_seq| ==> temp_seq[tid] == get(psum2(Matrix[lvl + 1]), tid);
//@ assert tid < |Matrix[lvl + 1]| && 2 * tid < |Matrix[lvl]| ==> flag_after_prefix[indicator] == get(psum2(Matrix[lvl + 1]), tid) + get(Matrix[lvl], 2 * tid);
//@ assert Matrix[lvl + 1] == implode(Matrix[lvl]);
//@ assert tid < |implode(Matrix[lvl])| && 2 * tid < |Matrix[lvl]| ==> flag_after_prefix[indicator] == get(psum2(implode(Matrix[lvl])), tid) + get(Matrix[lvl], 2 * tid);
//@ ghost tid < |implode(Matrix[lvl])| ? lemma_get_psum_implode(Matrix[lvl], tid) : true;
/*if(tid < |implode(Matrix[lvl])|){
lemma_get_psum_implode(Matrix[lvl], tid);
}*/
//@ assert tid < |implode(Matrix[lvl])| && 2 * tid < |Matrix[lvl]| ==> get(psum2(implode(Matrix[lvl])), tid) == get(psum2(Matrix[lvl]), 2 * tid);
//@ assert 2 * tid < |Matrix[lvl]| ==> flag_after_prefix[indicator] == get(psum2(Matrix[lvl]), 2 * tid) + get(Matrix[lvl], 2 * tid);
//@ ghost 2 * tid + 1 < |Matrix[lvl]| ? lemma_combine_psum(Matrix[lvl], tid) : true;
/*if(2 * tid + 1 < |Matrix[lvl]|){
lemma_combine_psum(Matrix[lvl], tid);
}*/
//@ assert 2 * tid + 1 < |Matrix[lvl]| ==> get(psum2(Matrix[lvl]), 2 * tid + 1) == get(psum2(Matrix[lvl]), 2 * tid) + get(Matrix[lvl], 2 * tid);
//@ assert 2 * tid + 1 < |Matrix[lvl]| ==> flag_after_prefix[indicator] == get(psum2(Matrix[lvl]), 2 * tid + 1);
//@ assert tid < |temp_seq| ==> temp == temp_seq[tid];
flag_after_prefix[indicator - stride] = temp;
//@ assert tid < |temp_seq| ==> flag_after_prefix[indicator - stride] == temp_seq[tid];
//@ assert tid < |Matrix[lvl + 1]| && tid < |temp_seq| ==> temp_seq[tid] == get(psum2(Matrix[lvl + 1]), tid);
//@ assert Matrix[lvl + 1] == implode(Matrix[lvl]);
//@ assert tid < |implode(Matrix[lvl])| && tid < |temp_seq| ==> temp_seq[tid] == get(psum2(implode(Matrix[lvl])), tid);
//@ ghost tid < |implode(Matrix[lvl])| ? lemma_get_psum_implode(Matrix[lvl], tid) : true;
/*if(tid < |implode(Matrix[lvl])|){
lemma_get_psum_implode(Matrix[lvl], tid);
}*/
//@ assert tid < |implode(Matrix[lvl])| && 2 * tid < |Matrix[lvl]| ==> get(psum2(implode(Matrix[lvl])), tid) == get(psum2(Matrix[lvl]), 2 * tid);
//@ assert 2 * tid < |Matrix[lvl]| && tid < |temp_seq| ==> temp_seq[tid] == get(psum2(Matrix[lvl]), 2 * tid);
//@ assert 2 * tid < |Matrix[lvl]| ==> flag_after_prefix[indicator - stride] == get(psum2(Matrix[lvl]), 2 * tid);
}
//@ ghost temp_seq = psum2(Matrix[lvl]);
//@ assert 2 * tid < |temp_seq| && indicator < ExpTwo(k) && indicator >= stride ==> flag_after_prefix[indicator - stride] == temp_seq[2 * tid];
//@ assert 2 * tid + 1 < |temp_seq| && indicator < ExpTwo(k) && indicator >= stride ==> flag_after_prefix[indicator] == temp_seq[2 * tid + 1];
/*@
context_everywhere lvl >= 0 && lvl <= k - 1;
requires tid >= 0 && tid < ExpTwo(k);
context_everywhere |temp_seq| == ExpTwo(k - lvl);
context_everywhere 0 < |temp_seq| && |temp_seq| <= ExpTwo(k);
context_everywhere |Matrix| == k + 1;
//context lvl - 1 == -1 ==> (indicator - 1) / 2 == \ltid;
//context (indicator - 1) / 2 == \ltid ==> lvl - 1 == -1;
requires indicator >= 0;
requires stride >= 1 ;
requires stride == ExpTwo(lvl);
requires indicator+1 == ExpTwo(lvl+1)*(\ltid+1);
requires indicator < ExpTwo(k) ==> \pointer_index(flag_after_prefix, indicator, 1);
requires indicator < ExpTwo(k) && indicator >= stride ==> \pointer_index(flag_after_prefix, indicator - stride, 1);
requires tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % stride != 0; \pointer_index(flag_after_prefix, i, 1));
//requires 2 * tid < |temp_seq| && indicator < ExpTwo(k) && indicator >= stride ==> flag_after_prefix[indicator - stride] == temp_seq[2 * tid];
//requires 2 * tid + 1 < |temp_seq| && indicator < ExpTwo(k) && indicator >= stride ==> flag_after_prefix[indicator] == temp_seq[2 * tid + 1];
requires (\forall int i; 0 <= i && i < k + 1; |Matrix[i]| == ExpTwo(k - i));
requires (\forall int i; 0 <= i && i < k + 1; 0 <= |Matrix[i]| && |Matrix[i]| <= ExpTwo(k));
//requires 2 * tid < |Matrix[lvl]| && indicator < ExpTwo(k) && indicator >= stride ==> flag_after_prefix[indicator - stride] == get(psum2(Matrix[lvl]), 2 * tid);
ensures tid >= 0 && tid < ExpTwo(k);
ensures lvl-1 >= 0 ==> stride / 2 == ExpTwo(lvl - 1);
ensures lvl-1 == -1 ==> stride / 2 == 0;
ensures stride / 2 == 0 ==> lvl-1 == -1;
ensures stride / 2 >= 0;
ensures (indicator - 1) / 2 >= 0;
ensures (indicator - 1) / 2+1 == ExpTwo(lvl)*(tid+1);
ensures (indicator - 1) / 2 < ExpTwo(k) ==> \pointer_index(flag_after_prefix, (indicator - 1) / 2, 1);
ensures lvl-1 >= 0 && (indicator - 1) / 2 < ExpTwo(k) && (indicator - 1) / 2 >= stride / 2 ==> \pointer_index(flag_after_prefix, (indicator - 1) / 2 - stride / 2, 1);
ensures (tid==0 && stride/2 > 0) ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % (stride/2) != 0; \pointer_index(flag_after_prefix, i, 1));
//ensures tid < |temp_seq| && (indicator - 1) / 2 < ExpTwo(k) ==> temp_seq[tid] == flag_after_prefix[(indicator - 1) / 2];
ensures (\forall int i; 0 <= i && i < k + 1; |Matrix[i]| == ExpTwo(k - i));
ensures (\forall int i; 0 <= i && i < k + 1; 0 <= |Matrix[i]| && |Matrix[i]| <= ExpTwo(k));
//ensures lvl-1 >= 0 && 2 * tid < |Matrix[lvl-1]| && (indicator - 1) / 2 < ExpTwo(k) && (indicator - 1) / 2 >= stride / 2 ==> flag_after_prefix[(indicator - 1) / 2 - stride / 2] == get(Matrix[lvl-1], 2 * tid);
@*/
__syncthreads();
//@ assume tid < |temp_seq| && (indicator - 1) / 2 < ExpTwo(k) ==> temp_seq[tid] == flag_after_prefix[(indicator - 1) / 2];
//@ assume lvl-1 >= 0 && 2 * tid < |Matrix[lvl-1]| && (indicator - 1) / 2 < ExpTwo(k) && (indicator - 1) / 2 >= stride / 2 ==> flag_after_prefix[(indicator - 1) / 2 - stride / 2] == get(Matrix[lvl-1], 2 * tid);
indicator = (indicator - 1) / 2;
stride = stride / 2;
lvl = lvl - 1;
}
//@ assert indicator == tid;
//@ assert tid >= 0 && tid < ExpTwo(k);
//@ assert temp_seq == psum2(Matrix[0]);
//@ assert Matrix[0] == flag_seq;
//@ assert temp_seq == psum2(flag_seq);
//@ assert |temp_seq| == ExpTwo(k);
//@ assert temp_seq[tid] == flag_after_prefix[indicator];
//@ assert flag_after_prefix[indicator] == get(psum2(flag_seq), tid);
//@ assert lemma_intsum_flag(flag_seq, indicator);
//@ assert count(flag_seq) == ExpTwo(M);
//@ assert intsum(flag_seq) == ExpTwo(M);
//@ assert flag_seq[tid] == 1 ? lemma_sum_pos_flag(flag_seq, tid) : true;
//@ assert lemma_take_sum(flag_seq, tid);
//@ assert flag_seq[tid] == flag_before_prefix[indicator];
//@ assert flag_after_prefix[indicator] >= 0 && flag_after_prefix[indicator] <= ExpTwo(M);
//@ assert flag_before_prefix[indicator] == 1 ==> flag_after_prefix[indicator] >= 0 && flag_after_prefix[indicator] < ExpTwo(M);
/*@
requires indicator == tid;
requires tid >= 0 && tid < ExpTwo(k);
requires tid < ExpTwo(M) ==> \pointer_index(output, tid, 1);
requires \pointer_index(input, tid, 1\2);
requires \pointer_index(flag_before_prefix, tid, 1\2);
requires \pointer_index(flag_after_prefix, tid, 1);
requires |temp_seq| == ExpTwo(k);
requires temp_seq == psum2(flag_seq);
//requires temp_seq[tid] == flag_after_prefix[indicator];
requires |flag_seq| == ExpTwo(k);
requires flag_seq[tid] == flag_before_prefix[indicator];
requires |inp| == ExpTwo(k);
requires (inp[tid] == input[tid]);
ensures indicator == tid;
ensures tid >= 0 && tid < ExpTwo(k);
ensures |temp_seq| == ExpTwo(k);
ensures temp_seq == psum2(flag_seq);
ensures \pointer_index(input, tid, 1\2);
ensures \pointer_index(flag_before_prefix, tid, 1\2);
ensures \pointer_index(flag_after_prefix, tid, 1\2);
ensures flag_before_prefix[tid] == 1 ==> flag_after_prefix[tid] >= 0 && flag_after_prefix[tid] < ExpTwo(M);
ensures flag_before_prefix[tid] == 1 ==> \pointer_index(output, flag_after_prefix[tid], 1);
//ensures temp_seq[tid] == flag_after_prefix[indicator];
ensures |flag_seq| == ExpTwo(k);
ensures flag_seq[tid] == flag_before_prefix[indicator];
ensures |inp| == ExpTwo(k);
ensures (inp[tid] == input[tid]);
@*/
__syncthreads();
if(flag_before_prefix[tid] == 1){
output[flag_after_prefix[tid]] = input[tid];
//@ assert (output[flag_after_prefix[tid]] == input[tid]);
}
//@ assert flag_before_prefix[tid] == 1 ==> (output[flag_after_prefix[tid]] == input[tid]);
//@ ghost seq<int> temporary;
//@ ghost temporary = compact(inp, flag_seq);
//@ assert temporary == compact(inp, flag_seq);
//@ assert |temporary| == ExpTwo(M);
//@ assert intsum(Take(flag_seq, tid)) >= 0;
//@ assume temp_seq[tid] == flag_after_prefix[indicator];
//@ assert flag_after_prefix[tid] >= 0;
//@ assert \pointer_index(input, tid, 1\2);
//@ assert \pointer_index(flag_before_prefix, tid, 1\2);
//@ assert \pointer_index(flag_after_prefix, tid, 1\2);
//@ assert flag_before_prefix[tid] == 1 ==> \pointer_index(output, flag_after_prefix[tid], 1);
//@ assert flag_before_prefix[tid] == flag_seq[tid];
//@ assert flag_before_prefix[tid] == 1 ==> (lemma_correctness(inp, flag_seq, tid));
//@ assert flag_before_prefix[tid] == 1 ==> inp[tid] == get(compact(inp, flag_seq), intsum(Take(flag_seq, tid)));
// assert temporary == compact(inp, flag_seq);
//@ assert flag_before_prefix[tid] == 1 ==> (inp[tid] == get(temporary, intsum(Take(flag_seq, tid))));
//@ assert (inp[tid] == input[tid]);
//@ assert flag_before_prefix[tid] == 1 ==> (input[tid] == get(temporary, flag_after_prefix[tid]));
// assert flag_before_prefix[tid] == 1 ==> (output[flag_after_prefix[tid]] == input[tid]);
//@ assert flag_before_prefix[tid] == 1 ==> (output[flag_after_prefix[tid]] == get(temporary, flag_after_prefix[tid]));
}
| StreamCompaction.cu | /***********************************************************************************
Created by Mohsen Safari.
************************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
////////////////////////////////////////////////////////////////////////////////
// Pure Functions
////////////////////////////////////////////////////////////////////////////////
/*@
requires 0 <= p;
ensures p < \result;
pure int ExpTwo(int p) = 0 < p ? 2 * ExpTwo(p - 1) : 1;
@*/
/*@
ensures |xs| == 0 ==> \result == 0;
ensures |xs| == 1 ==> \result == head(xs);
pure int intsum(seq<int> xs) =
0 < |xs| ? head(xs) + intsum(tail(xs)) : 0;
@*/
/*@
requires n <= |xs|;
ensures n < 0 ==> |Take(xs, n)| == 0;
ensures 0 <= n ==> |Take(xs, n)| == n;
ensures (\forall int i; 0 <= i && i < n; xs[i] == get(Take(xs, n), i));
pure seq<int> Take(seq<int> xs, int n) =
0 < n ? seq<int> { head(xs) } + Take(tail(xs), n - 1) : seq<int> { };
@*/
/*@
requires 0 <= i && i <= |xs|;
ensures |\result| == |xs| - i;
ensures (\forall int j; 0 <= j && j < |\result|; \result[j] == intsum(Take(xs, i+j)));
pure seq<int> psum(seq<int> xs, int i) =
i < |xs| ? seq<int> { intsum(Take(xs, i)) } + psum(xs, i + 1) : seq<int> { };
@*/
/*@
ensures |\result| == |xs|;
ensures (\forall int j; 0 <= j && j < |\result|; \result[j] == intsum(Take(xs, j)));
pure seq<int> psum2(seq<int> xs) = psum(xs, 0);
@*/
/*@
requires |xs| >= 0;
ensures |xs| == 0 ==> \result == xs;
ensures |xs| == 1 ==> \result == xs;
ensures |xs| == 2 ==> \result == seq<int> { head(xs) + head(tail(xs)) };
ensures |xs| % 2 == 0 ==> |\result| == |xs| / 2;
pure seq<int> implode(seq<int> xs) =
1 < |xs| ? seq<int> { head(xs) + head(tail(xs)) } + implode(tail(tail(xs))) : xs;
@*/
/*@
requires 0 <= p;
pure int exp(int n, int p) = 0 < p ? n * exp(n, p - 1) : 1;
@*/
/*@
requires 0 <= n;
requires n < |xs|;
pure int get(seq<int> xs, int n) = xs[n];
@*/
/*@
requires k > 0;
requires |xs| == ExpTwo(k);
requires i >= 0 && i <= |xs|;
requires 1 <= lvl && lvl <= k;
requires stride == ExpTwo(lvl-1);
requires stride > 0 && stride < |xs|;
ensures |\result| == |xs| - i;
ensures (\forall int j; j >= 0 && j < |\result|; ((i < |xs|) && ((i+j) >= stride) && (((i+j) % (2*stride)) == (2*stride-1))) ==> \result[j] == xs[i+j] + xs[i+j - stride]);
ensures (\forall int j; j >= 0 && j < |\result|; ((i < |xs|) && (((i+j) < stride) || (((i+j) % (2*stride)) != (2*stride-1)))) ==> \result[j] == xs[i+j]);
pure seq<int> up(seq<int> xs, int stride, int i, int k, int lvl) =
i < |xs| ? (
((i % (2*stride)) == (2*stride-1) && (i >= stride)?
seq<int> {xs[i] + xs[i-stride]} + up(xs, stride, i+1, k, lvl)
:
seq<int> {xs[i]} + up(xs, stride, i+1, k, lvl) ))
:
seq<int> {};
@*/
/*@
requires (\forall int i; 0 <= i && i < |xs|; xs[i] == 0 || xs[i] == 1);
ensures \result == intsum(xs);
ensures \result >= 0;
pure int count(seq<int> xs) =
|xs| > 0 ? ( head(xs) == 1 ? 1+count(tail(xs)) : count(tail(xs)) ) : 0;
@*/
/*@
requires |flags| == |input|;
requires (\forall int j; 0 <= j && j < |flags|; flags[j] == 0 || flags[j] == 1);
ensures |\result| == intsum(flags);
ensures 0 <= |\result| && |\result| <= |flags|;
pure seq<int> compact(seq<int> input, seq<int> flags) =
0 < |input| ?
(head(flags) == 1 ? seq<int> { head(input) } + compact(tail(input), tail(flags)) : compact(tail(input), tail(flags)))
:
seq<int> { };
@*/
////////////////////////////////////////////////////////////////////////////////////////Lemmas
/* The sum of a list of non-negative integers is itself non-negative. */
/*@
requires (\forall int i; 0 <= i && i < |xs|; 0 <= xs[i]);
ensures \result && 0 <= intsum(xs);
pure bool lemma_sum_nonneg(seq<int> xs);
@*/
/*@
ensures \result && intsum(seq<int> { }) == 0;
pure bool lemma_intsum_zero();
@*/
/*@
ensures \result && psum2(seq<int> { }) == seq<int> { };
pure bool lemma_psum_zero();
@*/
/*@
ensures \result && intsum(seq<int> { x }) == x;
pure bool lemma_intsum_single(int x);
@*/
/*@
requires |xs| == 1;
ensures \result && psum2(xs) == seq<int> {0};
pure bool lemma_psum_single(seq<int> xs);
@*/
/*@
requires |xs| >= 0;
requires |ys| >= 0;
ensures \result && |xs| == 0 ==> intsum(xs + ys) == intsum(ys);
ensures \result && |ys| == 0 ==> intsum(xs + ys) == intsum(xs);
ensures \result && |xs + ys| == |xs| + |ys|;
ensures \result && intsum(tail(xs) + ys) == intsum(tail(xs)) + intsum(ys);
ensures \result && intsum(xs + ys) == intsum(xs) + intsum(ys);
pure bool lemma_intsum_app(seq<int> xs, seq<int> ys);
@*/
/*@
requires |xs| <= 1;
ensures \result && xs == implode(xs);
pure bool lemma_implode_base(seq<int> xs);
@*/
/*@
ensures \result && intsum(xs) == intsum(implode(xs));
pure bool lemma_implode_sum(seq<int> xs);
@*/
/*@
requires 0 < n;
ensures \result && ExpTwo(n) == 2 * ExpTwo(n - 1);
pure bool lemma_exp2_red_mult(int n);
@*/
/*@
requires 0 < n;
ensures \result && ExpTwo(n) / 2 == ExpTwo(n - 1);
pure bool lemma_exp2_red_div(int n);
@*/
/*@
requires 0 <= n;
ensures \result && 0 < ExpTwo(n);
pure bool lemma_exp2_positive(int n);
@*/
/*@
requires 0 <= i;
requires i <= j;
ensures \result && ExpTwo(i) <= ExpTwo(j);
pure bool lemma_exp2_leq(int i, int j);
@*/
/*@
requires i >= 0 && j >= 0;
requires ExpTwo(i) == ExpTwo(j);
ensures \result && i == j;
pure bool power_two_lemma(int i, int j);
@*/
/*@
requires |xs| % 2 == 0;
ensures \result && |implode(xs)| == |xs| / 2;
pure bool lemma_implode_length_mod_two(seq<int> xs);
@*/
/*@
requires 0 < n && |xs| == ExpTwo(n);
ensures \result && |implode(xs)| == ExpTwo(n - 1);
pure bool lemma_implode_red_exp2(seq<int> xs, int n);
@*/
/*@
requires 0 < i;
requires i < |xs|;
ensures \result && get(tail(xs), i - 1) == xs[i];
pure bool lemma_intseq_index_tail(seq<int> xs, int i);
@*/
/*@
requires |xs| % 2 == 0;
requires 0 <= i && i < |implode(xs)|;
requires (2 * i) < |xs|;
requires (2 * i + 1) < |xs|;
ensures \result && get(implode(xs), i) == xs[2 * i] + xs[2 * i + 1];
pure bool lemma_implode_get(seq<int> xs, int i);
@*/
/*@
requires |xs| % 2 == 0;
requires |implode(xs)| == |xs|/2;
ensures \result && (\forall int i; 0 <= i && i < |implode(xs)|; get(implode(xs), i) == xs[2 * i] + xs[2 * i + 1]);
pure bool lemma_implode_get_all(seq<int> xs);
@*/
/*@
requires |xs| == 2 * |ys|;
requires 0 <= |ys|;
requires (\forall int i; 0 <= i && i < |ys|; ys[i] == xs[2*i] + xs[2*i+1]);
ensures \result && ys == implode(xs);
pure bool lemma_implode_rel(seq<int> xs, seq<int> ys);
@*/
/*@
requires 0 <= i && i < |xs|;
ensures \result && get(psum2(xs), i) == intsum(Take(xs, i));
pure bool lemma_psum_get(seq<int> xs, int i);
@*/
/*@
ensures \result && (\forall int i; 0 <= i && i < |xs|; get(psum2(xs), i) == intsum(Take(xs, i)));
pure bool lemma_psum_get_all(seq<int> xs);
@*/
/*@
requires 0 < n && n <= |xs|;
ensures \result && Take(xs, n) == Take(xs, n - 1) + seq<int> { xs[n - 1] };
pure bool missing_lemma_2(seq<int> xs, int n);
@*/
/*@
requires |xs| % 2 == 0;
requires |ys| % 2 == 0;
ensures \result && implode(xs + ys) == implode(xs) + implode(ys);
pure bool missing_lemma_3(seq<int> xs, seq<int> ys);
@*/
/*@
ensures \result && xs + (ys + zs) == (xs + ys) + zs;
pure bool intseq_concat_assoc(seq<int> xs, seq<int> ys, seq<int> zs);
@*/
/*@
requires |xs| % 2 == 0;
requires 0 <= n && n < |implode(xs)|;
requires |implode(xs)| == |xs| / 2;
ensures \result && Take(implode(xs), n) == implode(Take(xs, 2 * n));
pure bool missing_lemma(seq<int> xs, int n);
@*/
/*@
requires |xs| % 2 == 0;
requires |implode(xs)| == |xs|/2;
requires 0 <= i && i < |implode(xs)|;
requires 2 * i < |xs|;
ensures \result && get(psum2(implode(xs)), i) == intsum(Take(xs, 2 * i));
pure bool lemma_psum_Take2(seq<int> xs, int i);
@*/
/*@
requires |xs| % 2 == 0;
requires |implode(xs)| == |xs|/2;
requires 0 <= i && i < |implode(xs)|;
requires 2 * i < |xs|;
ensures \result && get(psum2(implode(xs)), i) == get(psum2(xs), 2 * i);
pure bool lemma_get_psum_implode(seq<int> xs, int i);
@*/
/*@
requires 0 <= i;
requires 2 * i + 1 < |xs|;
ensures \result && get(psum2(xs), 2 * i + 1) == get(psum2(xs), 2 * i) + get(xs, 2 * i);
pure bool lemma_combine_psum(seq<int> xs, int i);
@*/
/*@
requires (\forall int j; 0 <= j && j < |xs|; xs[j] == 0 || xs[j] == 1);
ensures \result && intsum(xs) >= 0;
pure bool lemma_intsum_positive(seq<int> xs);
@*/
/*@
requires i >= 0;
requires i < |xs|;
requires (\forall int j; 0 <= j && j < i; xs[j] == 0 || xs[j] == 1);
ensures \result && i < |Take(xs, i)| ==> intsum(Take(xs, i)) >= 0;
pure bool lemma_intsum_flag(seq<int> xs, int i) = true;
@*/
/* assuming all elements in `xs` are non-negative, the sum of any sublist of `xs` will not be bigger than the sum of `xs`. */
/*@
requires n <= |xs|;
requires (\forall int i; 0 <= i && i < |xs|; 0 <= xs[i]);
ensures \result && 0 <= intsum(Take(xs, n)) && intsum(Take(xs, n)) <= intsum(xs);
pure bool lemma_take_sum(seq<int> xs, int n);
@*/
/*@
requires 0 <= n && n < |xs|;
requires (\forall int i; 0 <= i && i < |xs|; xs[i] == 0 || xs[i] == 1);
ensures \result && intsum(Take(xs, n)) < |xs|;
pure bool lemma_flags_take_size(seq<int> xs, int n);
@*/
/*@
requires 0 <= n && n < |xs|;
requires (\forall int i; 0 <= i && i < |xs|; xs[i] == 0 || xs[i] == 1);
requires xs[n] == 1;
ensures \result && intsum(Take(xs, n)) < intsum(xs);
pure bool lemma_sum_pos_flag(seq<int> xs, int n);
@*/
/*@
requires 0 <= n && n < |flags|;
requires flags[n] == 1;
requires |flags| == |input|;
requires (\forall int j; 0 <= j && j < |flags|; flags[j] == 0 || flags[j] == 1);
ensures \result && 0 <= intsum(Take(flags, n));
ensures \result && intsum(Take(flags, n)) < intsum(flags);
ensures \result && intsum(Take(flags, n)) < |flags|;
ensures \result && input[n] == get(compact(input, flags), intsum(Take(flags, n)));
ensures \result && input[n] == get(compact(input, flags), get(psum2(flags), n));
pure bool lemma_correctness(seq<int> input, seq<int> flags, int n);
@*/
////////////////////////////////////////////////////////////////////////////////
//Kernel
////////////////////////////////////////////////////////////////////////////////
/*@
context_everywhere flag_after_prefix != NULL;
context_everywhere flag_before_prefix != NULL;
context_everywhere input != NULL;
context_everywhere output != NULL;
context_everywhere k == 10;
context_everywhere M == 8;
context_everywhere opencl_gsize == ExpTwo(k);
context_everywhere opencl_gcount == 1;
requires \ltid < (ExpTwo(k)+2-1)/2 ==> \pointer_index(flag_after_prefix, 2*\ltid, write);
requires \ltid < (ExpTwo(k)-1+2-1)/2 ==> \pointer_index(flag_after_prefix, 2*\ltid+1, write);
requires (\ltid >= 0 && \ltid <= 0) ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % 1 != 0; \pointer_index(flag_after_prefix, i, write));
requires \pointer_index(input, \ltid, 1\2);
requires \pointer_index(flag_before_prefix, \ltid, 1\2);
requires flag_before_prefix[\ltid] == 0 || flag_before_prefix[\ltid] == 1;
requires \ltid < ExpTwo(M) ==> \pointer_index(output, \ltid, write);
@*/
__global__ void CUDA_Kernel_Stream_Compaction(int* input, int* output, int* flag_before_prefix, int* flag_after_prefix, int k, int M)
{
int tid = threadIdx.x;
//@ assert tid == \ltid;
//@ ghost seq<int> flag_seq;
//@ assume |flag_seq| == ExpTwo(k);
//@ assume (\forall int i; 0 <= i && i < ExpTwo(k); flag_seq[i] == 0 || flag_seq[i] == 1);
//@ assume (flag_seq[tid] == 0 || flag_seq[tid] == 1) && count(flag_seq) == ExpTwo(M);
//@ assume (2 * tid < ExpTwo(k)) ==> flag_after_prefix[2 * tid] == flag_seq[2 * tid];
//@ assume (2 * tid + 1 < ExpTwo(k)) ==> flag_after_prefix[2 * tid + 1] == flag_seq[2 * tid + 1];
//@ assume flag_seq[tid] == flag_before_prefix[tid];
//@ assume 2 * tid < ExpTwo(k) ==> flag_after_prefix[2 * tid] == 0 || flag_after_prefix[2 * tid] == 1;
//@ assume 2 * tid + 1 < ExpTwo(k) ==> flag_after_prefix[2 * tid + 1] == 0 || flag_after_prefix[2 * tid + 1] == 1;
//@ ghost seq<int> inp;
//@ assume |inp| == ExpTwo(k) && inp[tid] == input[tid];
int indicator = 2 * tid + 1;
int stride = 1;
int lvl = 1;
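// Up-sweep sketch: thread tid starts at indicator = 2*tid + 1 and in every round adds
// flag_after_prefix[indicator - stride] into flag_after_prefix[indicator], then doubles the
// stride; after the last round flag_after_prefix[ExpTwo(k) - 1] holds intsum(flag_seq).
// The Matrix / Matrix_UP ghost sequences below mirror this computation level by level.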
//@ ghost seq<seq<int> > Matrix_UP = seq<seq<int> > { flag_seq };
//@ assert (\forall int i; 0 < i && i < lvl; Matrix_UP[i] == up(Matrix_UP[i - 1], stride/ExpTwo(lvl-i), 0, k, i));
//@ ghost seq<seq<int> > Matrix = seq<seq<int> > { flag_seq };
/*@
loop_invariant k > 0;
loop_invariant tid >= 0 && tid < ExpTwo(k);
loop_invariant stride > 0;
loop_invariant 1 <= lvl;
loop_invariant stride == ExpTwo(lvl-1);
loop_invariant lvl <= k+1;
loop_invariant indicator + 1 == ExpTwo(lvl)*(tid+1);
loop_invariant indicator + 1 == 2*stride*(tid+1);
loop_invariant indicator > 0;
loop_invariant stride <= ExpTwo(k);
loop_invariant indicator < ExpTwo(k) ==> \pointer_index(flag_after_prefix, indicator, 1);
loop_invariant indicator < ExpTwo(k) && indicator >= stride ==> \pointer_index(flag_after_prefix, indicator - stride, 1);
loop_invariant tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % stride != 0; \pointer_index(flag_after_prefix, i, 1));
loop_invariant (tid==0 && (stride == ExpTwo(k))) ==> \pointer_index(flag_after_prefix, ExpTwo(k) - 1, 1);
loop_invariant |Matrix_UP| == lvl;
loop_invariant (\forall int i; 0 <= i && i < lvl; |Matrix_UP[i]| == ExpTwo(k));
loop_invariant lvl == 1 ==> Matrix_UP[lvl - 1] == flag_seq;
loop_invariant lvl > 1 && lvl < |Matrix_UP| ==> Matrix_UP[lvl] == up(Matrix_UP[lvl - 1], (stride/2) - 1, 0, k, lvl - 1);
loop_invariant indicator < ExpTwo(k) ==> Matrix_UP[lvl - 1][indicator] == flag_after_prefix[indicator];
loop_invariant indicator < ExpTwo(k) && indicator >= stride ==> Matrix_UP[lvl - 1][indicator - stride] == flag_after_prefix[indicator - stride];
loop_invariant lvl == k+1 ==> Matrix_UP[lvl-1][ExpTwo(k) - 1] == intsum(flag_seq);
loop_invariant lvl == k+1 ==> Matrix_UP[lvl-1][(ExpTwo(k) - 1)/2] == intsum(Take(flag_seq, |flag_seq|/2));
loop_invariant |Matrix| == lvl;
loop_invariant (\forall int i; 0 <= i && i < lvl; 0 <= |Matrix[i]| && |Matrix[i]| <= ExpTwo(k));
loop_invariant (\forall int i; 0 <= i && i < lvl; |Matrix[i]| == ExpTwo(k - i));
loop_invariant (\forall int i; 0 < i && i < lvl; Matrix[i] == implode(Matrix[i - 1]));
loop_invariant (\forall int i; 0 <= i && i < lvl; intsum(Matrix[i]) == intsum(flag_seq));
loop_invariant Matrix[0] == flag_seq;
loop_invariant indicator < ExpTwo(k) && 2 * tid + 1 < |Matrix[lvl - 1]| ==> flag_after_prefix[indicator] == Matrix[lvl - 1][2 * tid + 1];
loop_invariant indicator < ExpTwo(k) && indicator >= stride && 2 * tid < |Matrix[lvl - 1]| ==> flag_after_prefix[indicator - stride] == Matrix[lvl - 1][2 * tid];
@*/
while(stride < ExpTwo(k))
{
if(indicator < ExpTwo(k) && indicator >= stride)
{
//@ assert 2 * tid + 1 < |Matrix[lvl - 1]| ==> flag_after_prefix[indicator] == Matrix[lvl - 1][2 * tid + 1];
//@ assert 2 * tid < |Matrix[lvl - 1]| ==> flag_after_prefix[indicator - stride] == Matrix[lvl - 1][2 * tid];
flag_after_prefix[indicator] = flag_after_prefix[indicator] + flag_after_prefix[indicator - stride];
//@ assert 2 * tid + 1 < |Matrix[lvl - 1]| ==> flag_after_prefix[indicator] == Matrix[lvl - 1][2 * tid + 1] + Matrix[lvl - 1][2 * tid];
}
//@ assert lemma_implode_length_mod_two(Matrix[lvl - 1]);
//@ assert lemma_implode_sum(Matrix[lvl - 1]);
//@ assert lemma_implode_get_all(Matrix[lvl - 1]);
//@ ghost Matrix = Matrix + seq<seq<int> > { implode(Matrix[lvl - 1]) };
//@ ghost tid < |implode(Matrix[lvl - 1])| ? (lemma_implode_get(Matrix[lvl - 1], tid) && (2 * tid + 1 < |Matrix[lvl - 1]| ==> get(implode(Matrix[lvl - 1]), tid) == Matrix[lvl - 1][2 * tid] + Matrix[lvl - 1][2 * tid + 1]) && (indicator < ExpTwo(k) && indicator >= stride ==> flag_after_prefix[indicator] == Matrix[lvl - 1][2 * tid + 1] + Matrix[lvl - 1][2 * tid]) && (Matrix[lvl] == implode(Matrix[lvl - 1])) && (indicator < ExpTwo(k) && indicator >= stride ==> flag_after_prefix[indicator] == Matrix[lvl][tid])) : true;
/*if(tid < |implode(Matrix[lvl - 1])|){
lemma_implode_get(Matrix[lvl - 1], tid);
assert 2 * tid + 1 < |Matrix[lvl - 1]| ==> get(implode(Matrix[lvl - 1]), tid) == Matrix[lvl - 1][2 * tid] + Matrix[lvl - 1][2 * tid + 1];
assert indicator < flag_after_prefix.length && indicator >= stride ==> flag_after_prefix[indicator] == Matrix[lvl - 1][2 * tid + 1] + Matrix[lvl - 1][2 * tid];
assert Matrix[lvl] == implode(Matrix[lvl - 1]);
assert indicator < flag_after_prefix.length && indicator >= stride ==> flag_after_prefix[indicator] == Matrix[lvl][tid];
}*/
/*@
context_everywhere k > 0;
context_everywhere 1 <= lvl && lvl <= k;
context_everywhere |Matrix| == lvl + 1;
requires tid >= 0 && tid < ExpTwo(k);
requires stride == ExpTwo(lvl-1);
requires stride > 0 && stride < ExpTwo(k);
requires indicator + 1 == ExpTwo(lvl)*(tid+1);
requires indicator + 1 == 2*stride*(tid+1);
requires indicator > 0;
requires indicator < ExpTwo(k) ==> \pointer_index(flag_after_prefix, indicator, 1);
requires indicator < ExpTwo(k) && indicator >= stride ==> \pointer_index(flag_after_prefix, indicator - stride, 1);
requires tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % stride != 0; \pointer_index(flag_after_prefix, i, 1));
ensures tid >= 0 && tid < ExpTwo(k);
ensures 2 * stride == ExpTwo(lvl);
ensures 2 * stride > 0 && 2 * stride <= ExpTwo(k);
ensures 2 * indicator + 2 == ExpTwo(lvl+1)*(tid+1);
ensures 2 * indicator + 2 == 2*stride*(tid+1);
ensures 2 * indicator + 1 > 0;
ensures 2 * indicator + 1 < ExpTwo(lvl) ==> \pointer_index(flag_after_prefix, 2 * indicator + 1, 1);
ensures 2 * indicator + 1 < ExpTwo(lvl) && 2 * indicator + 1 >= 2 * stride ==> \pointer_index(flag_after_prefix, 2 * indicator + 1 - 2 * stride, 1);
ensures tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(lvl) && (i + 1) % (2 * stride) != 0; \pointer_index(flag_after_prefix, i, 1));
ensures (tid==0 && (2 * stride == ExpTwo(lvl))) ==> \pointer_index(flag_after_prefix, ExpTwo(k) - 1, 1);
@*/
__syncthreads();
//@ ghost Matrix_UP = Matrix_UP + seq<seq<int> > { up(Matrix_UP[lvl - 1], stride, 0, k, lvl) };
//@ assert (indicator < ExpTwo(k)) && (indicator >= stride) ==> Matrix_UP[lvl][indicator] == Matrix_UP[lvl - 1][indicator] + Matrix_UP[lvl - 1][indicator-stride];
indicator = 2 * indicator + 1;
stride = 2 * stride;
lvl = lvl + 1;
//@ assert (\forall int i; 0 < i && i < lvl; Matrix_UP[i] == up(Matrix_UP[i - 1], stride/ExpTwo(lvl-i), 0, k, i));
//@ assert stride == ExpTwo(lvl-1);
//@ assert lemma_exp2_red_mult(lvl);
//@ assert ExpTwo(lvl) == 2 * ExpTwo(lvl - 1);
//@ assert 2*stride == ExpTwo(lvl);
//@ assert indicator + 1 == ExpTwo(lvl)*(tid+1);
//@ assert indicator + 1 == 2*stride*(tid+1);
}
//@ assert stride == ExpTwo(lvl-1);
//@ assert ExpTwo(lvl-1) == ExpTwo(k);
//@ assert stride == ExpTwo(k);
//@ assert power_two_lemma(lvl-1, k);
//@ assert lvl == k + 1;
//@ assert indicator < ExpTwo(k) ==> Matrix_UP[lvl - 1][indicator] == flag_after_prefix[indicator];
//@ assert |Matrix| == lvl;
//@ assert (\forall int i; 0 <= i && i < k + 1; |Matrix[i]| == ExpTwo(k - i));
//@ assert (\forall int i; 0 < i && i < k + 1; Matrix[i] == implode(Matrix[i - 1]));
//@ assert (\forall int i; 0 <= i && i < k + 1; intsum(Matrix[i]) == intsum(flag_seq));
//@ assert |Matrix[k]| == 1;
//@ assert lemma_intsum_single(Matrix[k][0]);
//@ assert intsum(Matrix[k]) == intsum(flag_seq);
//@ assert Matrix[k] == seq<int>{intsum(flag_seq)};
//@ assert Matrix[0] == flag_seq;
//@ assert (\forall int i; 0 <= i && i < k + 1; 0 < |Matrix[i]| && |Matrix[i]| <= ExpTwo(k));
/////////////////////////////////////////////////////////////////////////////////
//@ assert indicator < ExpTwo(k) && indicator >= stride && 2 * tid < |Matrix[lvl - 1]| ==> flag_after_prefix[indicator - stride] == Matrix[lvl - 1][2 * tid];
/*@
context_everywhere k > 0;
context_everywhere |Matrix_UP| == k + 1;
context_everywhere |Matrix| == k + 1;
context_everywhere lvl == k + 1;
context stride == ExpTwo(k);
context indicator + 1 == ExpTwo(lvl)*(tid+1);
context indicator + 1 == 2*stride*(tid+1);
context indicator > 0;
context stride > 0 ;
requires indicator < ExpTwo(k) ==> \pointer_index(flag_after_prefix, indicator, 1);
requires indicator < ExpTwo(k) && indicator >= stride ==> \pointer_index(flag_after_prefix, indicator - stride, 1);
requires tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % stride != 0; \pointer_index(flag_after_prefix, i, 1));
requires (tid==0 && (stride == ExpTwo(k))) ==> \pointer_index(flag_after_prefix, ExpTwo(k) - 1, 1);
requires (\forall int i; 0 <= i && i <= k; |Matrix_UP[i]| == ExpTwo(k));
requires (\forall int i; 0 <= i && i < lvl; |Matrix[i]| == ExpTwo(k - i));
requires (\forall int i; 0 <= i && i < lvl; 0 <= |Matrix[i]| && |Matrix[i]| <= ExpTwo(k));
requires indicator < ExpTwo(k) && indicator >= stride ==> Matrix_UP[lvl - 1][indicator] == flag_after_prefix[indicator];
requires indicator < ExpTwo(k) && indicator >= stride ==> Matrix_UP[lvl - 1][indicator - stride] == flag_after_prefix[indicator - stride];
requires indicator < ExpTwo(k) && indicator >= stride && 2 * tid < |Matrix[lvl - 1]| ==> flag_after_prefix[indicator - stride] == Matrix[lvl - 1][2 * tid];
context tid >= 0 && tid < ExpTwo(k);
//ensures stride == ExpTwo(k) / 2;
//ensures indicator == ExpTwo(k) * tid + ExpTwo(k) - 1;
//ensures stride > 0 ;
//ensures indicator > 0;
ensures ExpTwo(k) * \ltid + ExpTwo(k) - 1 < ExpTwo(k) ==> \pointer_index(flag_after_prefix, ExpTwo(k) * \ltid + ExpTwo(k) - 1, 1);
ensures ExpTwo(k) * \ltid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * \ltid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> \pointer_index(flag_after_prefix, ExpTwo(k) * \ltid + ExpTwo(k) - 1 - ExpTwo(k) / 2, 1);
ensures tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % (ExpTwo(k) / 2) != 0; \pointer_index(flag_after_prefix, i, 1));
ensures (\forall int i; 0 <= i && i <= k; |Matrix_UP[i]| == ExpTwo(k));
ensures (\forall int i; 0 <= i && i < lvl; |Matrix[i]| == ExpTwo(k - i));
ensures (\forall int i; 0 <= i && i < lvl; 0 <= |Matrix[i]| && |Matrix[i]| <= ExpTwo(k));
//ensures ExpTwo(k) * \ltid + ExpTwo(k) - 1 < ExpTwo(k) ==> Matrix_UP[lvl - 1][ExpTwo(k) * \ltid + ExpTwo(k) - 1] == flag_after_prefix[ExpTwo(k) * \ltid + ExpTwo(k) - 1];
//ensures ExpTwo(k) * \ltid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * \ltid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> Matrix_UP[lvl - 1][ExpTwo(k) * \ltid + ExpTwo(k) - 1 - ExpTwo(k) / 2] == flag_after_prefix[ExpTwo(k) * \ltid + ExpTwo(k) - 1 - ExpTwo(k) / 2];
//ensures 2 * tid < |Matrix[lvl-2]| && ExpTwo(k) * \ltid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * \ltid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> flag_after_prefix[ExpTwo(k) * \ltid + ExpTwo(k) - 1 - ExpTwo(k) / 2] == get(Matrix[lvl-2], 2 * tid);
@*/
__syncthreads();
// (instability) These come from the last three postconditions in the previous barrier:
//@ assume ExpTwo(k) * tid + ExpTwo(k) - 1 < ExpTwo(k) ==> Matrix_UP[lvl - 1][ExpTwo(k) * tid + ExpTwo(k) - 1] == flag_after_prefix[ExpTwo(k) * tid + ExpTwo(k) - 1];
//@ assume ExpTwo(k) * tid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * tid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> Matrix_UP[lvl - 1][ExpTwo(k) * tid + ExpTwo(k) - 1 - ExpTwo(k) / 2] == flag_after_prefix[ExpTwo(k) * tid + ExpTwo(k) - 1 - ExpTwo(k) / 2];
//@ assume 2 * tid < |Matrix[lvl-2]| && ExpTwo(k) * tid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * tid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> flag_after_prefix[ExpTwo(k) * tid + ExpTwo(k) - 1 - ExpTwo(k) / 2] == get(Matrix[lvl-2], 2 * tid);
/////////////////////////////////////////////////////////////////////////////////////// Down
indicator = ExpTwo(k) * tid + ExpTwo(k) - 1; // flag_after_prefix.length * tid + flag_after_prefix.length - 1;
stride = ExpTwo(k) / 2; // flag_after_prefix.length / 2;
lvl = k - 1; //lvl - 2;
int temp;
//@ ghost seq<int> temp_seq = seq<int> { 0 };
//@ assert ExpTwo(k) * tid + ExpTwo(k) - 1 < ExpTwo(k) ==> Matrix_UP[lvl + 1][indicator] == flag_after_prefix[indicator];
//@ assert ExpTwo(k) * tid + ExpTwo(k) - 1 < ExpTwo(k) && ExpTwo(k) * tid + ExpTwo(k) - 1 >= ExpTwo(k) / 2 ==> Matrix_UP[lvl + 1][indicator - stride] == flag_after_prefix[indicator - stride];
if(indicator < ExpTwo(k))
{
flag_after_prefix[indicator] = 0;
}
/*@
loop_invariant k > 0;
loop_invariant tid >= 0 && tid < ExpTwo(k);
loop_invariant lvl <= k - 1;
loop_invariant lvl >= -1;
loop_invariant lvl >= 0 ==> stride == ExpTwo(lvl);
loop_invariant lvl == -1 ==> stride == 0;
loop_invariant stride == 0 ==> lvl == -1;
loop_invariant stride >= 0;
loop_invariant indicator >= 0;
loop_invariant indicator+1 == ExpTwo(lvl+1)*(tid+1);
loop_invariant indicator < ExpTwo(k) ==> \pointer_index(flag_after_prefix, indicator, 1);
loop_invariant lvl >= 0 && indicator < ExpTwo(k) && indicator >= stride ==> \pointer_index(flag_after_prefix, indicator - stride, 1);
loop_invariant (tid==0 && stride > 0) ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % stride != 0; \pointer_index(flag_after_prefix, i, 1));
//loop_invariant lvl == -1 ==> \pointer_index(flag_after_prefix, tid, 1);
//loop_invariant lvl == -1 ==> indicator == tid;
//loop_invariant indicator == tid ==> lvl == -1;
loop_invariant |temp_seq| == ExpTwo(k - (lvl + 1));
loop_invariant 0 < |temp_seq| && |temp_seq| <= ExpTwo(k);
loop_invariant temp_seq == psum2(Matrix[lvl + 1]);
loop_invariant (\forall int i; 0 <= i && i < k + 1; 0 < |Matrix[i]| && |Matrix[i]| <= ExpTwo(k));
loop_invariant (\forall int i; 0 <= i && i < k + 1; |Matrix[i]| == ExpTwo(k - i));
loop_invariant (\forall int i; 0 <= i && i < k + 1; intsum(Matrix[i]) == intsum(flag_seq));
loop_invariant (\forall int i; 0 < i && i < k + 1; Matrix[i] == implode(Matrix[i - 1]));
loop_invariant Matrix[0] == flag_seq;
loop_invariant Matrix[k] == seq<int>{ intsum(flag_seq) };
loop_invariant tid < |temp_seq| && indicator < ExpTwo(k) ==> temp_seq[tid] == flag_after_prefix[indicator];
loop_invariant lvl >= 0 && 2 * tid < |Matrix[lvl]| && indicator < ExpTwo(k) && indicator >= stride ==> flag_after_prefix[indicator - stride] == get(Matrix[lvl], 2 * tid);
@*/
while(stride >= 1)
{
if(indicator < ExpTwo(k) && indicator >= stride)
{
//@ assert tid < |temp_seq| ==> temp_seq[tid] == flag_after_prefix[indicator];
temp = flag_after_prefix[indicator];
//@ assert tid < |temp_seq| ==> temp == temp_seq[tid];
flag_after_prefix[indicator] = flag_after_prefix[indicator] + flag_after_prefix[indicator - stride];
//@ assert tid < |temp_seq| ==> flag_after_prefix[indicator] == temp_seq[tid] + flag_after_prefix[indicator - stride];
//@ assert 2 * tid < |Matrix[lvl]| ==> flag_after_prefix[indicator - stride] == get(Matrix[lvl], 2 * tid);
//@ assert 2 * tid < |Matrix[lvl]| && tid < |temp_seq| ==> flag_after_prefix[indicator] == temp_seq[tid] + get(Matrix[lvl], 2 * tid);
//@ assert tid < |Matrix[lvl + 1]| && tid < |temp_seq| ==> temp_seq[tid] == get(psum2(Matrix[lvl + 1]), tid);
//@ assert tid < |Matrix[lvl + 1]| && 2 * tid < |Matrix[lvl]| ==> flag_after_prefix[indicator] == get(psum2(Matrix[lvl + 1]), tid) + get(Matrix[lvl], 2 * tid);
//@ assert Matrix[lvl + 1] == implode(Matrix[lvl]);
//@ assert tid < |implode(Matrix[lvl])| && 2 * tid < |Matrix[lvl]| ==> flag_after_prefix[indicator] == get(psum2(implode(Matrix[lvl])), tid) + get(Matrix[lvl], 2 * tid);
//@ ghost tid < |implode(Matrix[lvl])| ? lemma_get_psum_implode(Matrix[lvl], tid) : true;
/*if(tid < |implode(Matrix[lvl])|){
lemma_get_psum_implode(Matrix[lvl], tid);
}*/
//@ assert tid < |implode(Matrix[lvl])| && 2 * tid < |Matrix[lvl]| ==> get(psum2(implode(Matrix[lvl])), tid) == get(psum2(Matrix[lvl]), 2 * tid);
//@ assert 2 * tid < |Matrix[lvl]| ==> flag_after_prefix[indicator] == get(psum2(Matrix[lvl]), 2 * tid) + get(Matrix[lvl], 2 * tid);
//@ ghost 2 * tid + 1 < |Matrix[lvl]| ? lemma_combine_psum(Matrix[lvl], tid) : true;
/*if(2 * tid + 1 < |Matrix[lvl]|){
lemma_combine_psum(Matrix[lvl], tid);
}*/
//@ assert 2 * tid + 1 < |Matrix[lvl]| ==> get(psum2(Matrix[lvl]), 2 * tid + 1) == get(psum2(Matrix[lvl]), 2 * tid) + get(Matrix[lvl], 2 * tid);
//@ assert 2 * tid + 1 < |Matrix[lvl]| ==> flag_after_prefix[indicator] == get(psum2(Matrix[lvl]), 2 * tid + 1);
//@ assert tid < |temp_seq| ==> temp == temp_seq[tid];
flag_after_prefix[indicator - stride] = temp;
//@ assert tid < |temp_seq| ==> flag_after_prefix[indicator - stride] == temp_seq[tid];
//@ assert tid < |Matrix[lvl + 1]| && tid < |temp_seq| ==> temp_seq[tid] == get(psum2(Matrix[lvl + 1]), tid);
//@ assert Matrix[lvl + 1] == implode(Matrix[lvl]);
//@ assert tid < |implode(Matrix[lvl])| && tid < |temp_seq| ==> temp_seq[tid] == get(psum2(implode(Matrix[lvl])), tid);
//@ ghost tid < |implode(Matrix[lvl])| ? lemma_get_psum_implode(Matrix[lvl], tid) : true;
/*if(tid < |implode(Matrix[lvl])|){
lemma_get_psum_implode(Matrix[lvl], tid);
}*/
//@ assert tid < |implode(Matrix[lvl])| && 2 * tid < |Matrix[lvl]| ==> get(psum2(implode(Matrix[lvl])), tid) == get(psum2(Matrix[lvl]), 2 * tid);
//@ assert 2 * tid < |Matrix[lvl]| && tid < |temp_seq| ==> temp_seq[tid] == get(psum2(Matrix[lvl]), 2 * tid);
//@ assert 2 * tid < |Matrix[lvl]| ==> flag_after_prefix[indicator - stride] == get(psum2(Matrix[lvl]), 2 * tid);
}
//@ ghost temp_seq = psum2(Matrix[lvl]);
//@ assert 2 * tid < |temp_seq| && indicator < ExpTwo(k) && indicator >= stride ==> flag_after_prefix[indicator - stride] == temp_seq[2 * tid];
//@ assert 2 * tid + 1 < |temp_seq| && indicator < ExpTwo(k) && indicator >= stride ==> flag_after_prefix[indicator] == temp_seq[2 * tid + 1];
/*@
context_everywhere lvl >= 0 && lvl <= k - 1;
requires tid >= 0 && tid < ExpTwo(k);
context_everywhere |temp_seq| == ExpTwo(k - lvl);
context_everywhere 0 < |temp_seq| && |temp_seq| <= ExpTwo(k);
context_everywhere |Matrix| == k + 1;
//context lvl - 1 == -1 ==> (indicator - 1) / 2 == \ltid;
//context (indicator - 1) / 2 == \ltid ==> lvl - 1 == -1;
requires indicator >= 0;
requires stride >= 1 ;
requires stride == ExpTwo(lvl);
requires indicator+1 == ExpTwo(lvl+1)*(\ltid+1);
requires indicator < ExpTwo(k) ==> \pointer_index(flag_after_prefix, indicator, 1);
requires indicator < ExpTwo(k) && indicator >= stride ==> \pointer_index(flag_after_prefix, indicator - stride, 1);
requires tid==0 ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % stride != 0; \pointer_index(flag_after_prefix, i, 1));
//requires 2 * tid < |temp_seq| && indicator < ExpTwo(k) && indicator >= stride ==> flag_after_prefix[indicator - stride] == temp_seq[2 * tid];
//requires 2 * tid + 1 < |temp_seq| && indicator < ExpTwo(k) && indicator >= stride ==> flag_after_prefix[indicator] == temp_seq[2 * tid + 1];
requires (\forall int i; 0 <= i && i < k + 1; |Matrix[i]| == ExpTwo(k - i));
requires (\forall int i; 0 <= i && i < k + 1; 0 <= |Matrix[i]| && |Matrix[i]| <= ExpTwo(k));
//requires 2 * tid < |Matrix[lvl]| && indicator < ExpTwo(k) && indicator >= stride ==> flag_after_prefix[indicator - stride] == get(psum2(Matrix[lvl]), 2 * tid);
ensures tid >= 0 && tid < ExpTwo(k);
ensures lvl-1 >= 0 ==> stride / 2 == ExpTwo(lvl - 1);
ensures lvl-1 == -1 ==> stride / 2 == 0;
ensures stride / 2 == 0 ==> lvl-1 == -1;
ensures stride / 2 >= 0;
ensures (indicator - 1) / 2 >= 0;
ensures (indicator - 1) / 2+1 == ExpTwo(lvl)*(tid+1);
ensures (indicator - 1) / 2 < ExpTwo(k) ==> \pointer_index(flag_after_prefix, (indicator - 1) / 2, 1);
ensures lvl-1 >= 0 && (indicator - 1) / 2 < ExpTwo(k) && (indicator - 1) / 2 >= stride / 2 ==> \pointer_index(flag_after_prefix, (indicator - 1) / 2 - stride / 2, 1);
ensures (tid==0 && stride/2 > 0) ==> (\forall* int i; 0 <= i && i < ExpTwo(k) && (i + 1) % (stride/2) != 0; \pointer_index(flag_after_prefix, i, 1));
//ensures tid < |temp_seq| && (indicator - 1) / 2 < ExpTwo(k) ==> temp_seq[tid] == flag_after_prefix[(indicator - 1) / 2];
ensures (\forall int i; 0 <= i && i < k + 1; |Matrix[i]| == ExpTwo(k - i));
ensures (\forall int i; 0 <= i && i < k + 1; 0 <= |Matrix[i]| && |Matrix[i]| <= ExpTwo(k));
//ensures lvl-1 >= 0 && 2 * tid < |Matrix[lvl-1]| && (indicator - 1) / 2 < ExpTwo(k) && (indicator - 1) / 2 >= stride / 2 ==> flag_after_prefix[(indicator - 1) / 2 - stride / 2] == get(Matrix[lvl-1], 2 * tid);
@*/
__syncthreads();
//@ assume tid < |temp_seq| && (indicator - 1) / 2 < ExpTwo(k) ==> temp_seq[tid] == flag_after_prefix[(indicator - 1) / 2];
//@ assume lvl-1 >= 0 && 2 * tid < |Matrix[lvl-1]| && (indicator - 1) / 2 < ExpTwo(k) && (indicator - 1) / 2 >= stride / 2 ==> flag_after_prefix[(indicator - 1) / 2 - stride / 2] == get(Matrix[lvl-1], 2 * tid);
indicator = (indicator - 1) / 2;
stride = stride / 2;
lvl = lvl - 1;
}
//@ assert indicator == tid;
//@ assert tid >= 0 && tid < ExpTwo(k);
//@ assert temp_seq == psum2(Matrix[0]);
//@ assert Matrix[0] == flag_seq;
//@ assert temp_seq == psum2(flag_seq);
//@ assert |temp_seq| == ExpTwo(k);
//@ assert temp_seq[tid] == flag_after_prefix[indicator];
//@ assert flag_after_prefix[indicator] == get(psum2(flag_seq), tid);
//@ assert lemma_intsum_flag(flag_seq, indicator);
//@ assert count(flag_seq) == ExpTwo(M);
//@ assert intsum(flag_seq) == ExpTwo(M);
//@ assert flag_seq[tid] == 1 ? lemma_sum_pos_flag(flag_seq, tid) : true;
//@ assert lemma_take_sum(flag_seq, tid);
//@ assert flag_seq[tid] == flag_before_prefix[indicator];
//@ assert flag_after_prefix[indicator] >= 0 && flag_after_prefix[indicator] <= ExpTwo(M);
//@ assert flag_before_prefix[indicator] == 1 ==> flag_after_prefix[indicator] >= 0 && flag_after_prefix[indicator] < ExpTwo(M);
/*@
requires indicator == tid;
requires tid >= 0 && tid < ExpTwo(k);
requires tid < ExpTwo(M) ==> \pointer_index(output, tid, 1);
requires \pointer_index(input, tid, 1\2);
requires \pointer_index(flag_before_prefix, tid, 1\2);
requires \pointer_index(flag_after_prefix, tid, 1);
requires |temp_seq| == ExpTwo(k);
requires temp_seq == psum2(flag_seq);
//requires temp_seq[tid] == flag_after_prefix[indicator];
requires |flag_seq| == ExpTwo(k);
requires flag_seq[tid] == flag_before_prefix[indicator];
requires |inp| == ExpTwo(k);
requires (inp[tid] == input[tid]);
ensures indicator == tid;
ensures tid >= 0 && tid < ExpTwo(k);
ensures |temp_seq| == ExpTwo(k);
ensures temp_seq == psum2(flag_seq);
ensures \pointer_index(input, tid, 1\2);
ensures \pointer_index(flag_before_prefix, tid, 1\2);
ensures \pointer_index(flag_after_prefix, tid, 1\2);
ensures flag_before_prefix[tid] == 1 ==> flag_after_prefix[tid] >= 0 && flag_after_prefix[tid] < ExpTwo(M);
ensures flag_before_prefix[tid] == 1 ==> \pointer_index(output, flag_after_prefix[tid], 1);
//ensures temp_seq[tid] == flag_after_prefix[indicator];
ensures |flag_seq| == ExpTwo(k);
ensures flag_seq[tid] == flag_before_prefix[indicator];
ensures |inp| == ExpTwo(k);
ensures (inp[tid] == input[tid]);
@*/
__syncthreads();
if(flag_before_prefix[tid] == 1){
output[flag_after_prefix[tid]] = input[tid];
//@ assert (output[flag_after_prefix[tid]] == input[tid]);
}
//@ assert flag_before_prefix[tid] == 1 ==> (output[flag_after_prefix[tid]] == input[tid]);
//@ ghost seq<int> temporary;
//@ ghost temporary = compact(inp, flag_seq);
//@ assert temporary == compact(inp, flag_seq);
//@ assert |temporary| == ExpTwo(M);
//@ assert intsum(Take(flag_seq, tid)) >= 0;
//@ assume temp_seq[tid] == flag_after_prefix[indicator];
//@ assert flag_after_prefix[tid] >= 0;
//@ assert \pointer_index(input, tid, 1\2);
//@ assert \pointer_index(flag_before_prefix, tid, 1\2);
//@ assert \pointer_index(flag_after_prefix, tid, 1\2);
//@ assert flag_before_prefix[tid] == 1 ==> \pointer_index(output, flag_after_prefix[tid], 1);
//@ assert flag_before_prefix[tid] == flag_seq[tid];
//@ assert flag_before_prefix[tid] == 1 ==> (lemma_correctness(inp, flag_seq, tid));
//@ assert flag_before_prefix[tid] == 1 ==> inp[tid] == get(compact(inp, flag_seq), intsum(Take(flag_seq, tid)));
// assert temporary == compact(inp, flag_seq);
//@ assert flag_before_prefix[tid] == 1 ==> (inp[tid] == get(temporary, intsum(Take(flag_seq, tid))));
//@ assert (inp[tid] == input[tid]);
//@ assert flag_before_prefix[tid] == 1 ==> (input[tid] == get(temporary, flag_after_prefix[tid]));
// assert flag_before_prefix[tid] == 1 ==> (output[flag_after_prefix[tid]] == input[tid]);
//@ assert flag_before_prefix[tid] == 1 ==> (output[flag_after_prefix[tid]] == get(temporary, flag_after_prefix[tid]));
}
|
346f904fddae3e1015fb5cc11e7d55107fa2a749.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#include "common.h"
#include "gpuCudaLib.h"
#include <assert.h>
#ifdef HAS_GMM
#include "gmm.h"
#endif
// Define this to more rigorously avoid bank conflicts,
// even at the lower (root) levels of the tree
// Note that due to the higher addressing overhead, performance
// is lower with ZERO_BANK_CONFLICTS enabled. It is provided
// as an example.
//#define ZERO_BANK_CONFLICTS
// 16 banks on G80
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
#define BLOCK_SIZE 256
/*#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2*LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif*/
inline __device__ int CONFLICT_FREE_OFFSET(int index) {
// return ((index) >> LOG_NUM_BANKS + (index) >> (2*LOG_NUM_BANKS));
return ((index) >> LOG_NUM_BANKS);
}
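// Example of the padding this produces (NUM_BANKS == 16, LOG_NUM_BANKS == 4):
// indices 0..15 get offset 0, indices 16..31 get offset 1 (stored at 17..32),
// indices 32..47 get offset 2 (stored at 34..49), and so on. Each run of 16 consecutive
// indices is shifted by one extra slot, so tree indices that differ by a multiple of 16
// no longer map to the same shared-memory bank.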
template <bool isNP2>
__device__ static void loadSharedChunkFromMem(int *s_data, const int *g_idata, int n, int baseIndex, int &ai, int &bi,
int &mem_ai, int &mem_bi, int &bankOffsetA, int &bankOffsetB) {
int thid = threadIdx.x;
mem_ai = baseIndex + threadIdx.x;
mem_bi = mem_ai + blockDim.x;
ai = thid;
bi = thid + blockDim.x;
// compute spacing to avoid bank conflicts
bankOffsetA = CONFLICT_FREE_OFFSET(ai);
bankOffsetB = CONFLICT_FREE_OFFSET(bi);
s_data[ai + bankOffsetA] = g_idata[mem_ai];
if (isNP2) {
s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0;
} else {
s_data[bi + bankOffsetB] = g_idata[mem_bi];
}
}
template <bool isNP2>
static __device__ void storeSharedChunkToMem(int *g_odata, const int *s_data, int n, int ai, int bi, int mem_ai,
int mem_bi, int bankOffsetA, int bankOffsetB) {
__syncthreads();
g_odata[mem_ai] = s_data[ai + bankOffsetA];
if (isNP2) {
if (bi < n)
g_odata[mem_bi] = s_data[bi + bankOffsetB];
} else {
g_odata[mem_bi] = s_data[bi + bankOffsetB];
}
}
template <bool storeSum> static __device__ void clearLastElement(int *s_data, int *g_blockSums, int blockIndex) {
if (threadIdx.x == 0) {
int index = (blockDim.x << 1) - 1;
index += CONFLICT_FREE_OFFSET(index);
if (storeSum) // compile-time decision
{
// write this block's total sum to the corresponding index in the blockSums array
g_blockSums[blockIndex] = s_data[index];
}
// zero the last element in the scan so it will propagate back to the front
s_data[index] = 0;
}
}
__device__ static unsigned int buildSum(int *s_data) {
unsigned int thid = threadIdx.x;
unsigned int stride = 1;
// build the sum in place up the tree
for (int d = blockDim.x; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_data[bi] += s_data[ai];
}
stride *= 2;
}
return stride;
}
__device__ static void scanRootToLeaves(int *s_data, unsigned int stride) {
unsigned int thid = threadIdx.x;
// traverse down the tree building the scan in place
for (int d = 1; d <= blockDim.x; d *= 2) {
stride >>= 1;
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
int t = s_data[ai];
s_data[ai] = s_data[bi];
s_data[bi] += t;
}
}
}
template <bool storeSum> static __device__ void prescanBlock(int *data, int blockIndex, int *blockSums) {
int stride = buildSum(data); // build the sum in place up the tree
clearLastElement<storeSum>(data, blockSums, (blockIndex == 0) ? blockIdx.x : blockIndex);
scanRootToLeaves(data, stride); // traverse down tree to build the scan
}
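// Illustration of prescanBlock on a single 4-element block [1, 2, 3, 4], ignoring the
// bank-conflict padding:
// after buildSum : [1, 3, 3, 10] (pairwise sums built in place, 10 = block total)
// after clearLastElement: [1, 3, 3, 0] (the total is optionally saved to g_blockSums)
// after scanRootToLeaves: [0, 1, 3, 6] (exclusive prefix sums of the block)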
// no shared memory
template <bool storeSum, bool isNP2>
__global__ void prescan(int *d_data, int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex,
int baseIndex, unsigned int sharedMemSize) {
int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB;
// extern __shared__ int s_data[];
int bx = blockIdx.x;
int *s_data = d_data + (sharedMemSize / sizeof(int)) * bx;
// load data into this block's scratch region of d_data (global memory in this variant)
loadSharedChunkFromMem<isNP2>(s_data, g_idata, n,
(baseIndex == 0) ? __mul24(blockIdx.x, (blockDim.x << 1)) : baseIndex, ai, bi, mem_ai,
mem_bi, bankOffsetA, bankOffsetB);
// scan the data in each block
prescanBlock<storeSum>(s_data, blockIndex, g_blockSums);
// write results to device memory
storeSharedChunkToMem<isNP2>(g_odata, s_data, n, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB);
}
// with shared memory
template <bool storeSum, bool isNP2>
__global__ void prescan(int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex,
int baseIndex) {
int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB;
extern __shared__ int s_data[];
// load data into shared memory
loadSharedChunkFromMem<isNP2>(s_data, g_idata, n,
(baseIndex == 0) ? __mul24(blockIdx.x, (blockDim.x << 1)) : baseIndex, ai, bi, mem_ai,
mem_bi, bankOffsetA, bankOffsetB);
// scan the data in each block
prescanBlock<storeSum>(s_data, blockIndex, g_blockSums);
// write results to device memory
storeSharedChunkToMem<isNP2>(g_odata, s_data, n, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB);
}
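// uniformAdd is the final pass of the recursive scan: block b reads uniforms[b + blockOffset]
// (the exclusive sum of all elements in the blocks before it, obtained by scanning the
// per-block totals) and adds it to the two elements each thread owns, turning the
// independent per-block scans into one global exclusive scan.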
extern "C" __global__ void uniformAdd(int *g_data, int *uniforms, int n, int blockOffset, int baseIndex, int total) {
__shared__ int uni;
if (threadIdx.x == 0)
uni = uniforms[blockIdx.x + blockOffset];
unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
__syncthreads();
// note two adds per thread
g_data[address] += uni;
if (address + blockDim.x < total)
g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
static inline bool isPowerOfTwo(int n) { return ((n & (n - 1)) == 0); }
static inline int floorPow2(int n) {
int exp;
frexp((float)n, &exp);
return 1 << (exp - 1);
}
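// Examples: isPowerOfTwo(8) == true, isPowerOfTwo(6) == false (and, as written, it also
// returns true for n == 0); floorPow2(5) == 4, floorPow2(8) == 8, floorPow2(9) == 8.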
static int **g_scanBlockSums;
static unsigned int g_numEltsAllocated = 0;
static unsigned int g_numLevelsAllocated = 0;
static void preallocBlockSums(unsigned int maxNumElements) {
assert(g_numEltsAllocated == 0);
g_numEltsAllocated = maxNumElements;
unsigned int blockSize = BLOCK_SIZE;
unsigned int numElts = maxNumElements;
int level = 0;
do {
unsigned int numBlocks = max(1, (int)ceil((int)numElts / (2.f * blockSize)));
if (numBlocks > 1) {
level++;
}
numElts = numBlocks;
} while (numElts > 1);
// printf("level = %d\n", level);
g_scanBlockSums = (int **)malloc(level * sizeof(int *));
g_numLevelsAllocated = level;
numElts = maxNumElements;
level = 0;
do {
unsigned int numBlocks = max(1, (int)ceil((int)numElts / (2.f * blockSize)));
if (numBlocks > 1) {
hipMalloc((void **)&g_scanBlockSums[level++], numBlocks * sizeof(int));
}
numElts = numBlocks;
} while (numElts > 1);
}
static void deallocBlockSums() {
for (int i = 0; i < g_numLevelsAllocated; i++) {
CUDA_SAFE_CALL_NO_SYNC(hipFree(g_scanBlockSums[i]));
}
free((void **)g_scanBlockSums);
g_scanBlockSums = 0;
g_numEltsAllocated = 0;
g_numLevelsAllocated = 0;
}
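// prescanArrayRecursive: scans inArray in chunks of up to 2*BLOCK_SIZE elements, storing
// each block's total in g_scanBlockSums[level]; those block sums are then scanned
// recursively and added back onto the per-block results with uniformAdd. A trailing
// non-power-of-two block is handled by a separate single-block launch.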
static void prescanArrayRecursive(int *outArray, const int *inArray, int numElements, int level, struct statistic *pp) {
unsigned int blockSize = BLOCK_SIZE;
unsigned int numBlocks = max(1, (int)ceil((int)numElements / (2.f * blockSize)));
unsigned int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = floorPow2(numElements);
// printf("blocks(%u) threads(%u) elements(%d)\n", numBlocks, numThreads, numElements);
unsigned int numEltsPerBlock = numThreads * 2;
unsigned int numEltsLastBlock = numElements - (numBlocks - 1) * numEltsPerBlock;
unsigned int numThreadsLastBlock = max(1, numEltsLastBlock / 2);
unsigned int np2LastBlock = 0;
unsigned int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock) {
np2LastBlock = 1;
if (!isPowerOfTwo(numEltsLastBlock))
numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
}
unsigned int extraSpace = numEltsPerBlock / NUM_BANKS;
unsigned int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
if (numBlocks > 1) {
GMM_CALL(cudaAdvise(0, CADV_OUTPUT));
GMM_CALL(cudaAdvise(1, CADV_INPUT));
GMM_CALL(cudaAdvise(2, CADV_OUTPUT));
GMM_CALL(cudaSetFunction(136));
hipLaunchKernelGGL(( prescan<true, false>)
, dim3(grid), dim3(threads), sharedMemSize, 0, outArray, inArray, g_scanBlockSums[level], numThreads * 2, 0, 0);
if (np2LastBlock) {
GMM_CALL(cudaAdvise(0, CADV_OUTPUT));
GMM_CALL(cudaAdvise(1, CADV_INPUT));
GMM_CALL(cudaAdvise(2, CADV_OUTPUT));
GMM_CALL(cudaSetFunction(137));
hipLaunchKernelGGL(( prescan<true, true>), dim3(1), dim3(numThreadsLastBlock), sharedMemLastBlock, 0,
outArray, inArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
prescanArrayRecursive(g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level + 1, pp);
GMM_CALL(cudaAdvise(0, CADV_OUTPUT));
GMM_CALL(cudaAdvise(1, CADV_INPUT));
GMM_CALL(cudaSetFunction(130));
hipLaunchKernelGGL(( uniformAdd), dim3(grid), dim3(threads), 0, 0, outArray, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0, numElements);
if (np2LastBlock) {
GMM_CALL(cudaAdvise(0, CADV_OUTPUT));
GMM_CALL(cudaAdvise(1, CADV_INPUT));
GMM_CALL(cudaSetFunction(130));
hipLaunchKernelGGL(( uniformAdd), dim3(1), dim3(numThreadsLastBlock), 0, 0, outArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1,
numElements - numEltsLastBlock, numElements);
}
} else if (isPowerOfTwo(numElements)) {
GMM_CALL(cudaAdvise(0, CADV_OUTPUT));
GMM_CALL(cudaAdvise(1, CADV_INPUT));
GMM_CALL(cudaSetFunction(134));
hipLaunchKernelGGL(( prescan<false, false>), dim3(grid), dim3(threads), sharedMemSize, 0, outArray, inArray, 0, numThreads * 2, 0, 0);
} else {
GMM_CALL(cudaAdvise(0, CADV_OUTPUT));
GMM_CALL(cudaAdvise(1, CADV_INPUT));
GMM_CALL(cudaSetFunction(135));
hipLaunchKernelGGL(( prescan<false, true>), dim3(grid), dim3(threads), sharedMemSize, 0, outArray, inArray, 0, numElements, 0, 0);
}
}
static void prescanArray(int *outArray, int *inArray, int numElements, struct statistic *pp) {
prescanArrayRecursive(outArray, inArray, numElements, 0, pp);
}
void scanImpl(int *d_input, int rLen, int *d_output, struct statistic *pp) {
int len = 2;
if (rLen < len) {
int *input, *output;
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&input, len * sizeof(int)));
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&output, len * sizeof(int)));
CUDA_SAFE_CALL_NO_SYNC(hipMemset(input, 0, len * sizeof(int)));
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(input, d_input, rLen * sizeof(int), hipMemcpyDeviceToDevice));
preallocBlockSums(len);
prescanArray(output, input, len, pp);
deallocBlockSums();
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(d_output, output, rLen * sizeof(int), hipMemcpyDeviceToDevice));
CUDA_SAFE_CALL_NO_SYNC(hipFree(input));
CUDA_SAFE_CALL_NO_SYNC(hipFree(output));
return;
} else {
preallocBlockSums(rLen);
prescanArray(d_output, d_input, rLen, pp);
deallocBlockSums();
}
// preallocBlockSums(rLen);
// prescanArray(d_output, d_input, rLen, pp);
// deallocBlockSums();
}
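// Usage note (an illustrative addition, not from the original file): scanImpl performs an
// exclusive prefix sum over rLen ints that already live in device memory, e.g.
//   scanImpl(d_vals, rLen, d_prefix, pp);   // afterwards d_prefix[i] == sum of d_vals[0..i-1]
// The rLen < 2 branch above pads tiny inputs to length 2 before scanning.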
| 346f904fddae3e1015fb5cc11e7d55107fa2a749.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#include "common.h"
#include "gpuCudaLib.h"
#include <assert.h>
#ifdef HAS_GMM
#include "gmm.h"
#endif
// Define this to more rigorously avoid bank conflicts,
// even at the lower (root) levels of the tree
// Note that due to the higher addressing overhead, performance
// is lower with ZERO_BANK_CONFLICTS enabled. It is provided
// as an example.
//#define ZERO_BANK_CONFLICTS
// 16 banks on G80
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
#define BLOCK_SIZE 256
/*#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2*LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif*/
inline __device__ int CONFLICT_FREE_OFFSET(int index) {
// return ((index) >> LOG_NUM_BANKS + (index) >> (2*LOG_NUM_BANKS));
return ((index) >> LOG_NUM_BANKS);
}
template <bool isNP2>
__device__ static void loadSharedChunkFromMem(int *s_data, const int *g_idata, int n, int baseIndex, int &ai, int &bi,
int &mem_ai, int &mem_bi, int &bankOffsetA, int &bankOffsetB) {
int thid = threadIdx.x;
mem_ai = baseIndex + threadIdx.x;
mem_bi = mem_ai + blockDim.x;
ai = thid;
bi = thid + blockDim.x;
// compute spacing to avoid bank conflicts
bankOffsetA = CONFLICT_FREE_OFFSET(ai);
bankOffsetB = CONFLICT_FREE_OFFSET(bi);
s_data[ai + bankOffsetA] = g_idata[mem_ai];
if (isNP2) {
s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0;
} else {
s_data[bi + bankOffsetB] = g_idata[mem_bi];
}
}
template <bool isNP2>
static __device__ void storeSharedChunkToMem(int *g_odata, const int *s_data, int n, int ai, int bi, int mem_ai,
int mem_bi, int bankOffsetA, int bankOffsetB) {
__syncthreads();
g_odata[mem_ai] = s_data[ai + bankOffsetA];
if (isNP2) {
if (bi < n)
g_odata[mem_bi] = s_data[bi + bankOffsetB];
} else {
g_odata[mem_bi] = s_data[bi + bankOffsetB];
}
}
template <bool storeSum> static __device__ void clearLastElement(int *s_data, int *g_blockSums, int blockIndex) {
if (threadIdx.x == 0) {
int index = (blockDim.x << 1) - 1;
index += CONFLICT_FREE_OFFSET(index);
if (storeSum) // compile-time decision
{
// write this block's total sum to the corresponding index in the blockSums array
g_blockSums[blockIndex] = s_data[index];
}
// zero the last element in the scan so it will propagate back to the front
s_data[index] = 0;
}
}
__device__ static unsigned int buildSum(int *s_data) {
unsigned int thid = threadIdx.x;
unsigned int stride = 1;
// build the sum in place up the tree
for (int d = blockDim.x; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_data[bi] += s_data[ai];
}
stride *= 2;
}
return stride;
}
__device__ static void scanRootToLeaves(int *s_data, unsigned int stride) {
unsigned int thid = threadIdx.x;
// traverse down the tree building the scan in place
for (int d = 1; d <= blockDim.x; d *= 2) {
stride >>= 1;
__syncthreads();
if (thid < d) {
int i = __mul24(__mul24(2, stride), thid);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
int t = s_data[ai];
s_data[ai] = s_data[bi];
s_data[bi] += t;
}
}
}
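// prescanBlock: complete exclusive scan of one block's chunk held in the scratch buffer:
// up-sweep (buildSum), clearing of the last element (optionally saving the block total to
// blockSums when storeSum is true), then down-sweep (scanRootToLeaves).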
template <bool storeSum> static __device__ void prescanBlock(int *data, int blockIndex, int *blockSums) {
int stride = buildSum(data); // build the sum in place up the tree
clearLastElement<storeSum>(data, blockSums, (blockIndex == 0) ? blockIdx.x : blockIndex);
scanRootToLeaves(data, stride); // traverse down tree to build the scan
}
// variant that scans in a caller-provided global-memory scratch buffer (d_data) instead of shared memory
template <bool storeSum, bool isNP2>
__global__ void prescan(int *d_data, int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex,
int baseIndex, unsigned int sharedMemSize) {
int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB;
// extern __shared__ int s_data[];
int bx = blockIdx.x;
int *s_data = d_data + (sharedMemSize / sizeof(int)) * bx;
// load data into shared memory
loadSharedChunkFromMem<isNP2>(s_data, g_idata, n,
(baseIndex == 0) ? __mul24(blockIdx.x, (blockDim.x << 1)) : baseIndex, ai, bi, mem_ai,
mem_bi, bankOffsetA, bankOffsetB);
// scan the data in each block
prescanBlock<storeSum>(s_data, blockIndex, g_blockSums);
// write results to device memory
storeSharedChunkToMem<isNP2>(g_odata, s_data, n, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB);
}
// variant that scans in dynamically allocated shared memory
template <bool storeSum, bool isNP2>
__global__ void prescan(int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex,
int baseIndex) {
int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB;
extern __shared__ int s_data[];
// load data into shared memory
loadSharedChunkFromMem<isNP2>(s_data, g_idata, n,
(baseIndex == 0) ? __mul24(blockIdx.x, (blockDim.x << 1)) : baseIndex, ai, bi, mem_ai,
mem_bi, bankOffsetA, bankOffsetB);
// scan the data in each block
prescanBlock<storeSum>(s_data, blockIndex, g_blockSums);
// write results to device memory
storeSharedChunkToMem<isNP2>(g_odata, s_data, n, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB);
}
extern "C" __global__ void uniformAdd(int *g_data, int *uniforms, int n, int blockOffset, int baseIndex, int total) {
__shared__ int uni;
if (threadIdx.x == 0)
uni = uniforms[blockIdx.x + blockOffset];
unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
__syncthreads();
// note two adds per thread
g_data[address] += uni;
if (address + blockDim.x < total)
g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
static inline bool isPowerOfTwo(int n) { return ((n & (n - 1)) == 0); }
static inline int floorPow2(int n) {
int exp;
frexp((int)n, &exp);
return 1 << (exp - 1);
}
static int **g_scanBlockSums;
static unsigned int g_numEltsAllocated = 0;
static unsigned int g_numLevelsAllocated = 0;
static void preallocBlockSums(unsigned int maxNumElements) {
assert(g_numEltsAllocated == 0);
g_numEltsAllocated = maxNumElements;
unsigned int blockSize = BLOCK_SIZE;
unsigned int numElts = maxNumElements;
int level = 0;
do {
unsigned int numBlocks = max(1, (int)ceil((int)numElts / (2.f * blockSize)));
if (numBlocks > 1) {
level++;
}
numElts = numBlocks;
} while (numElts > 1);
// printf("level = %d\n", level);
g_scanBlockSums = (int **)malloc(level * sizeof(int *));
g_numLevelsAllocated = level;
numElts = maxNumElements;
level = 0;
do {
unsigned int numBlocks = max(1, (int)ceil((int)numElts / (2.f * blockSize)));
if (numBlocks > 1) {
cudaMalloc((void **)&g_scanBlockSums[level++], numBlocks * sizeof(int));
}
numElts = numBlocks;
} while (numElts > 1);
}
static void deallocBlockSums() {
for (int i = 0; i < g_numLevelsAllocated; i++) {
CUDA_SAFE_CALL_NO_SYNC(cudaFree(g_scanBlockSums[i]));
}
free((void **)g_scanBlockSums);
g_scanBlockSums = 0;
g_numEltsAllocated = 0;
g_numLevelsAllocated = 0;
}
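// prescanArrayRecursive: scans inArray in chunks of up to 2*BLOCK_SIZE elements, storing
// each block's total in g_scanBlockSums[level]; those block sums are then scanned
// recursively and added back onto the per-block results with uniformAdd. A trailing
// non-power-of-two block is handled by a separate single-block launch.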
static void prescanArrayRecursive(int *outArray, const int *inArray, int numElements, int level, struct statistic *pp) {
unsigned int blockSize = BLOCK_SIZE;
unsigned int numBlocks = max(1, (int)ceil((int)numElements / (2.f * blockSize)));
unsigned int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = floorPow2(numElements);
// printf("blocks(%u) threads(%u) elements(%d)\n", numBlocks, numThreads, numElements);
unsigned int numEltsPerBlock = numThreads * 2;
unsigned int numEltsLastBlock = numElements - (numBlocks - 1) * numEltsPerBlock;
unsigned int numThreadsLastBlock = max(1, numEltsLastBlock / 2);
unsigned int np2LastBlock = 0;
unsigned int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock) {
np2LastBlock = 1;
if (!isPowerOfTwo(numEltsLastBlock))
numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
}
unsigned int extraSpace = numEltsPerBlock / NUM_BANKS;
unsigned int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
if (numBlocks > 1) {
GMM_CALL(cudaAdvise(0, CADV_OUTPUT));
GMM_CALL(cudaAdvise(1, CADV_INPUT));
GMM_CALL(cudaAdvise(2, CADV_OUTPUT));
GMM_CALL(cudaSetFunction(136));
prescan<true, false>
<<<grid, threads, sharedMemSize>>>(outArray, inArray, g_scanBlockSums[level], numThreads * 2, 0, 0);
if (np2LastBlock) {
GMM_CALL(cudaAdvise(0, CADV_OUTPUT));
GMM_CALL(cudaAdvise(1, CADV_INPUT));
GMM_CALL(cudaAdvise(2, CADV_OUTPUT));
GMM_CALL(cudaSetFunction(137));
prescan<true, true><<<1, numThreadsLastBlock, sharedMemLastBlock>>>(
outArray, inArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
prescanArrayRecursive(g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level + 1, pp);
GMM_CALL(cudaAdvise(0, CADV_OUTPUT));
GMM_CALL(cudaAdvise(1, CADV_INPUT));
GMM_CALL(cudaSetFunction(130));
uniformAdd<<<grid, threads>>>(outArray, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0, numElements);
if (np2LastBlock) {
GMM_CALL(cudaAdvise(0, CADV_OUTPUT));
GMM_CALL(cudaAdvise(1, CADV_INPUT));
GMM_CALL(cudaSetFunction(130));
uniformAdd<<<1, numThreadsLastBlock>>>(outArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1,
numElements - numEltsLastBlock, numElements);
}
} else if (isPowerOfTwo(numElements)) {
GMM_CALL(cudaAdvise(0, CADV_OUTPUT));
GMM_CALL(cudaAdvise(1, CADV_INPUT));
GMM_CALL(cudaSetFunction(134));
prescan<false, false><<<grid, threads, sharedMemSize>>>(outArray, inArray, 0, numThreads * 2, 0, 0);
} else {
GMM_CALL(cudaAdvise(0, CADV_OUTPUT));
GMM_CALL(cudaAdvise(1, CADV_INPUT));
GMM_CALL(cudaSetFunction(135));
prescan<false, true><<<grid, threads, sharedMemSize>>>(outArray, inArray, 0, numElements, 0, 0);
}
}
static void prescanArray(int *outArray, int *inArray, int numElements, struct statistic *pp) {
prescanArrayRecursive(outArray, inArray, numElements, 0, pp);
}
void scanImpl(int *d_input, int rLen, int *d_output, struct statistic *pp) {
int len = 2;
if (rLen < len) {
int *input, *output;
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&input, len * sizeof(int)));
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&output, len * sizeof(int)));
CUDA_SAFE_CALL_NO_SYNC(cudaMemset(input, 0, len * sizeof(int)));
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(input, d_input, rLen * sizeof(int), cudaMemcpyDeviceToDevice));
preallocBlockSums(len);
prescanArray(output, input, len, pp);
deallocBlockSums();
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(d_output, output, rLen * sizeof(int), cudaMemcpyDeviceToDevice));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(input));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(output));
return;
} else {
preallocBlockSums(rLen);
prescanArray(d_output, d_input, rLen, pp);
deallocBlockSums();
}
// preallocBlockSums(rLen);
// prescanArray(d_output, d_input, rLen, pp);
// deallocBlockSums();
}
|
8727be356e8a28c55381def1445f922f70bdaf40.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/book.h"
#include "../commonMac/cpu_bitmap.h"
#define DIM 1024
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
struct Sphere {
float r,b,g;
float radius;
float x,y,z;
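// hit: intersect this sphere with the axis-aligned view ray through pixel (ox, oy).
// On a hit it returns the z depth of the intersection (larger z wins in the kernel)
// and writes a 0..1 shading factor through n; on a miss it returns -INF.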
__device__ float hit( float ox, float oy, float *n ) {
float dx = ox - x;
float dy = oy - y;
if (dx*dx + dy*dy < radius*radius) {
float dz = sqrtf( radius*radius - dx*dx - dy*dy );
*n = dz / sqrtf( radius * radius );
return dz + z;
}
return -INF;
}
};
#define SPHERES 20
__global__ void kernel( Sphere *s, unsigned char *ptr ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox = (x - DIM/2);
float oy = (y - DIM/2);
float r=0, g=0, b=0;
float maxz = -INF;
for(int i=0; i<SPHERES; i++) {
float n;
float t = s[i].hit( ox, oy, &n );
if (t > maxz) {
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t;
}
}
ptr[offset*4 + 0] = (int)(r * 255);
ptr[offset*4 + 1] = (int)(g * 255);
ptr[offset*4 + 2] = (int)(b * 255);
ptr[offset*4 + 3] = 255;
}
//
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
Sphere *s;
};
//
int main( void ) {
DataBlock data;
// capture the start time
hipEvent_t start, stop;
hipEventCreate( &start );
hipEventCreate( &stop );
hipEventRecord( start, 0 );
//
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
Sphere *s;
//
// allocate memory on the GPU for the output bitmap
hipMalloc( (void**)&dev_bitmap,
bitmap.image_size() );
// allocate memory for the Sphere dataset
hipMalloc( (void**)&s,
sizeof(Sphere) * SPHERES );
// allocate temp memory, initialize it, copy to
// memory on the GPU, then free our temp memory
Sphere *temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES );
for (int i=0; i<SPHERES; i++) {
temp_s[i].r = rnd( 1.0f );
temp_s[i].g = rnd( 1.0f );
temp_s[i].b = rnd( 1.0f );
temp_s[i].x = rnd( 1000.0f ) - 500;
temp_s[i].y = rnd( 1000.0f ) - 500;
temp_s[i].z = rnd( 1000.0f ) - 500;
temp_s[i].radius = rnd( 100.0f ) + 20;
}
hipMemcpy( s, temp_s,
sizeof(Sphere) * SPHERES,
hipMemcpyHostToDevice );
free( temp_s );
// generate a bitmap from our sphere data
dim3 grids(DIM/16,DIM/16);
dim3 threads(16,16);
hipLaunchKernelGGL(( kernel), dim3(grids),dim3(threads), 0, 0, s, dev_bitmap );
// copy our bitmap back from the GPU for display
hipMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
hipMemcpyDeviceToHost );
// get stop time, and display the timing results
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
float elapsedTime;
hipEventElapsedTime( &elapsedTime,
start, stop );
printf( "Time to generate: %3.1f ms\n", elapsedTime );
hipEventDestroy( start );
hipEventDestroy( stop );
hipFree( dev_bitmap );
hipFree( s );
// display
bitmap.display_and_exit();
}
| 8727be356e8a28c55381def1445f922f70bdaf40.cu | #include "cuda.h"
#include "../common/book.h"
#include "../commonMac/cpu_bitmap.h"
#define DIM 1024
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
struct Sphere {
float r,b,g;
float radius;
float x,y,z;
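// hit: intersect this sphere with the axis-aligned view ray through pixel (ox, oy).
// On a hit it returns the z depth of the intersection (larger z wins in the kernel)
// and writes a 0..1 shading factor through n; on a miss it returns -INF.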
__device__ float hit( float ox, float oy, float *n ) {
float dx = ox - x;
float dy = oy - y;
if (dx*dx + dy*dy < radius*radius) {
float dz = sqrtf( radius*radius - dx*dx - dy*dy );
*n = dz / sqrtf( radius * radius );
return dz + z;
}
return -INF;
}
};
#define SPHERES 20
__global__ void kernel( Sphere *s, unsigned char *ptr ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox = (x - DIM/2);
float oy = (y - DIM/2);
float r=0, g=0, b=0;
float maxz = -INF;
for(int i=0; i<SPHERES; i++) {
float n;
float t = s[i].hit( ox, oy, &n );
if (t > maxz) {
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t;
}
}
ptr[offset*4 + 0] = (int)(r * 255);
ptr[offset*4 + 1] = (int)(g * 255);
ptr[offset*4 + 2] = (int)(b * 255);
ptr[offset*4 + 3] = 255;
}
//
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
Sphere *s;
};
//
int main( void ) {
DataBlock data;
// capture the start time
cudaEvent_t start, stop;
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start, 0 );
//
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
Sphere *s;
//
// allocate memory on the GPU for the output bitmap
cudaMalloc( (void**)&dev_bitmap,
bitmap.image_size() );
// allocate memory for the Sphere dataset
cudaMalloc( (void**)&s,
sizeof(Sphere) * SPHERES );
// allocate temp memory, initialize it, copy to
// memory on the GPU, then free our temp memory
Sphere *temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES );
for (int i=0; i<SPHERES; i++) {
temp_s[i].r = rnd( 1.0f );
temp_s[i].g = rnd( 1.0f );
temp_s[i].b = rnd( 1.0f );
temp_s[i].x = rnd( 1000.0f ) - 500;
temp_s[i].y = rnd( 1000.0f ) - 500;
temp_s[i].z = rnd( 1000.0f ) - 500;
temp_s[i].radius = rnd( 100.0f ) + 20;
}
cudaMemcpy( s, temp_s,
sizeof(Sphere) * SPHERES,
cudaMemcpyHostToDevice );
free( temp_s );
// generate a bitmap from our sphere data
dim3 grids(DIM/16,DIM/16);
dim3 threads(16,16);
kernel<<<grids,threads>>>( s, dev_bitmap );
// copy our bitmap back from the GPU for display
cudaMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
cudaMemcpyDeviceToHost );
// get stop time, and display the timing results
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
float elapsedTime;
cudaEventElapsedTime( &elapsedTime,
start, stop );
printf( "Time to generate: %3.1f ms\n", elapsedTime );
cudaEventDestroy( start );
cudaEventDestroy( stop );
cudaFree( dev_bitmap );
cudaFree( s );
// display
bitmap.display_and_exit();
}
|
7b1b6922ddc73182a35a2fe3abcfb856455dd510.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "layer_updater_cuda.h"
#include "util_cuda.h"
namespace nnforge
{
namespace cuda
{
layer_updater_cuda::layer_updater_cuda()
{
}
layer_updater_cuda::~layer_updater_cuda()
{
}
void layer_updater_cuda::configure(
const layer_configuration_specific& input_configuration_specific,
const layer_configuration_specific& output_configuration_specific,
const_layer_smart_ptr layer_schema,
cuda_running_configuration_const_smart_ptr cuda_config,
bool backprop_required,
bool different_input)
{
this->layer_schema = layer_schema;
this->input_configuration_specific = input_configuration_specific;
this->output_configuration_specific = output_configuration_specific;
this->cuda_config = cuda_config;
this->backprop_required = backprop_required;
this->different_input = different_input;
input_elem_count_per_entry = input_configuration_specific.get_neuron_count();
output_elem_count_per_entry = output_configuration_specific.get_neuron_count();
input_elem_count_per_feature_map = input_configuration_specific.get_neuron_count_per_feature_map();
output_elem_count_per_feature_map = output_configuration_specific.get_neuron_count_per_feature_map();
updater_configured();
}
void layer_updater_cuda::updater_configured()
{
}
std::vector<size_t> layer_updater_cuda::get_sizes_of_additional_buffers_per_entry() const
{
return std::vector<size_t>();
}
std::vector<unsigned int> layer_updater_cuda::get_linear_addressing_through_texture_per_entry() const
{
return std::vector<unsigned int>();
}
void layer_updater_cuda::update_buffer_configuration(buffer_cuda_size_configuration& buffer_configuration) const
{
std::vector<size_t> per_entry_sizes = get_sizes_of_additional_buffers_per_entry();
for(std::vector<size_t>::const_iterator it = per_entry_sizes.begin(); it != per_entry_sizes.end(); ++it)
buffer_configuration.add_per_entry_buffer(*it);
std::vector<size_t> fixed_sized = get_sizes_of_additional_buffers_fixed();
for(std::vector<size_t>::const_iterator it = fixed_sized.begin(); it != fixed_sized.end(); ++it)
buffer_configuration.add_constant_buffer(*it);
buffer_configuration.add_per_entry_buffer(output_elem_count_per_entry * sizeof(float));
if (backprop_required && !is_in_place_backprop())
buffer_configuration.add_per_entry_buffer(input_elem_count_per_entry * sizeof(float));
std::vector<unsigned int> tex_per_entry = get_linear_addressing_through_texture_per_entry();
for(std::vector<unsigned int>::const_iterator it = tex_per_entry.begin(); it != tex_per_entry.end(); ++it)
buffer_configuration.add_per_entry_linear_addressing_through_texture(*it);
}
void layer_updater_cuda::update_buffer_configuration(
buffer_cuda_size_configuration& buffer_configuration,
unsigned int updater_entry_count) const
{
std::vector<size_t> per_entry_sizes = get_sizes_of_additional_buffers_per_entry();
for(std::vector<size_t>::const_iterator it = per_entry_sizes.begin(); it != per_entry_sizes.end(); ++it)
buffer_configuration.add_constant_buffer(*it * updater_entry_count);
std::vector<size_t> fixed_sizes = get_sizes_of_additional_buffers_fixed();
for(std::vector<size_t>::const_iterator it = fixed_sizes.begin(); it != fixed_sizes.end(); ++it)
buffer_configuration.add_constant_buffer(*it);
buffer_configuration.add_constant_buffer(output_elem_count_per_entry * sizeof(float) * updater_entry_count);
if (backprop_required && !is_in_place_backprop())
buffer_configuration.add_constant_buffer(input_elem_count_per_entry * sizeof(float) * updater_entry_count);
}
layer_updater_cuda::buffer_set layer_updater_cuda::allocate_all_buffers(unsigned int max_entry_count)
{
buffer_set res;
set_max_entry_count(max_entry_count);
std::vector<size_t> per_entry_sizes = get_sizes_of_additional_buffers_per_entry();
for(std::vector<size_t>::const_iterator it = per_entry_sizes.begin(); it != per_entry_sizes.end(); ++it)
res.additional_buffers.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(*it * max_entry_count)));
std::vector<size_t> fixed_sizes = get_sizes_of_additional_buffers_fixed();
for(std::vector<size_t>::const_iterator it = fixed_sizes.begin(); it != fixed_sizes.end(); ++it)
res.additional_buffers.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(*it)));
fill_additional_buffers(res.additional_buffers);
{
size_t sz = output_elem_count_per_entry * sizeof(float) * max_entry_count;
res.output_neurons_buffer = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(sz));
}
if (backprop_required && !is_in_place_backprop())
{
size_t sz = input_elem_count_per_entry * sizeof(float) * max_entry_count;
res.input_errors_buffer = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(sz));
}
res.dynamic_memobjects.resize(get_dynamic_memobject_count());
return res;
}
void layer_updater_cuda::enqueue_update_weights(
unsigned int offset_input_entry_id,
hipStream_t stream_id,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& training_speed,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
}
void layer_updater_cuda::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const
{
}
std::vector<size_t> layer_updater_cuda::get_sizes_of_additional_buffers_fixed() const
{
return std::vector<size_t>();
}
void layer_updater_cuda::set_max_entry_count(unsigned int max_entry_count)
{
}
int layer_updater_cuda::get_dynamic_memobject_count() const
{
return 0;
}
}
}
| 7b1b6922ddc73182a35a2fe3abcfb856455dd510.cu | /*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "layer_updater_cuda.h"
#include "util_cuda.h"
namespace nnforge
{
namespace cuda
{
layer_updater_cuda::layer_updater_cuda()
{
}
layer_updater_cuda::~layer_updater_cuda()
{
}
void layer_updater_cuda::configure(
const layer_configuration_specific& input_configuration_specific,
const layer_configuration_specific& output_configuration_specific,
const_layer_smart_ptr layer_schema,
cuda_running_configuration_const_smart_ptr cuda_config,
bool backprop_required,
bool different_input)
{
this->layer_schema = layer_schema;
this->input_configuration_specific = input_configuration_specific;
this->output_configuration_specific = output_configuration_specific;
this->cuda_config = cuda_config;
this->backprop_required = backprop_required;
this->different_input = different_input;
input_elem_count_per_entry = input_configuration_specific.get_neuron_count();
output_elem_count_per_entry = output_configuration_specific.get_neuron_count();
input_elem_count_per_feature_map = input_configuration_specific.get_neuron_count_per_feature_map();
output_elem_count_per_feature_map = output_configuration_specific.get_neuron_count_per_feature_map();
updater_configured();
}
void layer_updater_cuda::updater_configured()
{
}
std::vector<size_t> layer_updater_cuda::get_sizes_of_additional_buffers_per_entry() const
{
return std::vector<size_t>();
}
std::vector<unsigned int> layer_updater_cuda::get_linear_addressing_through_texture_per_entry() const
{
return std::vector<unsigned int>();
}
void layer_updater_cuda::update_buffer_configuration(buffer_cuda_size_configuration& buffer_configuration) const
{
std::vector<size_t> per_entry_sizes = get_sizes_of_additional_buffers_per_entry();
for(std::vector<size_t>::const_iterator it = per_entry_sizes.begin(); it != per_entry_sizes.end(); ++it)
buffer_configuration.add_per_entry_buffer(*it);
std::vector<size_t> fixed_sized = get_sizes_of_additional_buffers_fixed();
for(std::vector<size_t>::const_iterator it = fixed_sized.begin(); it != fixed_sized.end(); ++it)
buffer_configuration.add_constant_buffer(*it);
buffer_configuration.add_per_entry_buffer(output_elem_count_per_entry * sizeof(float));
if (backprop_required && !is_in_place_backprop())
buffer_configuration.add_per_entry_buffer(input_elem_count_per_entry * sizeof(float));
std::vector<unsigned int> tex_per_entry = get_linear_addressing_through_texture_per_entry();
for(std::vector<unsigned int>::const_iterator it = tex_per_entry.begin(); it != tex_per_entry.end(); ++it)
buffer_configuration.add_per_entry_linear_addressing_through_texture(*it);
}
void layer_updater_cuda::update_buffer_configuration(
buffer_cuda_size_configuration& buffer_configuration,
unsigned int updater_entry_count) const
{
std::vector<size_t> per_entry_sizes = get_sizes_of_additional_buffers_per_entry();
for(std::vector<size_t>::const_iterator it = per_entry_sizes.begin(); it != per_entry_sizes.end(); ++it)
buffer_configuration.add_constant_buffer(*it * updater_entry_count);
std::vector<size_t> fixed_sizes = get_sizes_of_additional_buffers_fixed();
for(std::vector<size_t>::const_iterator it = fixed_sizes.begin(); it != fixed_sizes.end(); ++it)
buffer_configuration.add_constant_buffer(*it);
buffer_configuration.add_constant_buffer(output_elem_count_per_entry * sizeof(float) * updater_entry_count);
if (backprop_required && !is_in_place_backprop())
buffer_configuration.add_constant_buffer(input_elem_count_per_entry * sizeof(float) * updater_entry_count);
}
layer_updater_cuda::buffer_set layer_updater_cuda::allocate_all_buffers(unsigned int max_entry_count)
{
buffer_set res;
set_max_entry_count(max_entry_count);
std::vector<size_t> per_entry_sizes = get_sizes_of_additional_buffers_per_entry();
for(std::vector<size_t>::const_iterator it = per_entry_sizes.begin(); it != per_entry_sizes.end(); ++it)
res.additional_buffers.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(*it * max_entry_count)));
std::vector<size_t> fixed_sizes = get_sizes_of_additional_buffers_fixed();
for(std::vector<size_t>::const_iterator it = fixed_sizes.begin(); it != fixed_sizes.end(); ++it)
res.additional_buffers.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(*it)));
fill_additional_buffers(res.additional_buffers);
{
size_t sz = output_elem_count_per_entry * sizeof(float) * max_entry_count;
res.output_neurons_buffer = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(sz));
}
if (backprop_required && !is_in_place_backprop())
{
size_t sz = input_elem_count_per_entry * sizeof(float) * max_entry_count;
res.input_errors_buffer = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(sz));
}
res.dynamic_memobjects.resize(get_dynamic_memobject_count());
return res;
}
void layer_updater_cuda::enqueue_update_weights(
unsigned int offset_input_entry_id,
cudaStream_t stream_id,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& training_speed,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
}
void layer_updater_cuda::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const
{
}
std::vector<size_t> layer_updater_cuda::get_sizes_of_additional_buffers_fixed() const
{
return std::vector<size_t>();
}
void layer_updater_cuda::set_max_entry_count(unsigned int max_entry_count)
{
}
int layer_updater_cuda::get_dynamic_memobject_count() const
{
return 0;
}
}
}
|
444afc88ed05334b81d52a318d0dc1534db20b66.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <string>
#include <ctime>
#include "util.h"
#include "CPU_Jacobi.h"
#include "GPU_Unified.h"
#include "GPU_MemCopy.h"
#include "GPU_Shared.h"
//#include "GPU_Stream.h"
#include "GPU_MemCoa.h"
#include "GPU_Reduce.h"
#include "GPU_Reduce2.h"
#include "GPU_Reduce3.h"
double exeTime ;
int g_Block_size ;
int main(int argc, char *argv[]){
// Parsing input
if( argc != 3 ){
cerr << " Usage: ./Jacobi input_n mode\n" ;
return 1 ;
}
int mode = stoi(argv[2]) ; // 0: CPU , 1: Unified, 2: Memcopy, 3:Shared, 4:....
//g_Block_size = stoi(argv[3]) ; // 0: CPU , 1: Unified, 2: Memcopy, 3:Shared, 4:....
string input_num = argv[1] ;
string filename = "inputs/" + input_num + ".txt" ;
//Declare General Variables
int n , iter ;
float* input ;
float *sol , *x_k , *x_k1 ;
getinput( filename , n , iter , input , sol ) ;
printf( " n = %d \n" , n ) ;
x_k = new float[n] ;
x_k1 = new float[n] ;
for( int i = 0 ; i < n ; i++)
x_k[i] = 0 ;
// Transpose the input matrix (column-major copy used by the shared-memory and coalesced kernels)
float* t_input = new float[ n*n ] ;
for( int i = 0 ; i < n ; i++){
for( int j = 0 ; j < n ; j ++ ){
t_input[ j*n + i ] = input[ i*n + j ] ;
}
}
//Implements
clock_t c_start = clock();
//Mem copy
//clock_t c_mem_start = clock();
if( mode == 0 )
CPU_Jacobi( n , iter , input , sol , x_k , x_k1 ) ;
else if( mode == 1 )
GPU_Unified( n , iter , input , sol , x_k , x_k1 ) ;
else if( mode == 2 )
GPU_MemCopy( n , iter , input , sol , x_k , x_k1 ) ;
else if( mode == 3 )
GPU_Shared( n , iter , t_input , sol , x_k , x_k1 ) ;
else if( mode == 4 )
GPU_Memcoalesc( n , iter , t_input , sol , x_k , x_k1 ) ;
else if( mode == 5 )
GPU_Reduction( n , iter , input , sol , x_k , x_k1 ) ;
else if( mode == 6 )
GPU_Reduction2( n , iter , input , sol , x_k , x_k1 ) ;
else if( mode == 7 )
GPU_Reduction3( n , iter , input , sol , x_k , x_k1 ) ;
//clock_t c_mem_end = clock();
//Kernel Call
//clock_t c_exe_start = clock();
//clock_t c_exe_end = clock();
//Mem copy back
//clock_t c_memback_start = clock();
//clock_t c_memback_end = clock();
clock_t c_end = clock();
//Verification
float* res = MatrixMultiple( input , x_k , n) ;
bool check = true ;
//print_1D_array( n , "x" , x_k ) ;
for( int i = 0 ; i < n ; i++){
if( abs(res[i]-sol[i]) > 1){
printf( "Answer is wrong !! \n" );
check = false;
}
// printf( " res[%d] = %f | sol[%d] = %f \n" , i , res[i] , i , sol[i] ) ;
}
if( check ){
printf( "Answer is correct \n");
}
hipDeviceSynchronize();
//Delete
delete[] input ;
delete[] sol ;
delete[] x_k ;
delete[] x_k1 ;
delete[] res ;
/*
double memcopy_time = 1000.0 * (c_mem_end-c_mem_start) / CLOCKS_PER_SEC;
cout << "Memcopy time used: " << memback_time/1000.0 << " s\n";
double exe_time = 1000.0 * (c_exe_end - c_exe_start) / CLOCKS_PER_SEC;
cout << "Execute time used: " << exe_time /1000.0 << " s\n";
double memback_time = 1000.0 * (c_memback_end-c_memback_start) / CLOCKS_PER_SEC;
cout << "Memback time used: " << memback_time/1000.0 << " s\n";
*/
double time_elapsed_ms = 1000.0 * (c_end-c_start) / CLOCKS_PER_SEC;
cout << "Total time used: " << time_elapsed_ms/1000.0 << " s\n";
cout << "Kernel time used: " << exeTime/1000.0 << " s\n";
return 0;
}
| 444afc88ed05334b81d52a318d0dc1534db20b66.cu |
#include <iostream>
#include <string>
#include <ctime>
#include "util.h"
#include "CPU_Jacobi.h"
#include "GPU_Unified.h"
#include "GPU_MemCopy.h"
#include "GPU_Shared.h"
//#include "GPU_Stream.h"
#include "GPU_MemCoa.h"
#include "GPU_Reduce.h"
#include "GPU_Reduce2.h"
#include "GPU_Reduce3.h"
double exeTime ;
int g_Block_size ;
int main(int argc, char *argv[]){
// Parsing input
if( argc != 3 ){
cerr << " Usage: ./Jacobi input_n mode\n" ;
return 1 ;
}
int mode = stoi(argv[2]) ; // 0: CPU , 1: Unified, 2: Memcopy, 3:Shared, 4:....
//g_Block_size = stoi(argv[3]) ; // 0: CPU , 1: Unified, 2: Memcopy, 3:Shared, 4:....
string input_num = argv[1] ;
string filename = "inputs/" + input_num + ".txt" ;
//Declare General Variables
int n , iter ;
float* input ;
float *sol , *x_k , *x_k1 ;
getinput( filename , n , iter , input , sol ) ;
printf( " n = %d \n" , n ) ;
x_k = new float[n] ;
x_k1 = new float[n] ;
for( int i = 0 ; i < n ; i++)
x_k[i] = 0 ;
// Transpose the input matrix (column-major copy used by the shared-memory and coalesced kernels)
float* t_input = new float[ n*n ] ;
for( int i = 0 ; i < n ; i++){
for( int j = 0 ; j < n ; j ++ ){
t_input[ j*n + i ] = input[ i*n + j ] ;
}
}
//Implements
clock_t c_start = clock();
//Mem copy
//clock_t c_mem_start = clock();
if( mode == 0 )
CPU_Jacobi( n , iter , input , sol , x_k , x_k1 ) ;
else if( mode == 1 )
GPU_Unified( n , iter , input , sol , x_k , x_k1 ) ;
else if( mode == 2 )
GPU_MemCopy( n , iter , input , sol , x_k , x_k1 ) ;
else if( mode == 3 )
GPU_Shared( n , iter , t_input , sol , x_k , x_k1 ) ;
else if( mode == 4 )
GPU_Memcoalesc( n , iter , t_input , sol , x_k , x_k1 ) ;
else if( mode == 5 )
GPU_Reduction( n , iter , input , sol , x_k , x_k1 ) ;
else if( mode == 6 )
GPU_Reduction2( n , iter , input , sol , x_k , x_k1 ) ;
else if( mode == 7 )
GPU_Reduction3( n , iter , input , sol , x_k , x_k1 ) ;
//clock_t c_mem_end = clock();
//Kernel Call
//clock_t c_exe_start = clock();
//clock_t c_exe_end = clock();
//Mem copy back
//clock_t c_memback_start = clock();
//clock_t c_memback_end = clock();
clock_t c_end = clock();
//Verification
float* res = MatrixMultiple( input , x_k , n) ;
bool check = true ;
//print_1D_array( n , "x" , x_k ) ;
for( int i = 0 ; i < n ; i++){
if( abs(res[i]-sol[i]) > 1){
printf( "Answer is wrong !! \n" );
check = false;
}
// printf( " res[%d] = %f | sol[%d] = %f \n" , i , res[i] , i , sol[i] ) ;
}
if( check ){
printf( "Answer is correct \n");
}
cudaDeviceSynchronize();
//Delete
delete[] input ;
delete[] sol ;
delete[] x_k ;
delete[] x_k1 ;
delete[] res ;
/*
double memcopy_time = 1000.0 * (c_mem_end-c_mem_start) / CLOCKS_PER_SEC;
cout << "Memcopy time used: " << memback_time/1000.0 << " s\n";
double exe_time = 1000.0 * (c_exe_end - c_exe_start) / CLOCKS_PER_SEC;
cout << "Execute time used: " << exe_time /1000.0 << " s\n";
double memback_time = 1000.0 * (c_memback_end-c_memback_start) / CLOCKS_PER_SEC;
cout << "Memback time used: " << memback_time/1000.0 << " s\n";
*/
double time_elapsed_ms = 1000.0 * (c_end-c_start) / CLOCKS_PER_SEC;
cout << "Total time used: " << time_elapsed_ms/1000.0 << " s\n";
cout << "Kernel time used: " << exeTime/1000.0 << " s\n";
return 0;
}
|
4edf2b09843c9942700360fbd5f142908019a538.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdlib>
#include <iostream>
#define NUM_BLOCKS 16
#define NUM_THREADS_PER_BLOCK 16
#define N 16
using namespace std;
hipEvent_t start, stop;
void startKernelTime (void) {
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
}
void stopKernelTime (void) {
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
cout << milliseconds << " ms have elapsed for the CUDA execution" << endl;
}
void checkCUDAError (const char *msg) {
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
cerr << "Cuda error: " << msg << ", " << hipGetErrorString( err) << endl;
exit(-1);
}
}
void printMatrix(char c, float *M) {
cout << c << endl;
for (int i = 0; i < N; i++) {
cout << endl;
for (int j = 0; j < N; j++) {
cout << M[i * N + j] << " ";
}
}
cout << "\n\n";
}
void fillMatrices(float *A, float *B) {
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
A[i*N+j] = ((float)rand()) / ((float)RAND_MAX);
B[i*N+j] = 1;
}
}
}
__global__ void matMultKernel_ijk(float *A, float *B, float *C) {
int i = blockIdx.x;
int j = threadIdx.x;
if (i < N && j < N) {
C[i * N + j] = 0;
for (int k = 0; k < N; k++) {
C[i * N + j] += A[i * N + k] * B[k * N + j];
}
}
}
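// Note: in matMultKernel_ikj below, threads of the same block (one per k) all do
// "+=" on the same C[i*N+j] without atomics or a reduction, so the accumulation
// races; this is presumably why main() launches the ijk variant instead.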
__global__ void matMultKernel_ikj(float *A, float *B, float *C) {
int i = blockIdx.x;
int k = threadIdx.x;
if (i < N && k < N/* && k >= 1 */) {
for (int j = 0; j < N; j++) {
C[i * N + j] += A[i * N + k] * B[k * N + j];
}
}
// void matMult_ikj(float *A, float *B, float *C, int N) {
// for (int i = 0; i < N; i++) {
// for (int j = 0; j < N; j++) { // 1st iteration: assigns C here, so the result matrix does not need to be zeroed beforehand
// C[i*N+j] = A[i*N+0] * B[0*N+j];
// }
// for (int k = 1; k < N; k++) {
// for (int j = 0; j < N; j++) {
// C[i*N+j] += A[i*N+k] * B[k*N+j];
// }
// }
// }
// }
}
void matMultGPU(float *A, float *B, float *C) {
// declare variable with size of the array in bytes
int bytes = N * N * sizeof(float);
// pointers to the device memory
float *dA, *dB, *dC;
// allocate the memory on the device
hipMalloc((void**) &dA, bytes);
hipMalloc((void**) &dB, bytes);
hipMalloc((void**) &dC, bytes);
startKernelTime();
checkCUDAError("mem allocation");
// copy inputs to the device
hipMemcpy(dA, A, bytes, hipMemcpyHostToDevice);
hipMemcpy(dB, B, bytes, hipMemcpyHostToDevice);
//hipMemcpy(dC, C, bytes, hipMemcpyHostToDevice); /** Temporary **/
checkCUDAError("memcpy h->d");
// launch the kernel
hipLaunchKernelGGL(( matMultKernel_ijk) , dim3(NUM_THREADS_PER_BLOCK), dim3(NUM_BLOCKS) , 0, 0, dA, dB, dC);
// matMultKernel_ikj <<< NUM_THREADS_PER_BLOCK, NUM_BLOCKS >>> (dA, dB, dC);
checkCUDAError("kernel invocation");
// copy the output to the host
hipMemcpy(C, dC, bytes, hipMemcpyDeviceToHost);
checkCUDAError("memcpy d->h");
stopKernelTime();
// free the device memory
hipFree(dA); hipFree(dB); hipFree(dC);
checkCUDAError("mem free");
}
int main(int argc, char const *argv[]) {
float A[N * N];
float B[N * N];
float C[N * N];
fillMatrices(A, B);
// /** Temporary **/
// for (int i = 0; i < N * N; i++) {
// C[i] = 0;
// }
matMultGPU(A, B, C);
printMatrix('A', A);
printMatrix('B', B);
printMatrix('C', C);
}
| 4edf2b09843c9942700360fbd5f142908019a538.cu | #include <cstdlib>
#include <iostream>
#define NUM_BLOCKS 16
#define NUM_THREADS_PER_BLOCK 16
#define N 16
using namespace std;
cudaEvent_t start, stop;
void startKernelTime (void) {
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
}
void stopKernelTime (void) {
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cout << milliseconds << " ms have elapsed for the CUDA execution" << endl;
}
void checkCUDAError (const char *msg) {
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
cerr << "Cuda error: " << msg << ", " << cudaGetErrorString( err) << endl;
exit(-1);
}
}
void printMatrix(char c, float *M) {
cout << c << endl;
for (int i = 0; i < N; i++) {
cout << endl;
for (int j = 0; j < N; j++) {
cout << M[i * N + j] << " ";
}
}
cout << "\n\n";
}
void fillMatrices(float *A, float *B) {
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
A[i*N+j] = ((float)rand()) / ((float)RAND_MAX);
B[i*N+j] = 1;
}
}
}
__global__ void matMultKernel_ijk(float *A, float *B, float *C) {
int i = blockIdx.x;
int j = threadIdx.x;
if (i < N && j < N) {
C[i * N + j] = 0;
for (int k = 0; k < N; k++) {
C[i * N + j] += A[i * N + k] * B[k * N + j];
}
}
}
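// The matMultKernel_ikj kernel below lets every thread of a block (one thread per k)
// accumulate into the same C[i*N+j] with a plain "+=", so those updates race.
// A minimal race-free sketch using atomicAdd is added here for illustration; it is an
// addition to the original file, is never launched by main(), assumes float atomicAdd
// support (compute capability 2.0+), and assumes C was zero-initialized beforehand.
__global__ void matMultKernel_ikj_atomic(float *A, float *B, float *C) {
    int i = blockIdx.x;   // one block per output row
    int k = threadIdx.x;  // one thread per k term of the dot products
    if (i < N && k < N) {
        for (int j = 0; j < N; j++) {
            // atomicAdd serializes the concurrent updates to C[i * N + j]
            atomicAdd(&C[i * N + j], A[i * N + k] * B[k * N + j]);
        }
    }
}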
__global__ void matMultKernel_ikj(float *A, float *B, float *C) {
int i = blockIdx.x;
int k = threadIdx.x;
if (i < N && k < N/* && k >= 1 */) {
for (int j = 0; j < N; j++) {
C[i * N + j] += A[i * N + k] * B[k * N + j];
}
}
// void matMult_ikj(float *A, float *B, float *C, int N) {
// for (int i = 0; i < N; i++) {
// for (int j = 0; j < N; j++) { // 1st iteration: assigns C here, so the result matrix does not need to be zeroed beforehand
// C[i*N+j] = A[i*N+0] * B[0*N+j];
// }
// for (int k = 1; k < N; k++) {
// for (int j = 0; j < N; j++) {
// C[i*N+j] += A[i*N+k] * B[k*N+j];
// }
// }
// }
// }
}
void matMultGPU(float *A, float *B, float *C) {
// declare variable with size of the array in bytes
int bytes = N * N * sizeof(float);
// pointers to the device memory
float *dA, *dB, *dC;
// allocate the memory on the device
cudaMalloc((void**) &dA, bytes);
cudaMalloc((void**) &dB, bytes);
cudaMalloc((void**) &dC, bytes);
startKernelTime();
checkCUDAError("mem allocation");
// copy inputs to the device
cudaMemcpy(dA, A, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(dB, B, bytes, cudaMemcpyHostToDevice);
//cudaMemcpy(dC, C, bytes, cudaMemcpyHostToDevice); /** Temporary **/
checkCUDAError("memcpy h->d");
// launch the kernel
matMultKernel_ijk <<< NUM_THREADS_PER_BLOCK, NUM_BLOCKS >>> (dA, dB, dC);
// matMultKernel_ikj <<< NUM_THREADS_PER_BLOCK, NUM_BLOCKS >>> (dA, dB, dC);
checkCUDAError("kernel invocation");
// copy the output to the host
cudaMemcpy(C, dC, bytes, cudaMemcpyDeviceToHost);
checkCUDAError("memcpy d->h");
stopKernelTime();
// free the device memory
cudaFree(dA); cudaFree(dB); cudaFree(dC);
checkCUDAError("mem free");
}
int main(int argc, char const *argv[]) {
float A[N * N];
float B[N * N];
float C[N * N];
fillMatrices(A, B);
// /** Temporary **/
// for (int i = 0; i < N * N; i++) {
// C[i] = 0;
// }
matMultGPU(A, B, C);
printMatrix('A', A);
printMatrix('B', B);
printMatrix('C', C);
}
|
de1095c350697601cc7fd4453806e53997e23d1a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float* var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21) {
for (int i=0; i < var_1; ++i) {
float tmp_1 = -1.8482E-36f;
var_2[i] = (var_4 / +1.3383E-35f + var_5 / +1.1280E34f * asinf((-1.8207E-18f - (var_6 / (+1.6148E-42f + -1.9208E-25f + +1.6295E35f)))));
comp = var_2[i] * tmp_1 / var_7 / var_8 - (+1.0124E-35f * +1.1107E36f / +1.9785E-37f);
comp = (var_9 * -1.2353E-14f * var_10);
for (int i=0; i < var_3; ++i) {
comp += var_11 + var_12;
float tmp_2 = -1.8156E34f;
comp = tmp_2 / (var_13 / atanf(powf((var_14 * var_15 - (var_16 - var_17)), var_18 * +1.1786E36f)));
}
if (comp <= (+1.9894E35f * asinf((-1.7244E34f + fmodf(var_19 / (-1.9973E-37f / -1.7613E26f), +1.1054E-22f))))) {
comp += var_20 / -0.0f;
comp += var_21 * -1.1049E34f + expf(+1.3526E-16f);
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float* tmp_3 = initPointer( atof(argv[3]) );
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22);
hipDeviceSynchronize();
return 0;
}
| de1095c350697601cc7fd4453806e53997e23d1a.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float* var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21) {
for (int i=0; i < var_1; ++i) {
float tmp_1 = -1.8482E-36f;
var_2[i] = (var_4 / +1.3383E-35f + var_5 / +1.1280E34f * asinf((-1.8207E-18f - (var_6 / (+1.6148E-42f + -1.9208E-25f + +1.6295E35f)))));
comp = var_2[i] * tmp_1 / var_7 / var_8 - (+1.0124E-35f * +1.1107E36f / +1.9785E-37f);
comp = (var_9 * -1.2353E-14f * var_10);
for (int i=0; i < var_3; ++i) {
comp += var_11 + var_12;
float tmp_2 = -1.8156E34f;
comp = tmp_2 / (var_13 / atanf(powf((var_14 * var_15 - (var_16 - var_17)), var_18 * +1.1786E36f)));
}
if (comp <= (+1.9894E35f * asinf((-1.7244E34f + fmodf(var_19 / (-1.9973E-37f / -1.7613E26f), +1.1054E-22f))))) {
comp += var_20 / -0.0f;
comp += var_21 * -1.1049E34f + expf(+1.3526E-16f);
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float* tmp_3 = initPointer( atof(argv[3]) );
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22);
cudaDeviceSynchronize();
return 0;
}
|
3f55a33d87ba3f7a3c8aaa369765cd623e4459d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Cambricon Corporation: 2018 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2018, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "caffe/util/math_functions.hpp"
namespace caffe {
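// AdamUpdate: element-wise Adam step. m and v hold the running first- and second-moment
// estimates; g enters as the gradient and leaves as the final update
// corrected_local_rate * m / (sqrt(v) + eps_hat), which the solver then applies.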
template <typename Dtype>
__global__ void AdamUpdate(int N, Dtype* g, Dtype* m, Dtype* v,
Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) {
CUDA_KERNEL_LOOP(i, N) {
float gi = g[i];
float mi = m[i] = m[i]*beta1 + gi*(1-beta1);
float vi = v[i] = v[i]*beta2 + gi*gi*(1-beta2);
g[i] = corrected_local_rate * mi / (sqrt(vi) + eps_hat);
}
}
template <typename Dtype>
void adam_update_gpu(int N, Dtype* g, Dtype* m, Dtype* v, Dtype beta1,
Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(HIP_KERNEL_NAME(AdamUpdate<Dtype>),
dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, g, m, v, beta1, beta2, eps_hat, corrected_local_rate);
CUDA_POST_KERNEL_CHECK;
}
template void adam_update_gpu<float>(int, float*, float*, float*,
float, float, float, float);
template void adam_update_gpu<double>(int, double*, double*, double*,
double, double, double, double);
} // namespace caffe
| 3f55a33d87ba3f7a3c8aaa369765cd623e4459d5.cu | /*
All modifications made by Cambricon Corporation: © 2018 Cambricon Corporation
All rights reserved.
All other contributions:
Copyright (c) 2014--2018, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void AdamUpdate(int N, Dtype* g, Dtype* m, Dtype* v,
Dtype beta1, Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) {
CUDA_KERNEL_LOOP(i, N) {
float gi = g[i];
float mi = m[i] = m[i]*beta1 + gi*(1-beta1);
float vi = v[i] = v[i]*beta2 + gi*gi*(1-beta2);
g[i] = corrected_local_rate * mi / (sqrt(vi) + eps_hat);
}
}
template <typename Dtype>
void adam_update_gpu(int N, Dtype* g, Dtype* m, Dtype* v, Dtype beta1,
Dtype beta2, Dtype eps_hat, Dtype corrected_local_rate) {
AdamUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, g, m, v, beta1, beta2, eps_hat, corrected_local_rate);
CUDA_POST_KERNEL_CHECK;
}
template void adam_update_gpu<float>(int, float*, float*, float*,
float, float, float, float);
template void adam_update_gpu<double>(int, double*, double*, double*,
double, double, double, double);
} // namespace caffe
|
9924a811b87018eb06cb9ba71b1ca0fc96c98ede.hip | // !!! This is a file automatically generated by hipify!!!
//
// main.c
// problem_2
//
// Created by Alex CONG on 6/6/2016.
// Copyright © 2016 Alex CONG. All rights reserved.
// Update from problem_2 of project 3
// Generate the matrix of U[y][x]
// With the boundary condition u(-1,y,t)=y, u(1,y,t)=-y, u(x,-1,t)=x, u(x,1,t)=-x
// Apply ADI method to find ut=nu*(uxx+uyy);
// in kernel
// {
// explicit the localU
// transpose them
// Lapack to solve diagonal matrix
// explicit the localU
// transpose them
// Lapack to solve diagonal matrix
// }
//
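// As a rough sketch of the scheme outlined above: with lamda = nu*dt/(2*dx*dx) and
// D u_i = u_{i+1} - 2*u_i + u_{i-1}, each time step below performs two ADI half steps,
//   (I - lamda*D_2) u^{n+1/2} = (I + lamda*D_1) u^n
//   (I - lamda*D_1) u^{n+1}   = (I + lamda*D_2) u^{n+1/2}
// where the transposes swap which direction is advanced explicitly and which is solved
// implicitly through the MAGMA LU solve of the tridiagonal system.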
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <magma.h>
#include <magma_types.h>
#include <magma_lapack.h>
#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795
#endif
__device__ static int dev_N; // global variable for dimension N in kernel
__device__ static double dev_lamda; // lamda is the coefficient nu*dt/2/(dx*dx)
const int gendimT=1024;// number of general threads number
__global__ void solve(double* u, double* unew){
__shared__ double localu[gendimT]; // local memory
int row,col; // row and column for the local u
int id_local;
int id=blockIdx.x*dev_N+threadIdx.x;
if (threadIdx.x<dev_N && blockIdx.x<dev_N)
{
id_local=threadIdx.x;
row=id/dev_N;
col=id%dev_N;
localu[id_local]=u[id];
__syncthreads();
if (row!=0 && row!=dev_N-1 && col!=0 && col!=dev_N-1)
{
unew[id]=localu[id_local]+dev_lamda*(localu[id_local+1]-2*localu[id_local]+localu[id_local-1]);
}
}
}
__global__ void transpose(double* u,double* unew){
__shared__ double localu[gendimT]; // local memory
int id=blockIdx.x*dev_N+threadIdx.x;
if (threadIdx.x<dev_N && blockIdx.x<dev_N)
{
int row=id/dev_N;
int col=id%dev_N;
localu[threadIdx.x]=u[id];
__syncthreads();
unew[col*dev_N+row]=localu[threadIdx.x];
}
}
int main(int argc,char * argv[]) {
//initial the cuda
hipDeviceProp_t prop;
int dev;
memset( &prop, 0, sizeof(hipDeviceProp_t));
prop.multiProcessorCount = 13;
prop.major = 3;
prop.minor = 5;
hipChooseDevice(&dev, &prop);
hipSetDevice(dev);
//create the event and record times
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
float elapsedTime;
FILE* outfile=fopen(argv[2], "w");
// start reading input data using function fscanf here
int N;
double dt;
double T;
double nu;
FILE* inputfile = fopen(argv[1], "r");
fscanf(inputfile, "%d", &N); // read an integer N for example
fscanf(inputfile, "%lf", &dt);
fscanf(inputfile, "%lf", &T);
fscanf(inputfile, "%lf", &nu);
fclose(inputfile);
double dx,dtest;
double lamda;
int i,j; // counter
int nsteps; // time step
fwrite(&N, sizeof(int), 1, outfile); // output N
dtest=N;
dx=2/(dtest-1); // find the distance for two x grid points
nsteps=round(T/dt); // how many steps for the whole process
lamda=nu*dt/2/(dx*dx); // generate the lamda
// copy N and lamda into the kernel globals dev_N and dev_lamda
hipMemcpyToSymbol(dev_N, &N, sizeof(int));
hipMemcpyToSymbol(dev_lamda, &lamda, sizeof(double));
// initial the matrix and copy the matrix into kernel
double *x=(double*)malloc(N*sizeof(double));
double *u=(double*)malloc(N*N*sizeof(double));
double *dev_unew;
double *dev_uold;
hipMalloc((void **)&dev_unew, N*N*sizeof(double));
hipMalloc((void **)&dev_uold, N*N*sizeof(double));
// initial the array x
for (i=0;i<N;i++)
{
x[i]=-1+i*dx;
}
// generate the matrix u
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
u[i+j*N]=-x[i]*x[j]+cos(11*M_PI*x[i]/2)*sin(8*M_PI*x[j]);
}
}
fwrite(x, sizeof(double),N,outfile); // output x
fwrite(u, sizeof(double),N*N,outfile); // output u
// Initialize the MAGMA system
magma_init();
magma_int_t *piv, info;
magma_int_t dimen_N=N;
magma_int_t dimen_b=N-2;
double* A; // matrix Ax=b
double* dev_A; // matrix A for kernel
piv=(magma_int_t*)malloc(dimen_N*sizeof(magma_int_t));
// assign memory for the cuda lapack
magma_dmalloc_cpu(&A,dimen_N*dimen_N);
magma_dmalloc(&dev_A,dimen_N*dimen_N);
for (i=0;i<N*N;i++)
{
A[i]=0.;
}
for(i=1; i<N-1; i++)
{
// wipe out the first row and last row
A[N*(i-1)+i]=-lamda;
A[N*(i)+i]=1+2*lamda;
A[N*(i+1)+i]=-lamda;
}
// the head and tail for the A
A[0]=1.0;
A[N*N-1]=1.0;
// Send this matrix to the device
magma_dsetmatrix(dimen_N,dimen_N,A,dimen_N,dev_A,dimen_N);
// Get the first part of solver
magma_dgetrf_gpu(dimen_N,dimen_N,dev_A,dimen_N,piv,&info);
// initial the state
hipMemcpy(dev_uold, u, N*N*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_unew, u, N*N*sizeof(double), hipMemcpyHostToDevice);
// Main loop
for(i=0;i<nsteps;i++)
{
// step A
hipLaunchKernelGGL(solve, dim3(N), dim3(N), 0, 0, dev_uold, dev_unew);
hipLaunchKernelGGL(transpose, dim3(N), dim3(N), 0, 0, dev_unew, dev_uold);
// step B
magma_dgetrs_gpu(MagmaNoTrans,dimen_N,dimen_b,dev_A,dimen_N,piv,&(dev_uold[N]),dimen_N,&info);
// step C
hipLaunchKernelGGL(solve, dim3(N), dim3(N), 0, 0, dev_uold, dev_unew);
hipLaunchKernelGGL(transpose, dim3(N), dim3(N), 0, 0, dev_unew, dev_uold);
// step D
magma_dgetrs_gpu(MagmaNoTrans,dimen_N,dimen_b,dev_A,dimen_N,piv,&(dev_uold[N]),dimen_N,&info);
if((i+1)%1000==0)
{
// output the intended result
hipMemcpy(u, dev_uold, N*N*sizeof(double), hipMemcpyDeviceToHost);
fwrite(u, sizeof(double), N*N, outfile);
}
}
// output the time used
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime,start,stop);
printf("The Elapsed Time is %f seconds \n",elapsedTime/(float)1000);
//close the output file
fclose(outfile);
//free memory
free(u);
free(x);
free(A);
magma_free(dev_A);
free(piv);
hipFree(dev_unew);
hipFree(dev_uold);
magma_finalize();
return 0;
}
| 9924a811b87018eb06cb9ba71b1ca0fc96c98ede.cu | //
// main.c
// problem_2
//
// Created by Alex CONG on 6/6/2016.
// Copyright © 2016 Alex CONG. All rights reserved.
// Update from problem_2 of project 3
// Generate the matrix of U[y][x]
// With the boundary condition u(-1,y,t)=y, u(1,y,t)=-y, u(x,-1,t)=x, u(x,1,t)=-x
// Apply ADI method to find ut=nu*(uxx+uyy);
// in kernel
// {
// explicit the localU
// transpose them
// Lapack to solve diagonal matrix
// explicit the localU
// transpose them
// Lapack to solve diagonal matrix
// }
//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <magma.h>
#include <magma_types.h>
#include <magma_lapack.h>
#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795
#endif
__device__ static int dev_N; // global variable for dimension N in kernel
__device__ static double dev_lamda; // lamda is the coefficient nu*dt/2/(dx*dx)
const int gendimT=1024;// number of general threads number
__global__ void solve(double* u, double* unew){
__shared__ double localu[gendimT]; // local memory
int row,col; // row and column for the local u
int id_local;
int id=blockIdx.x*dev_N+threadIdx.x;
if (threadIdx.x<dev_N && blockIdx.x<dev_N)
{
id_local=threadIdx.x;
row=id/dev_N;
col=id%dev_N;
localu[id_local]=u[id];
__syncthreads();
if (row!=0 && row!=dev_N-1 && col!=0 && col!=dev_N-1)
{
unew[id]=localu[id_local]+dev_lamda*(localu[id_local+1]-2*localu[id_local]+localu[id_local-1]);
}
}
}
__global__ void transpose(double* u,double* unew){
__shared__ double localu[gendimT]; // local memory
int id=blockIdx.x*dev_N+threadIdx.x;
if (threadIdx.x<dev_N && blockIdx.x<dev_N)
{
int row=id/dev_N;
int col=id%dev_N;
localu[threadIdx.x]=u[id];
__syncthreads();
unew[col*dev_N+row]=localu[threadIdx.x];
}
}
int main(int argc,char * argv[]) {
//initial the cuda
cudaDeviceProp prop;
int dev;
memset( &prop, 0, sizeof(cudaDeviceProp));
prop.multiProcessorCount = 13;
prop.major = 3;
prop.minor = 5;
cudaChooseDevice(&dev, &prop);
cudaSetDevice(dev);
//create the event and record times
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
float elapsedTime;
FILE* outfile=fopen(argv[2], "w");
// start reading input data using function fscanf here
int N;
double dt;
double T;
double nu;
FILE* inputfile = fopen(argv[1], "r");
fscanf(inputfile, "%d", &N); // read an integer N for example
fscanf(inputfile, "%lf", &dt);
fscanf(inputfile, "%lf", &T);
fscanf(inputfile, "%lf", &nu);
fclose(inputfile);
double dx,dtest;
double lamda;
int i,j; // counter
int nsteps; // time step
fwrite(&N, sizeof(int), 1, outfile); // output N
dtest=N;
dx=2/(dtest-1); // find the distance for two x grid points
nsteps=round(T/dt); // how many steps for the whole process
lamda=nu*dt/2/(dx*dx); // generate the lamda
// copy N and lamda into the kernel globals dev_N and dev_lamda
cudaMemcpyToSymbol(dev_N, &N, sizeof(int));
cudaMemcpyToSymbol(dev_lamda, &lamda, sizeof(double));
// initial the matrix and copy the matrix into kernel
double *x=(double*)malloc(N*sizeof(double));
double *u=(double*)malloc(N*N*sizeof(double));
double *dev_unew;
double *dev_uold;
cudaMalloc((void **)&dev_unew, N*N*sizeof(double));
cudaMalloc((void **)&dev_uold, N*N*sizeof(double));
// initial the array x
for (i=0;i<N;i++)
{
x[i]=-1+i*dx;
}
// generate the matrix u
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
u[i+j*N]=-x[i]*x[j]+cos(11*M_PI*x[i]/2)*sin(8*M_PI*x[j]);
}
}
fwrite(x, sizeof(double),N,outfile); // output x
fwrite(u, sizeof(double),N*N,outfile); // output u
// Initialize the MAGMA system
magma_init();
magma_int_t *piv, info;
magma_int_t dimen_N=N;
magma_int_t dimen_b=N-2;
double* A; // matrix Ax=b
double* dev_A; // matrix A for kernel
piv=(magma_int_t*)malloc(dimen_N*sizeof(magma_int_t));
// assign memory for the cuda lapack
magma_dmalloc_cpu(&A,dimen_N*dimen_N);
magma_dmalloc(&dev_A,dimen_N*dimen_N);
for (i=0;i<N*N;i++)
{
A[i]=0.;
}
for(i=1; i<N-1; i++)
{
// wipe out the first row and last row
A[N*(i-1)+i]=-lamda;
A[N*(i)+i]=1+2*lamda;
A[N*(i+1)+i]=-lamda;
}
// the head and tail for the A
A[0]=1.0;
A[N*N-1]=1.0;
// Send this matrix to the device
magma_dsetmatrix(dimen_N,dimen_N,A,dimen_N,dev_A,dimen_N);
// Get the first part of solver
magma_dgetrf_gpu(dimen_N,dimen_N,dev_A,dimen_N,piv,&info);
// initial the state
cudaMemcpy(dev_uold, u, N*N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_unew, u, N*N*sizeof(double), cudaMemcpyHostToDevice);
// Main loop
for(i=0;i<nsteps;i++)
{
// step A
solve<<<N, N>>>(dev_uold, dev_unew);
transpose<<<N,N>>>(dev_unew,dev_uold);
// step B
magma_dgetrs_gpu(MagmaNoTrans,dimen_N,dimen_b,dev_A,dimen_N,piv,&(dev_uold[N]),dimen_N,&info);
// step C
solve<<<N, N>>>(dev_uold, dev_unew);
transpose<<<N,N>>>(dev_unew,dev_uold);
// step D
magma_dgetrs_gpu(MagmaNoTrans,dimen_N,dimen_b,dev_A,dimen_N,piv,&(dev_uold[N]),dimen_N,&info);
if((i+1)%1000==0)
{
// output the intended result
cudaMemcpy(u, dev_uold, N*N*sizeof(double), cudaMemcpyDeviceToHost);
fwrite(u, sizeof(double), N*N, outfile);
}
}
// output the time used
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime,start,stop);
printf("The Elapsed Time is %f seconds \n",elapsedTime/(float)1000);
//close the output file
fclose(outfile);
//free memory
free(u);
free(x);
free(A);
magma_free(dev_A);
free(piv);
cudaFree(dev_unew);
cudaFree(dev_uold);
magma_finalize();
return 0;
}
|
2c36b3660c033e3428536011d48d95e013e3538d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zlarfg-v2.cu normal z -> c, Tue Feb 9 16:05:30 2016
*/
#include "magma_internal.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define COMPLEX
__global__
void magma_clarfg_gpu_kernel( int n, magmaFloatComplex* dx0, magmaFloatComplex* dx,
magmaFloatComplex *dtau, float *dxnorm, magmaFloatComplex* dAkk)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaFloatComplex scale;
float xnorm;
magmaFloatComplex dxi;
#ifdef REAL
if ( n <= 1 )
#else
if ( n <= 0 )
#endif
{
*dtau = MAGMA_C_ZERO;
*dAkk = *dx0;
return;
}
if ( j < n-1)
dxi = dx[j];
xnorm = *dxnorm;
magmaFloatComplex alpha = *dx0;
#ifdef REAL
if ( xnorm != 0 ) {
if (i == 0) {
float beta = sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = (beta - alpha) / beta;
*dAkk = beta;
scale = 1. / (alpha - beta);
}
#else
float alphar = MAGMA_C_REAL(alpha);
float alphai = MAGMA_C_IMAG(alpha);
if ( xnorm != 0 || alphai != 0) {
if (i == 0) {
float beta = sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = MAGMA_C_MAKE((beta - alphar)/beta, -alphai/beta);
*dAkk = MAGMA_C_MAKE(beta, 0.);
alpha = MAGMA_C_MAKE( MAGMA_C_REAL(alpha) - beta, MAGMA_C_IMAG(alpha));
scale = MAGMA_C_DIV( MAGMA_C_ONE, alpha);
}
#endif
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_C_MUL(dxi, scale);
}
else {
*dtau = MAGMA_C_ZERO;
*dAkk = *dx0;
}
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's clarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
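/*
    As a rough sketch, in the real case the kernel's quantities reduce to the usual
    larfg formulas
        beta = -sign(alpha) * sqrt(alpha^2 + ||dx||^2),
        tau  = (beta - alpha) / beta,
        v    = [1; dx / (alpha - beta)],
    with beta returned through dAkk and dx rescaled in place.
*/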
extern "C" void
magma_clarfg_gpu_q(
magma_int_t n,
magmaFloatComplex_ptr dx0,
magmaFloatComplex_ptr dx,
magmaFloatComplex_ptr dtau,
magmaFloat_ptr dxnorm,
magmaFloatComplex_ptr dAkk,
magma_queue_t queue )
{
dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) );
dim3 threads( BLOCK_SIZE );
/* recomputing the norm */
//magmablas_scnrm2_cols(n, 1, dx0, n, dxnorm);
magmablas_scnrm2_cols_q(n-1, 1, dx0+1, n, dxnorm, queue);
hipLaunchKernelGGL(( magma_clarfg_gpu_kernel)
, dim3(blocks), dim3(threads), 0, queue->cuda_stream() ,
n, dx0, dx, dtau, dxnorm, dAkk);
}
| 2c36b3660c033e3428536011d48d95e013e3538d.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zlarfg-v2.cu normal z -> c, Tue Feb 9 16:05:30 2016
*/
#include "magma_internal.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define COMPLEX
__global__
void magma_clarfg_gpu_kernel( int n, magmaFloatComplex* dx0, magmaFloatComplex* dx,
magmaFloatComplex *dtau, float *dxnorm, magmaFloatComplex* dAkk)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaFloatComplex scale;
float xnorm;
magmaFloatComplex dxi;
#ifdef REAL
if ( n <= 1 )
#else
if ( n <= 0 )
#endif
{
*dtau = MAGMA_C_ZERO;
*dAkk = *dx0;
return;
}
if ( j < n-1)
dxi = dx[j];
xnorm = *dxnorm;
magmaFloatComplex alpha = *dx0;
#ifdef REAL
if ( xnorm != 0 ) {
if (i == 0) {
float beta = sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = (beta - alpha) / beta;
*dAkk = beta;
scale = 1. / (alpha - beta);
}
#else
float alphar = MAGMA_C_REAL(alpha);
float alphai = MAGMA_C_IMAG(alpha);
if ( xnorm != 0 || alphai != 0) {
if (i == 0) {
float beta = sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = MAGMA_C_MAKE((beta - alphar)/beta, -alphai/beta);
*dAkk = MAGMA_C_MAKE(beta, 0.);
alpha = MAGMA_C_MAKE( MAGMA_C_REAL(alpha) - beta, MAGMA_C_IMAG(alpha));
scale = MAGMA_C_DIV( MAGMA_C_ONE, alpha);
}
#endif
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_C_MUL(dxi, scale);
}
else {
*dtau = MAGMA_C_ZERO;
*dAkk = *dx0;
}
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's clarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_clarfg_gpu_q(
magma_int_t n,
magmaFloatComplex_ptr dx0,
magmaFloatComplex_ptr dx,
magmaFloatComplex_ptr dtau,
magmaFloat_ptr dxnorm,
magmaFloatComplex_ptr dAkk,
magma_queue_t queue )
{
dim3 blocks( magma_ceildiv( n, BLOCK_SIZE ) );
dim3 threads( BLOCK_SIZE );
/* recomputing the norm */
//magmablas_scnrm2_cols(n, 1, dx0, n, dxnorm);
magmablas_scnrm2_cols_q(n-1, 1, dx0+1, n, dxnorm, queue);
magma_clarfg_gpu_kernel
<<< blocks, threads, 0, queue->cuda_stream() >>>
(n, dx0, dx, dtau, dxnorm, dAkk);
}
|
21831d5715e74164fbf3e34d0b9378b90b103972.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <chrono>
#define BLOCK_COUNT 256u
#define HALF_BLOCK_COUNT 128u
#define BANKS 16
#define LOG_2_BANKS 4
// macro used for computing
// Bank-Conflict-Free Shared Memory Array Indices
#define AVOID_BANK_CONFLICTS(idx) ((idx) >> BANKS + (idx) >> (LOG_2_BANKS << 1))
#define CSC(call) do { \
hipError_t res = call; \
if (res != hipSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, hipGetErrorString(res)); \
exit(0); \
} \
} while (0)
__global__ void Histogram(unsigned char* data, int size, int* histo)
{
// shared memory; its size equals the number of bins
__shared__ int tmp[BLOCK_COUNT];
// compute the global (absolute) thread id
int idx = blockDim.x * blockIdx.x + threadIdx.x;
// grid stride
int offset = gridDim.x * blockDim.x;
// zero out the temporary array:
// in effect, all 256 shared-memory counters start at 0
tmp[threadIdx.x] = 0;
__syncthreads(); // threads wait until tmp has been zero-filled
// walk over every element of the data buffer,
// until the global index reaches size
while (idx < size)
{
// read the value stored in the buffer
// and increment its counter in the shared-memory array
atomicAdd(&tmp[data[idx]], 1);
idx += offset; // advance the global index by the stride
}
__syncthreads(); // wait for all threads
// merge the per-block counts into the global histo array
int i = threadIdx.x;
while (i < BLOCK_COUNT)
{
atomicAdd(&histo[i], tmp[i]);
i += blockDim.x;
}
}
__global__ void Scan(int* histo, int* prefixSum)
{
__shared__ int tmp[BLOCK_COUNT];
int threadId = threadIdx.x;
int offset = 1;
int aIdx = threadIdx.x;
int bIdx = threadIdx.x + HALF_BLOCK_COUNT;
int bankOffsetA = AVOID_BANK_CONFLICTS(aIdx);
int bankOffsetB = AVOID_BANK_CONFLICTS(bIdx);
// load the histogram values into shared memory
tmp[aIdx + bankOffsetA] = histo[aIdx];
tmp[bIdx + bankOffsetB] = histo[bIdx];
// build the sums in place, sweeping up the tree
{
int lvl = BLOCK_COUNT >> 1;
while (lvl > 0)
{
__syncthreads();
if (threadId < lvl)
{
int aIndex = (offset * (threadId * 2 + 1) - 1);
int bIndex = (offset * (threadId * 2 + 2) - 1);
aIndex += AVOID_BANK_CONFLICTS(aIndex);
bIndex += AVOID_BANK_CONFLICTS(bIndex);
tmp[bIndex] += tmp[aIndex];
}
offset <<= 1;
lvl >>= 1;
}
}
// clear the last element
if (threadId == 0)
{
tmp[BLOCK_COUNT - 1 + AVOID_BANK_CONFLICTS(BLOCK_COUNT - 1)] = 0;
}
// ""
{
int lvl = 1;
while (lvl < BLOCK_COUNT)
{
offset >>= 1;
__syncthreads();
if (threadId < lvl)
{
int aIndex = (offset * (threadId * 2 + 1) - 1);
int bIndex = (offset * (threadId * 2 + 2) - 1);
aIndex += AVOID_BANK_CONFLICTS(aIndex);
bIndex += AVOID_BANK_CONFLICTS(bIndex);
int temp = tmp[aIndex];
tmp[aIndex] = tmp[bIndex];
tmp[bIndex] += temp;
}
lvl <<= 1;
}
}
__syncthreads();
// write the results into the prefixSum array
prefixSum[aIdx] = histo[aIdx] + tmp[aIdx + bankOffsetA];
prefixSum[bIdx] = histo[bIdx] + tmp[bIdx + bankOffsetB];
}
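// For example, with a 4-bin histogram histo = [3, 1, 4, 2] the up- and down-sweeps leave
// the exclusive scan [0, 3, 4, 8] in tmp, and adding histo back in the last two lines
// gives the inclusive prefix sums prefixSum = [3, 4, 8, 10]; prefixSum[v] is then one past
// the last output slot reserved for the value v.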
__global__ void CountSort(unsigned char* data, int* prefixSum, unsigned char* result, int size)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = gridDim.x * blockDim.x;
int i = idx, j;
while (i < size)
{
// j = prefixSum[i] - 1;
// bound = i ? prefixSum[i - 1] : 0;
// while (j >= bound)
// {
// data[j] = i;
// --j;
// }
j = atomicSub(&prefixSum[data[i]], 1) - 1;
result[j] = data[i];
i += offset;
}
}
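// For example, if prefixSum[7] == 10 and three threads each read the value 7 from data,
// their atomicSub calls return 10, 9 and 8, so they store into result[9], result[8] and
// result[7] - exactly the slots the inclusive prefix sum reserved for that value.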
int main()
{
int size;
freopen(NULL, "rb", stdin);
fread(&size, sizeof(int), 1, stdin);
unsigned char* data = new unsigned char[size];
fread(data, sizeof(unsigned char), size, stdin);
fclose(stdin);
unsigned char* deviceData;
unsigned char* deviceResult;
int* deviceHisto;
int* devicePrefix;
float elapsedTime;
hipEvent_t start, stop;
CSC(hipEventCreate(&start));
CSC(hipEventCreate(&stop));
CSC(hipMalloc((void**)&deviceData, sizeof(unsigned char) * size));
CSC(hipMemcpy(deviceData, data, sizeof(unsigned char) * size, hipMemcpyHostToDevice));
CSC(hipMalloc((void**)&deviceHisto, sizeof(int) * BLOCK_COUNT));
CSC(hipMalloc((void**)&devicePrefix, sizeof(int) * BLOCK_COUNT));
CSC(hipMemset(deviceHisto, 0, sizeof(int) * BLOCK_COUNT));
CSC(hipMalloc((void**)&deviceResult, sizeof(unsigned char) * size));
CSC(hipEventRecord(start));
hipLaunchKernelGGL(( Histogram), dim3(BLOCK_COUNT), dim3(BLOCK_COUNT), 0, 0, deviceData, size, deviceHisto);
hipDeviceSynchronize(); // wait end
CSC(hipGetLastError());
hipLaunchKernelGGL(( Scan), dim3(1), dim3(HALF_BLOCK_COUNT), 0, 0, deviceHisto, devicePrefix);
hipDeviceSynchronize(); // wait end
CSC(hipGetLastError());
hipLaunchKernelGGL(( CountSort), dim3(1), dim3(BLOCK_COUNT), 0, 0, deviceData, devicePrefix, deviceResult, size);
hipDeviceSynchronize(); // wait end
CSC(hipGetLastError());
CSC(hipEventRecord(stop));
CSC(hipEventSynchronize(stop));
CSC(hipEventElapsedTime(&elapsedTime, start, stop));
CSC(hipEventDestroy(start));
CSC(hipEventDestroy(stop));
printf("Time : %f ms \n", elapsedTime);
CSC(hipMemcpy(data, deviceResult, sizeof(unsigned char) * size, hipMemcpyDeviceToHost));
// freopen(NULL, "wb", stdout);
// fwrite(data, sizeof(unsigned char), size, stdout);
// fclose(stdout);
CSC(hipFree(deviceData));
CSC(hipFree(deviceHisto));
CSC(hipFree(devicePrefix));
delete[] data;
return 0;
} | 21831d5715e74164fbf3e34d0b9378b90b103972.cu | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda.h>
#include <sys/time.h>
#include <chrono>
#define BLOCK_COUNT 256u
#define HALF_BLOCK_COUNT 128u
#define BANKS 16
#define LOG_2_BANKS 4
// macro used for computing
// Bank-Conflict-Free Shared Memory Array Indices
#define AVOID_BANK_CONFLICTS(idx) ((idx) >> BANKS + (idx) >> (LOG_2_BANKS << 1))
#define CSC(call) do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while (0)
__global__ void Histogram(unsigned char* data, int size, int* histo)
{
// shared memory; its size equals the number of bins
__shared__ int tmp[BLOCK_COUNT];
// compute the global (absolute) thread id
int idx = blockDim.x * blockIdx.x + threadIdx.x;
// grid stride
int offset = gridDim.x * blockDim.x;
// zero out the temporary array:
// in effect, all 256 shared-memory counters start at 0
tmp[threadIdx.x] = 0;
__syncthreads(); // threads wait until tmp has been zero-filled
// walk over every element of the data buffer,
// until the global index reaches size
while (idx < size)
{
// read the value stored in the buffer
// and increment its counter in the shared-memory array
atomicAdd(&tmp[data[idx]], 1);
idx += offset; // advance the global index by the stride
}
__syncthreads(); // wait for all threads
// merge the per-block counts into the global histo array
int i = threadIdx.x;
while (i < BLOCK_COUNT)
{
atomicAdd(&histo[i], tmp[i]);
i += blockDim.x;
}
}
__global__ void Scan(int* histo, int* prefixSum)
{
__shared__ int tmp[BLOCK_COUNT];
int threadId = threadIdx.x;
int offset = 1;
int aIdx = threadIdx.x;
int bIdx = threadIdx.x + HALF_BLOCK_COUNT;
int bankOffsetA = AVOID_BANK_CONFLICTS(aIdx);
int bankOffsetB = AVOID_BANK_CONFLICTS(bIdx);
// load the histogram values into shared memory
tmp[aIdx + bankOffsetA] = histo[aIdx];
tmp[bIdx + bankOffsetB] = histo[bIdx];
// build the sums in place, sweeping up the tree
{
int lvl = BLOCK_COUNT >> 1;
while (lvl > 0)
{
__syncthreads();
if (threadId < lvl)
{
int aIndex = (offset * (threadId * 2 + 1) - 1);
int bIndex = (offset * (threadId * 2 + 2) - 1);
aIndex += AVOID_BANK_CONFLICTS(aIndex);
bIndex += AVOID_BANK_CONFLICTS(bIndex);
tmp[bIndex] += tmp[aIndex];
}
offset <<= 1;
lvl >>= 1;
}
}
// clear the last element
if (threadId == 0)
{
tmp[BLOCK_COUNT - 1 + AVOID_BANK_CONFLICTS(BLOCK_COUNT - 1)] = 0;
}
// sweep down the "tree" and build the scan
{
int lvl = 1;
while (lvl < BLOCK_COUNT)
{
offset >>= 1;
__syncthreads();
if (threadId < lvl)
{
int aIndex = (offset * (threadId * 2 + 1) - 1);
int bIndex = (offset * (threadId * 2 + 2) - 1);
aIndex += AVOID_BANK_CONFLICTS(aIndex);
bIndex += AVOID_BANK_CONFLICTS(bIndex);
int temp = tmp[aIndex];
tmp[aIndex] = tmp[bIndex];
tmp[bIndex] += temp;
}
lvl <<= 1;
}
}
__syncthreads();
// write the results into the prefixSum array
prefixSum[aIdx] = histo[aIdx] + tmp[aIdx + bankOffsetA];
prefixSum[bIdx] = histo[bIdx] + tmp[bIdx + bankOffsetB];
}
__global__ void CountSort(unsigned char* data, int* prefixSum, unsigned char* result, int size)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = gridDim.x * blockDim.x;
int i = idx, j;
while (i < size)
{
// j = prefixSum[i] - 1;
// bound = i ? prefixSum[i - 1] : 0;
// while (j >= bound)
// {
// data[j] = i;
// --j;
// }
j = atomicSub(&prefixSum[data[i]], 1) - 1;
result[j] = data[i];
i += offset;
}
}
int main()
{
int size;
freopen(NULL, "rb", stdin);
fread(&size, sizeof(int), 1, stdin);
unsigned char* data = new unsigned char[size];
fread(data, sizeof(unsigned char), size, stdin);
fclose(stdin);
unsigned char* deviceData;
unsigned char* deviceResult;
int* deviceHisto;
int* devicePrefix;
float elapsedTime;
cudaEvent_t start, stop;
CSC(cudaEventCreate(&start));
CSC(cudaEventCreate(&stop));
CSC(cudaMalloc((void**)&deviceData, sizeof(unsigned char) * size));
CSC(cudaMemcpy(deviceData, data, sizeof(unsigned char) * size, cudaMemcpyHostToDevice));
CSC(cudaMalloc((void**)&deviceHisto, sizeof(int) * BLOCK_COUNT));
CSC(cudaMalloc((void**)&devicePrefix, sizeof(int) * BLOCK_COUNT));
CSC(cudaMemset(deviceHisto, 0, sizeof(int) * BLOCK_COUNT));
CSC(cudaMalloc((void**)&deviceResult, sizeof(unsigned char) * size));
CSC(cudaEventRecord(start));
Histogram<<<BLOCK_COUNT, BLOCK_COUNT>>>(deviceData, size, deviceHisto);
cudaThreadSynchronize(); // wait end
CSC(cudaGetLastError());
Scan<<<1, HALF_BLOCK_COUNT>>>(deviceHisto, devicePrefix);
cudaThreadSynchronize(); // wait end
CSC(cudaGetLastError());
CountSort<<<1, BLOCK_COUNT>>>(deviceData, devicePrefix, deviceResult, size);
cudaThreadSynchronize(); // wait end
CSC(cudaGetLastError());
CSC(cudaEventRecord(stop));
CSC(cudaEventSynchronize(stop));
CSC(cudaEventElapsedTime(&elapsedTime, start, stop));
CSC(cudaEventDestroy(start));
CSC(cudaEventDestroy(stop));
printf("Time : %f ms \n", elapsedTime);
CSC(cudaMemcpy(data, deviceResult, sizeof(unsigned char) * size, cudaMemcpyDeviceToHost));
// freopen(NULL, "wb", stdout);
// fwrite(data, sizeof(unsigned char), size, stdout);
// fclose(stdout);
CSC(cudaFree(deviceData));
CSC(cudaFree(deviceHisto));
CSC(cudaFree(devicePrefix));
delete[] data;
return 0;
} |
78e69afce9cc99a68899086ffcdf98a34ba16506.hip | // !!! This is a file automatically generated by hipify!!!
#include <string.h>
#include <hipfft.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "GpuTimer.h"
#define NUM_STREAMS 3
/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/********/
/* MAIN */
/********/
int main()
{
const int N = 500000;
// --- Host input data initialization
float2 *h_in1 = new float2[N];
float2 *h_in2 = new float2[N];
float2 *h_in3 = new float2[N];
for (int i = 0; i < N; i++) {
h_in1[i].x = 1.f;
h_in1[i].y = 0.f;
h_in2[i].x = 1.f;
h_in2[i].y = 0.f;
h_in3[i].x = 1.f;
h_in3[i].y = 0.f;
}
// --- Host output data initialization
float2 *h_out1 = new float2[N];
float2 *h_out2 = new float2[N];
float2 *h_out3 = new float2[N];
for (int i = 0; i < N; i++) {
h_out1[i].x = 0.f;
h_out1[i].y = 0.f;
h_out2[i].x = 0.f;
h_out2[i].y = 0.f;
h_out3[i].x = 0.f;
h_out3[i].y = 0.f;
}
// --- Registers host memory as page-locked (required for asynch hipMemcpyAsync)
gpuErrchk(hipHostRegister(h_in1, N*sizeof(float2), hipHostRegisterPortable));
gpuErrchk(hipHostRegister(h_in2, N*sizeof(float2), hipHostRegisterPortable));
gpuErrchk(hipHostRegister(h_in3, N*sizeof(float2), hipHostRegisterPortable));
gpuErrchk(hipHostRegister(h_out1, N*sizeof(float2), hipHostRegisterPortable));
gpuErrchk(hipHostRegister(h_out2, N*sizeof(float2), hipHostRegisterPortable));
gpuErrchk(hipHostRegister(h_out3, N*sizeof(float2), hipHostRegisterPortable));
// --- Device input data allocation
float2 *d_in1; gpuErrchk(hipMalloc((void**)&d_in1, N*sizeof(float2)));
float2 *d_in2; gpuErrchk(hipMalloc((void**)&d_in2, N*sizeof(float2)));
float2 *d_in3; gpuErrchk(hipMalloc((void**)&d_in3, N*sizeof(float2)));
float2 *d_out1; gpuErrchk(hipMalloc((void**)&d_out1, N*sizeof(float2)));
float2 *d_out2; gpuErrchk(hipMalloc((void**)&d_out2, N*sizeof(float2)));
float2 *d_out3; gpuErrchk(hipMalloc((void**)&d_out3, N*sizeof(float2)));
// --- Creates CUDA streams
hipStream_t streams[NUM_STREAMS];
for (int i = 0; i < NUM_STREAMS; i++) gpuErrchk(hipStreamCreate(&streams[i]));
// --- Creates cuFFT plans and sets them in streams
hipfftHandle* plans = (hipfftHandle*) malloc(sizeof(hipfftHandle)*NUM_STREAMS);
for (int i = 0; i < NUM_STREAMS; i++) {
hipfftPlan1d(&plans[i], N, HIPFFT_C2C, 1);
hipfftSetStream(plans[i], streams[i]);
}
GpuTimer timer;
timer.Start();
// --- Async memcopies and computations
gpuErrchk(hipMemcpyAsync(d_in1, h_in1, N*sizeof(float2), hipMemcpyHostToDevice, streams[0]));
gpuErrchk(hipMemcpyAsync(d_in2, h_in2, N*sizeof(float2), hipMemcpyHostToDevice, streams[1]));
gpuErrchk(hipMemcpyAsync(d_in3, h_in3, N*sizeof(float2), hipMemcpyHostToDevice, streams[2]));
hipfftExecC2C(plans[0], (hipfftComplex*)d_in1, (hipfftComplex*)d_out1, HIPFFT_FORWARD);
hipfftExecC2C(plans[1], (hipfftComplex*)d_in2, (hipfftComplex*)d_out2, HIPFFT_FORWARD);
hipfftExecC2C(plans[2], (hipfftComplex*)d_in3, (hipfftComplex*)d_out3, HIPFFT_FORWARD);
hipfftExecC2C(plans[0], (hipfftComplex*)d_out1, (hipfftComplex*)d_out1, HIPFFT_BACKWARD);
hipfftExecC2C(plans[1], (hipfftComplex*)d_out2, (hipfftComplex*)d_out2, HIPFFT_BACKWARD);
hipfftExecC2C(plans[2], (hipfftComplex*)d_out3, (hipfftComplex*)d_out3, HIPFFT_BACKWARD);
gpuErrchk(hipMemcpyAsync(h_out1, d_out1, N*sizeof(float2), hipMemcpyDeviceToHost, streams[0]));
gpuErrchk(hipMemcpyAsync(h_out2, d_out2, N*sizeof(float2), hipMemcpyDeviceToHost, streams[1]));
gpuErrchk(hipMemcpyAsync(h_out3, d_out3, N*sizeof(float2), hipMemcpyDeviceToHost, streams[2]));
for(int i = 0; i < NUM_STREAMS; i++)
gpuErrchk(hipStreamSynchronize(streams[i]));
timer.Stop();
float ms = timer.Elapsed();
printf("Stream Fast Fourier Transform. Time Elapsed: %f ms\n", ms);
// --- Releases resources
gpuErrchk(hipHostUnregister(h_in1));
gpuErrchk(hipHostUnregister(h_in2));
gpuErrchk(hipHostUnregister(h_in3));
gpuErrchk(hipHostUnregister(h_out1));
gpuErrchk(hipHostUnregister(h_out2));
gpuErrchk(hipHostUnregister(h_out3));
gpuErrchk(hipFree(d_in1));
gpuErrchk(hipFree(d_in2));
gpuErrchk(hipFree(d_in3));
gpuErrchk(hipFree(d_out1));
gpuErrchk(hipFree(d_out2));
gpuErrchk(hipFree(d_out3));
for(int i = 0; i < NUM_STREAMS; i++)
gpuErrchk(hipStreamDestroy(streams[i]));
delete[] h_in1;
delete[] h_in2;
delete[] h_in3;
delete[] h_out1;
delete[] h_out2;
delete[] h_out3;
//hipDeviceReset();
printf("\nPress any key to exit...");
char c;
scanf("%c", &c);
return 0;
} | 78e69afce9cc99a68899086ffcdf98a34ba16506.cu | #include <string.h>
#include <cufft.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "GpuTimer.h"
#define NUM_STREAMS 3
/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/********/
/* MAIN */
/********/
int main()
{
const int N = 500000;
// --- Host input data initialization
float2 *h_in1 = new float2[N];
float2 *h_in2 = new float2[N];
float2 *h_in3 = new float2[N];
for (int i = 0; i < N; i++) {
h_in1[i].x = 1.f;
h_in1[i].y = 0.f;
h_in2[i].x = 1.f;
h_in2[i].y = 0.f;
h_in3[i].x = 1.f;
h_in3[i].y = 0.f;
}
// --- Host output data initialization
float2 *h_out1 = new float2[N];
float2 *h_out2 = new float2[N];
float2 *h_out3 = new float2[N];
for (int i = 0; i < N; i++) {
h_out1[i].x = 0.f;
h_out1[i].y = 0.f;
h_out2[i].x = 0.f;
h_out2[i].y = 0.f;
h_out3[i].x = 0.f;
h_out3[i].y = 0.f;
}
// --- Registers host memory as page-locked (required for asynch cudaMemcpyAsync)
gpuErrchk(cudaHostRegister(h_in1, N*sizeof(float2), cudaHostRegisterPortable));
gpuErrchk(cudaHostRegister(h_in2, N*sizeof(float2), cudaHostRegisterPortable));
gpuErrchk(cudaHostRegister(h_in3, N*sizeof(float2), cudaHostRegisterPortable));
gpuErrchk(cudaHostRegister(h_out1, N*sizeof(float2), cudaHostRegisterPortable));
gpuErrchk(cudaHostRegister(h_out2, N*sizeof(float2), cudaHostRegisterPortable));
gpuErrchk(cudaHostRegister(h_out3, N*sizeof(float2), cudaHostRegisterPortable));
// --- Device input data allocation
float2 *d_in1; gpuErrchk(cudaMalloc((void**)&d_in1, N*sizeof(float2)));
float2 *d_in2; gpuErrchk(cudaMalloc((void**)&d_in2, N*sizeof(float2)));
float2 *d_in3; gpuErrchk(cudaMalloc((void**)&d_in3, N*sizeof(float2)));
float2 *d_out1; gpuErrchk(cudaMalloc((void**)&d_out1, N*sizeof(float2)));
float2 *d_out2; gpuErrchk(cudaMalloc((void**)&d_out2, N*sizeof(float2)));
float2 *d_out3; gpuErrchk(cudaMalloc((void**)&d_out3, N*sizeof(float2)));
// --- Creates CUDA streams
cudaStream_t streams[NUM_STREAMS];
for (int i = 0; i < NUM_STREAMS; i++) gpuErrchk(cudaStreamCreate(&streams[i]));
// --- Creates cuFFT plans and sets them in streams
cufftHandle* plans = (cufftHandle*) malloc(sizeof(cufftHandle)*NUM_STREAMS);
for (int i = 0; i < NUM_STREAMS; i++) {
cufftPlan1d(&plans[i], N, CUFFT_C2C, 1);
cufftSetStream(plans[i], streams[i]);
}
GpuTimer timer;
timer.Start();
// --- Async memcopyes and computations
gpuErrchk(cudaMemcpyAsync(d_in1, h_in1, N*sizeof(float2), cudaMemcpyHostToDevice, streams[0]));
gpuErrchk(cudaMemcpyAsync(d_in2, h_in2, N*sizeof(float2), cudaMemcpyHostToDevice, streams[1]));
gpuErrchk(cudaMemcpyAsync(d_in3, h_in3, N*sizeof(float2), cudaMemcpyHostToDevice, streams[2]));
cufftExecC2C(plans[0], (cufftComplex*)d_in1, (cufftComplex*)d_out1, CUFFT_FORWARD);
cufftExecC2C(plans[1], (cufftComplex*)d_in2, (cufftComplex*)d_out2, CUFFT_FORWARD);
cufftExecC2C(plans[2], (cufftComplex*)d_in3, (cufftComplex*)d_out3, CUFFT_FORWARD);
cufftExecC2C(plans[0], (cufftComplex*)d_out1, (cufftComplex*)d_out1, CUFFT_INVERSE);
cufftExecC2C(plans[1], (cufftComplex*)d_out2, (cufftComplex*)d_out2, CUFFT_INVERSE);
cufftExecC2C(plans[2], (cufftComplex*)d_out3, (cufftComplex*)d_out3, CUFFT_INVERSE);
gpuErrchk(cudaMemcpyAsync(h_out1, d_out1, N*sizeof(float2), cudaMemcpyDeviceToHost, streams[0]));
gpuErrchk(cudaMemcpyAsync(h_out2, d_out2, N*sizeof(float2), cudaMemcpyDeviceToHost, streams[1]));
gpuErrchk(cudaMemcpyAsync(h_out3, d_out3, N*sizeof(float2), cudaMemcpyDeviceToHost, streams[2]));
for(int i = 0; i < NUM_STREAMS; i++)
gpuErrchk(cudaStreamSynchronize(streams[i]));
timer.Stop();
float ms = timer.Elapsed();
printf("Stream Fast Fourier Transform. Time Elapsed: %f ms\n", ms);
// --- Releases resources
gpuErrchk(cudaHostUnregister(h_in1));
gpuErrchk(cudaHostUnregister(h_in2));
gpuErrchk(cudaHostUnregister(h_in3));
gpuErrchk(cudaHostUnregister(h_out1));
gpuErrchk(cudaHostUnregister(h_out2));
gpuErrchk(cudaHostUnregister(h_out3));
gpuErrchk(cudaFree(d_in1));
gpuErrchk(cudaFree(d_in2));
gpuErrchk(cudaFree(d_in3));
gpuErrchk(cudaFree(d_out1));
gpuErrchk(cudaFree(d_out2));
gpuErrchk(cudaFree(d_out3));
for(int i = 0; i < NUM_STREAMS; i++)
gpuErrchk(cudaStreamDestroy(streams[i]));
delete[] h_in1;
delete[] h_in2;
delete[] h_in3;
delete[] h_out1;
delete[] h_out2;
delete[] h_out3;
//cudaDeviceReset();
printf("\nPress any key to exit...");
char c;
scanf("%c", &c);
return 0;
} |
1127a1f9053ea79b5d3a524b8f7b4ebf5c02c4ce.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "calculateMatrixFormulaSharedDynamic.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int *res = NULL;
hipMalloc(&res, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
calculateMatrixFormulaSharedDynamic), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,res,n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
calculateMatrixFormulaSharedDynamic), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,res,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
calculateMatrixFormulaSharedDynamic), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,res,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 1127a1f9053ea79b5d3a524b8f7b4ebf5c02c4ce.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "calculateMatrixFormulaSharedDynamic.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int *res = NULL;
cudaMalloc(&res, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
calculateMatrixFormulaSharedDynamic<<<gridBlock,threadBlock>>>(a,b,res,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
calculateMatrixFormulaSharedDynamic<<<gridBlock,threadBlock>>>(a,b,res,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
calculateMatrixFormulaSharedDynamic<<<gridBlock,threadBlock>>>(a,b,res,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
3287997bb2bccbfac9cb32793d059c912a57fd08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void PondHeadInit(double *ph, int size) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < size) {
ph[tid] = psi_min;
tid += blockDim.x * gridDim.x;
}
} | 3287997bb2bccbfac9cb32793d059c912a57fd08.cu | #include "includes.h"
__global__ void PondHeadInit(double *ph, int size) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < size) {
ph[tid] = psi_min;
tid += blockDim.x * gridDim.x;
}
} |
71c1de0a2d154171b420eb541e61edd202f2658e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* cuPrintf.cu
*
* This is a printf command callable from within a kernel. It is set
* up so that output is sent to a memory buffer, which is emptied from
* the host side - but only after a hipDeviceSynchronize() on the host.
*
* Currently, there is a limitation of around 200 characters of output
* and no more than 10 arguments to a single cuPrintf() call. Issue
* multiple calls if longer format strings are required.
*
* It requires minimal setup, and is *NOT* optimised for performance.
* For example, writes are not coalesced - this is because there is an
* assumption that people will not want to printf from every single one
* of thousands of threads, but only from individual threads at a time.
*
* Using this is simple - it requires one host-side call to initialise
* everything, and then kernels can call cuPrintf at will. Sample code
* is the easiest way to demonstrate:
*
#include "cuPrintf.hip"
__global__ void testKernel(int val)
{
cuPrintf("Value is: %d\n", val);
}
int main()
{
cudaPrintfInit();
testKernel<<< 2, 3 >>>(10);
cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
return 0;
}
*
* See the header file, "cuPrintf.cuh" for more info, especially
* arguments to cudaPrintfInit() and cudaPrintfDisplay();
*/
#ifndef CUPRINTF_CU
#define CUPRINTF_CU
#include "cuPrintf_hip.cuh"
#if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture
#include <sm_11_atomic_functions.h>
#endif
// This is the smallest amount of memory, per-thread, which is allowed.
// It is also the largest amount of space a single printf() can take up
const static int CUPRINTF_MAX_LEN = 256;
// This structure is used internally to track block/thread output restrictions.
typedef struct __align__(8)
{
int threadid; // CUPRINTF_UNRESTRICTED for unrestricted
int blockid; // CUPRINTF_UNRESTRICTED for unrestricted
}
cuPrintfRestriction;
// The main storage is in a global print buffer, which has a known
// start/end/length. These are atomically updated so it works as a
// circular buffer.
// Since the only control primitive that can be used is atomicAdd(),
// we cannot wrap the pointer as such. The actual address must be
// calculated from printfBufferPtr by mod-ing with printfBufferLength.
// For sm_10 architecture, we must subdivide the buffer per-thread
// since we do not even have an atomic primitive.
__constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host)
__constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host)
__device__ static cuPrintfRestriction restrictRules; // Output restrictions
__device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset
// This is the header preceding all printf entries.
// NOTE: It *must* be size-aligned to the maximum entity size (size_t)
typedef struct __align__(8)
{
unsigned short magic; // Magic number says we're valid
unsigned short fmtoffset; // Offset of fmt string into buffer
unsigned short blockid; // Block ID of author
unsigned short threadid; // Thread ID of author
}
cuPrintfHeader;
// Special header for sm_10 architecture
#define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character
typedef struct __align__(16)
{
unsigned short magic; // sm_10 specific magic number
unsigned short unused;
unsigned int thread_index; // thread ID for this buffer
unsigned int thread_buf_len; // per-thread buffer length
unsigned int offset; // most recent printf's offset
}
cuPrintfHeaderSM10;
// Because we can't write an element which is not aligned to its bit-size,
// we have to align all sizes and variables on maximum-size boundaries.
// That means sizeof(double) in this case, but we'll use (long long) for
// better arch<1.3 support
#define CUPRINTF_ALIGN_SIZE sizeof(long long)
// All our headers are prefixed with a magic number so we know they're ready
#define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character
//
// getNextPrintfBufPtr
//
// Grabs a block of space in the general circular buffer, using an
// atomic function to ensure that it's ours. We handle wrapping
// around the circular buffer and return a pointer to a place which
// can be written to.
//
// Important notes:
// 1. We always grab CUPRINTF_MAX_LEN bytes
// 2. Because of 1, we never worry about wrapping around the end
// 3. Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN
//
// This returns a pointer to the place where we own.
//
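// For example (on the atomic sm_11+ path), if printfBufferLength were 4 * CUPRINTF_MAX_LEN,
// successive grabs would return offsets 0, 256, 512 and 768, and the fifth grab would wrap
// back to offset 0, overwriting the oldest entry.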
__device__ static char *getNextPrintfBufPtr()
{
// Initialisation check
if (!printfBufferPtr)
{
return NULL;
}
// Thread/block restriction check
if ((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y)))
{
return NULL;
}
if ((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z)))
{
return NULL;
}
// Conditional section, dependent on architecture
#if __CUDA_ARCH__ == 100
// For sm_10 architectures, we have no atomic add - this means we must split the
// entire available buffer into per-thread blocks. Inefficient, but what can you do.
int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z);
int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z +
(blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z);
// Find our own block of data and go to it. Make sure the per-thread length
// is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and
// alignment issues! We must round down, of course.
unsigned int thread_buf_len = printfBufferLength / thread_count;
thread_buf_len &= ~(CUPRINTF_MAX_LEN-1);
// We *must* have a thread buffer length able to fit at least two printfs (one header, one real)
if (thread_buf_len < (CUPRINTF_MAX_LEN * 2))
{
return NULL;
}
// Now address our section of the buffer. The first item is a header.
char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index);
cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer;
if (hdr.magic != CUPRINTF_SM10_MAGIC)
{
// If our header is not set up, initialise it
hdr.magic = CUPRINTF_SM10_MAGIC;
hdr.thread_index = thread_index;
hdr.thread_buf_len = thread_buf_len;
hdr.offset = 0; // Note we start at 0! We pre-increment below.
*(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header
// For initial setup purposes, we might need to init thread0's header too
// (so that cudaPrintfDisplay() below will work). This is only run once.
cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer;
tophdr->thread_buf_len = thread_buf_len;
}
// Adjust the offset by the right amount, and wrap it if need be
unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN;
if (offset >= hdr.thread_buf_len)
{
offset = CUPRINTF_MAX_LEN;
}
// Write back the new offset for next time and return a pointer to it
((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset;
return myPrintfBuffer + offset;
#else
// Much easier with an atomic operation!
size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer;
offset %= printfBufferLength;
return globalPrintfBuffer + offset;
#endif
}
//
// writePrintfHeader
//
// Inserts the header for containing our UID, fmt position and
// block/thread number. We generate it dynamically to avoid
// issues arising from requiring pre-initialisation.
//
__device__ static void writePrintfHeader(char *ptr, char *fmtptr)
{
if (ptr)
{
cuPrintfHeader header;
header.magic = CUPRINTF_SM11_MAGIC;
header.fmtoffset = (unsigned short)(fmtptr - ptr);
header.blockid = blockIdx.x + gridDim.x*blockIdx.y;
header.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
*(cuPrintfHeader *)(void *)ptr = header;
}
}
//
// cuPrintfStrncpy
//
// This special strncpy outputs an aligned length value, followed by the
// string. It then zero-pads the rest of the string until a 64-aligned
// boundary. The length *includes* the padding. A pointer to the byte
// just after the \0 is returned.
//
// This function could overflow CUPRINTF_MAX_LEN characters in our buffer.
// To avoid it, we must count as we output and truncate where necessary.
//
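// For example, copying the string "hi" writes an 8-byte length slot followed by 'h', 'i',
// '\0' and five zero padding bytes, so the recorded length is 8 and the returned pointer
// stays CUPRINTF_ALIGN_SIZE-aligned.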
__device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end)
{
// Initialisation and overflow check
if (!dest || !src || (dest >= end))
{
return NULL;
}
// Prepare to write the length specifier. We're guaranteed to have
// at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in
// chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE.
int *lenptr = (int *)(void *)dest;
int len = 0;
dest += CUPRINTF_ALIGN_SIZE;
// Now copy the string
while (n--)
{
if (dest >= end) // Overflow check
{
break;
}
len++;
*dest++ = *src;
if (*src++ == '\0')
{
break;
}
}
// Now write out the padding bytes, and we have our length.
while ((dest < end) && (((size_t)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0))
{
len++;
*dest++ = 0;
}
*lenptr = len;
return (dest < end) ? dest : NULL; // Overflow means return NULL
}
//
// copyArg
//
// This copies a length specifier and then the argument out to the
// data buffer. Templates let the compiler figure all this out at
// compile-time, making life much simpler from the programming
// point of view. I'm assuming all (const char *) is a string, and
// everything else is the variable it points at. I'd love to see
// a better way of doing it, but aside from parsing the format
// string I can't think of one.
//
// The length of the data type is inserted at the beginning (so that
// the display can distinguish between float and double), and the
// pointer to the end of the entry is returned.
//
__device__ static char *copyArg(char *ptr, const char *arg, char *end)
{
// Initialisation check
if (!ptr || !arg)
{
return NULL;
}
// strncpy does all our work. We just terminate.
if ((ptr = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end)) != NULL)
{
*ptr = 0;
}
return ptr;
}
template <typename T>
__device__ static char *copyArg(char *ptr, T &arg, char *end)
{
// Initialisation and overflow check. Alignment rules mean that
// we're at least CUPRINTF_ALIGN_SIZE away from "end", so we only need
// to check that one offset.
if (!ptr || ((ptr+CUPRINTF_ALIGN_SIZE) >= end))
{
return NULL;
}
// Write the length and argument
*(int *)(void *)ptr = sizeof(arg);
ptr += CUPRINTF_ALIGN_SIZE;
*(T *)(void *)ptr = arg;
ptr += CUPRINTF_ALIGN_SIZE;
*ptr = 0;
return ptr;
}
//
// cuPrintf
//
// Templated printf functions to handle multiple arguments.
// Note we return the total amount of data copied, not the number
// of characters output. But then again, who ever looks at the
// return from printf() anyway?
//
// The format is to grab a block of circular buffer space, the
// start of which will hold a header and a pointer to the format
// string. We then write in all the arguments, and finally the
// format string itself. This is to make it easy to prevent
// overflow of our buffer (we support up to 10 arguments, each of
// which can be 12 bytes in length - that means that only the
// format string (or a %s) can actually overflow; so the overflow
// check need only be in the strcpy function.
//
// The header is written at the very last because that's what
// makes it look like we're done.
//
// Errors, which are basically lack-of-initialisation, are ignored
// in the called functions because NULL pointers are passed around
//
// All printf variants basically do the same thing, setting up the
// buffer, writing all arguments, then finalising the header. For
// clarity, we'll pack the code into some big macros.
#define CUPRINTF_PREAMBLE \
char *start, *end, *bufptr, *fmtstart; \
if((start = getNextPrintfBufPtr()) == NULL) return 0; \
end = start + CUPRINTF_MAX_LEN; \
bufptr = start + sizeof(cuPrintfHeader);
// Posting an argument is easy
#define CUPRINTF_ARG(argname) \
bufptr = copyArg(bufptr, argname, end);
// After args are done, record start-of-fmt and write the fmt and header
#define CUPRINTF_POSTAMBLE \
fmtstart = bufptr; \
end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \
writePrintfHeader(start, end ? fmtstart : NULL); \
return end ? (int)(end - start) : 0;
__device__ int cuPrintf(const char *fmt)
{
CUPRINTF_PREAMBLE;
CUPRINTF_POSTAMBLE;
}
template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_ARG(arg10);
CUPRINTF_POSTAMBLE;
}
#undef CUPRINTF_PREAMBLE
#undef CUPRINTF_ARG
#undef CUPRINTF_POSTAMBLE
//
// cuPrintfRestrict
//
// Called to restrict output to a given thread/block.
// We store the info in "restrictRules", which is set up at
// init time by the host. It's not the cleanest way to do this
// because it means restrictions will last between
// invocations, but given the output-pointer continuity,
// I feel this is reasonable.
//
__device__ void cuPrintfRestrict(int threadid, int blockid)
{
int thread_count = blockDim.x * blockDim.y * blockDim.z;
if (((threadid < thread_count) && (threadid >= 0)) || (threadid == CUPRINTF_UNRESTRICTED))
{
restrictRules.threadid = threadid;
}
int block_count = gridDim.x * gridDim.y;
if (((blockid < block_count) && (blockid >= 0)) || (blockid == CUPRINTF_UNRESTRICTED))
{
restrictRules.blockid = blockid;
}
}
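// A minimal device-side sketch of how this can be used from a kernel (the
// ids 0,0 below are example values only):
//
//   cuPrintfRestrict(0, 0); // only thread 0 of block 0 prints
//   cuPrintf("one line of output\n");
//   cuPrintfRestrict(CUPRINTF_UNRESTRICTED, CUPRINTF_UNRESTRICTED); // lift the restriction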
///////////////////////////////////////////////////////////////////////////////
// HOST SIDE
#include <stdio.h>
static FILE *printf_fp;
static char *printfbuf_start=NULL;
static char *printfbuf_device=NULL;
static int printfbuf_len=0;
//
// outputPrintfData
//
// Our own internal function, which takes a pointer to a data buffer
// and passes it through libc's printf for output.
//
// We receive the format string and a pointer to where the data is
// held. We then run through and print it out.
//
// Returns 0 on failure, 1 on success
//
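// As a rough example of the walk below: for fmt = "x=%d y=%f\n" the loop
// prints "x=", formats one length-prefixed int from the data block, prints
// " y=", formats one double (or a float if the recorded length is 4), and
// then the final fputs() emits the trailing "\n".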
static int outputPrintfData(char *fmt, char *data)
{
// Format string is prefixed by a length that we don't need
fmt += CUPRINTF_ALIGN_SIZE;
// Now run through it, printing everything we can. We must
// run to every % character, extract only that, and use printf
// to format it.
char *p = strchr(fmt, '%');
while (p != NULL)
{
// Print up to the % character
*p = '\0';
fputs(fmt, printf_fp);
*p = '%'; // Put back the %
// Now handle the format specifier
char *format = p++; // Points to the '%'
p += strcspn(p, "%cdiouxXeEfgGaAnps");
if (*p == '\0') // If no format specifier, print the whole thing
{
fmt = format;
break;
}
// Cut out the format bit and use printf to print it. It's prefixed
// by its length.
int arglen = *(int *)data;
if (arglen > CUPRINTF_MAX_LEN)
{
fputs("Corrupt printf buffer data - aborting\n", printf_fp);
return 0;
}
data += CUPRINTF_ALIGN_SIZE;
char specifier = *p++;
char c = *p; // Store for later
*p = '\0';
switch (specifier)
{
// These all take integer arguments
case 'c':
case 'd':
case 'i':
case 'o':
case 'u':
case 'x':
case 'X':
case 'p':
fprintf(printf_fp, format, *((int *)data));
break;
// These all take double arguments
case 'e':
case 'E':
case 'f':
case 'g':
case 'G':
case 'a':
case 'A':
if (arglen == 4) // Float vs. Double thing
{
fprintf(printf_fp, format, *((float *)data));
}
else
{
fprintf(printf_fp, format, *((double *)data));
}
break;
// Strings are handled in a special way
case 's':
fprintf(printf_fp, format, (char *)data);
break;
// % is special
case '%':
fprintf(printf_fp, "%%");
break;
// Everything else is just printed out as-is
default:
fprintf(printf_fp, "%s", format);
break;
}
data += CUPRINTF_ALIGN_SIZE; // Move on to next argument
*p = c; // Restore what we removed
fmt = p; // Adjust fmt string to be past the specifier
p = strchr(fmt, '%'); // and get the next specifier
}
// Print out the last of the string
fputs(fmt, printf_fp);
return 1;
}
//
// doPrintfDisplay
//
// This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the
// print function above to display them. We've got this separate from
// cudaPrintfDisplay() below so we can handle the SM_10 architecture
// partitioning.
//
static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr)
{
// Grab, piece-by-piece, each output element until we catch
// up with the circular buffer end pointer
int printf_count=0;
char printfbuf_local[CUPRINTF_MAX_LEN+1];
printfbuf_local[CUPRINTF_MAX_LEN] = '\0';
while (bufptr != endptr)
{
// Wrap ourselves at the end-of-buffer
if (bufptr == bufend)
{
bufptr = bufstart;
}
// Adjust our start pointer to within the circular buffer and copy a block.
hipMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, hipMemcpyDeviceToHost);
// If the magic number isn't valid, then this write hasn't gone through
// yet and we'll wait until it does (or we're past the end for non-async printfs).
cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local;
if ((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN))
{
//fprintf(printf_fp, "Bad magic number in printf header\n");
break;
}
// Extract all the info and get this printf done
if (headings)
{
fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid);
}
if (hdr->fmtoffset == 0)
{
fprintf(printf_fp, "printf buffer overflow\n");
}
else if (!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader)))
{
break;
}
printf_count++;
// Clear if asked
if (clear)
{
hipMemset(bufptr, 0, CUPRINTF_MAX_LEN);
}
// Now advance our start location, because we're done, and keep copying
bufptr += CUPRINTF_MAX_LEN;
}
return printf_count;
}
//
// cudaPrintfInit
//
// Takes a buffer length to allocate, creates the memory on the device and
// records it in the device-side symbols that cuPrintf() writes into. The
// buffer is released later by cudaPrintfEnd().
//
extern "C" hipError_t cudaPrintfInit(size_t bufferLen)
{
// Fix up bufferlen to be a multiple of CUPRINTF_MAX_LEN
bufferLen = (bufferLen < (size_t)CUPRINTF_MAX_LEN) ? CUPRINTF_MAX_LEN : bufferLen;
if ((bufferLen % CUPRINTF_MAX_LEN) > 0)
{
bufferLen += (CUPRINTF_MAX_LEN - (bufferLen % CUPRINTF_MAX_LEN));
}
printfbuf_len = (int)bufferLen;
// Allocate a print buffer on the device and zero it
if (hipMalloc((void **)&printfbuf_device, printfbuf_len) != hipSuccess)
{
return hipErrorInitializationError;
}
hipMemset(printfbuf_device, 0, printfbuf_len);
printfbuf_start = printfbuf_device; // Where we start reading from
// No restrictions to begin with
cuPrintfRestriction restrict;
restrict.threadid = restrict.blockid = CUPRINTF_UNRESTRICTED;
hipMemcpyToSymbol(restrictRules, &restrict, sizeof(restrict));
// Initialise the buffer and the respective lengths/pointers.
hipMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *));
hipMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *));
hipMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len));
return hipSuccess;
}
//
// cudaPrintfEnd
//
// Frees up the memory which we allocated
//
extern "C" void cudaPrintfEnd()
{
if (!printfbuf_start || !printfbuf_device)
{
return;
}
hipFree(printfbuf_device);
printfbuf_start = printfbuf_device = NULL;
}
//
// cudaPrintfDisplay
//
// Each call to this function dumps the entire current contents
// of the printf buffer to the pre-specified FILE pointer. The
// circular "start" pointer is advanced so that subsequent calls
// dump only new stuff.
//
// In the case of async memory access (via streams), call this
// repeatedly to keep trying to empty the buffer. If it's a sync
// access, then the whole buffer should empty in one go.
//
// Arguments:
// outputFP - File descriptor to output to (NULL => stdout)
// showThreadID - If true, prints [block,thread] before each line
//
extern "C" hipError_t cudaPrintfDisplay(void *outputFP, bool showThreadID)
{
printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP);
// For now, we force "synchronous" mode which means we're not concurrent
// with kernel execution. This also means we don't need clearOnPrint.
// If you're patching it for async operation, here's where you want it.
bool sync_printfs = true;
bool clearOnPrint = false;
// Initialisation check
if (!printfbuf_start || !printfbuf_device || !printf_fp)
{
return hipErrorMissingConfiguration;
}
// To determine which architecture we're using, we read the
// first short from the buffer - it'll be the magic number
// relating to the version.
unsigned short magic;
hipMemcpy(&magic, printfbuf_device, sizeof(unsigned short), hipMemcpyDeviceToHost);
// For SM_10 architecture, we've split our buffer into one-per-thread.
// That means we must do each thread block separately. It'll require
// extra reading. We also, for now, don't support async printfs because
// that requires tracking one start pointer per thread.
if (magic == CUPRINTF_SM10_MAGIC)
{
sync_printfs = true;
clearOnPrint = false;
int blocklen = 0;
char *blockptr = printfbuf_device;
while (blockptr < (printfbuf_device + printfbuf_len))
{
cuPrintfHeaderSM10 hdr;
hipMemcpy(&hdr, blockptr, sizeof(hdr), hipMemcpyDeviceToHost);
// We get our block-size-step from the very first header
if (hdr.thread_buf_len != 0)
{
blocklen = hdr.thread_buf_len;
}
// No magic number means no printfs from this thread
if (hdr.magic != CUPRINTF_SM10_MAGIC)
{
if (blocklen == 0)
{
fprintf(printf_fp, "No printf headers found at all!\n");
break; // No valid headers!
}
blockptr += blocklen;
continue;
}
// "offset" is non-zero then we can print the block contents
if (hdr.offset > 0)
{
// For synchronous printfs, we must print from endptr->bufend, then from start->end
if (sync_printfs)
{
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len);
}
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN);
}
// Move on to the next block and loop again
blockptr += hdr.thread_buf_len;
}
}
// For SM_11 and up, everything is a single buffer and it's simple
else if (magic == CUPRINTF_SM11_MAGIC)
{
// Grab the current "end of circular buffer" pointer.
char *printfbuf_end = NULL;
hipMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *));
// Adjust our starting and ending pointers to within the block
char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device;
char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device;
// For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular
// buffer wrap carefully because we could miss those past "end".
if (sync_printfs)
{
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len);
}
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr);
printfbuf_start = printfbuf_end;
}
else
;//printf("Bad magic number in cuPrintf buffer header\n");
// If we were synchronous, then we must ensure that the memory is cleared on exit
// otherwise another kernel launch with a different grid size could conflict.
if (sync_printfs)
{
hipMemset(printfbuf_device, 0, printfbuf_len);
}
return hipSuccess;
}
// Cleanup
#undef CUPRINTF_MAX_LEN
#undef CUPRINTF_ALIGN_SIZE
#undef CUPRINTF_SM10_MAGIC
#undef CUPRINTF_SM11_MAGIC
#endif
| 71c1de0a2d154171b420eb541e61edd202f2658e.cu | /*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* cuPrintf.cu
*
* This is a printf command callable from within a kernel. It is set
* up so that output is sent to a memory buffer, which is emptied from
* the host side - but only after a cudaDeviceSynchronize() on the host.
*
* Currently, there is a limitation of around 200 characters of output
* and no more than 10 arguments to a single cuPrintf() call. Issue
* multiple calls if longer format strings are required.
*
* It requires minimal setup, and is *NOT* optimised for performance.
* For example, writes are not coalesced - this is because there is an
* assumption that people will not want to printf from every single one
* of thousands of threads, but only from individual threads at a time.
*
* Using this is simple - it requires one host-side call to initialise
* everything, and then kernels can call cuPrintf at will. Sample code
* is the easiest way to demonstrate:
*
#include "cuPrintf.cu"
__global__ void testKernel(int val)
{
cuPrintf("Value is: %d\n", val);
}
int main()
{
cudaPrintfInit();
testKernel<<< 2, 3 >>>(10);
cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
return 0;
}
*
* See the header file, "cuPrintf.cuh" for more info, especially
* arguments to cudaPrintfInit() and cudaPrintfDisplay();
*/
#ifndef CUPRINTF_CU
#define CUPRINTF_CU
#include "cuPrintf.cuh"
#if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture
#include <sm_11_atomic_functions.h>
#endif
// This is the smallest amount of memory, per-thread, which is allowed.
// It is also the largest amount of space a single printf() can take up
const static int CUPRINTF_MAX_LEN = 256;
// This structure is used internally to track block/thread output restrictions.
typedef struct __align__(8)
{
int threadid; // CUPRINTF_UNRESTRICTED for unrestricted
int blockid; // CUPRINTF_UNRESTRICTED for unrestricted
}
cuPrintfRestriction;
// The main storage is in a global print buffer, which has a known
// start/end/length. These are atomically updated so it works as a
// circular buffer.
// Since the only control primitive that can be used is atomicAdd(),
// we cannot wrap the pointer as such. The actual address must be
// calculated from printfBufferPtr by mod-ing with printfBufferLength.
// For sm_10 architecture, we must subdivide the buffer per-thread
// since we do not even have an atomic primitive.
__constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host)
__constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host)
__device__ static cuPrintfRestriction restrictRules; // Output restrictions
__device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset
// This is the header preceding all printf entries.
// NOTE: It *must* be size-aligned to the maximum entity size (size_t)
typedef struct __align__(8)
{
unsigned short magic; // Magic number says we're valid
unsigned short fmtoffset; // Offset of fmt string into buffer
unsigned short blockid; // Block ID of author
unsigned short threadid; // Thread ID of author
}
cuPrintfHeader;
// Special header for sm_10 architecture
#define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character
typedef struct __align__(16)
{
unsigned short magic; // sm_10 specific magic number
unsigned short unused;
unsigned int thread_index; // thread ID for this buffer
unsigned int thread_buf_len; // per-thread buffer length
unsigned int offset; // most recent printf's offset
}
cuPrintfHeaderSM10;
// Because we can't write an element which is not aligned to its bit-size,
// we have to align all sizes and variables on maximum-size boundaries.
// That means sizeof(double) in this case, but we'll use (long long) for
// better arch<1.3 support
#define CUPRINTF_ALIGN_SIZE sizeof(long long)
// All our headers are prefixed with a magic number so we know they're ready
#define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character
//
// getNextPrintfBufPtr
//
// Grabs a block of space in the general circular buffer, using an
// atomic function to ensure that it's ours. We handle wrapping
// around the circular buffer and return a pointer to a place which
// can be written to.
//
// Important notes:
// 1. We always grab CUPRINTF_MAX_LEN bytes
// 2. Because of 1, we never worry about wrapping around the end
// 3. Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN
//
// This returns a pointer to the place where we own.
//
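// For example, with a 4096-byte buffer and the default CUPRINTF_MAX_LEN of 256,
// successive calls hand out offsets 0, 256, 512, ..., 3840 and then wrap back
// to 0 - the atomic counter itself keeps growing, only the mod-ed offset wraps.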
__device__ static char *getNextPrintfBufPtr()
{
// Initialisation check
if (!printfBufferPtr)
{
return NULL;
}
// Thread/block restriction check
if ((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y)))
{
return NULL;
}
if ((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z)))
{
return NULL;
}
// Conditional section, dependent on architecture
#if __CUDA_ARCH__ == 100
// For sm_10 architectures, we have no atomic add - this means we must split the
// entire available buffer into per-thread blocks. Inefficient, but what can you do.
int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z);
int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z +
(blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z);
// Find our own block of data and go to it. Make sure the per-thread length
// is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and
// alignment issues! We must round down, of course.
unsigned int thread_buf_len = printfBufferLength / thread_count;
thread_buf_len &= ~(CUPRINTF_MAX_LEN-1);
// We *must* have a thread buffer length able to fit at least two printfs (one header, one real)
if (thread_buf_len < (CUPRINTF_MAX_LEN * 2))
{
return NULL;
}
// Now address our section of the buffer. The first item is a header.
char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index);
cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer;
if (hdr.magic != CUPRINTF_SM10_MAGIC)
{
// If our header is not set up, initialise it
hdr.magic = CUPRINTF_SM10_MAGIC;
hdr.thread_index = thread_index;
hdr.thread_buf_len = thread_buf_len;
hdr.offset = 0; // Note we start at 0! We pre-increment below.
*(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header
// For initial setup purposes, we might need to init thread0's header too
// (so that cudaPrintfDisplay() below will work). This is only run once.
cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer;
tophdr->thread_buf_len = thread_buf_len;
}
// Adjust the offset by the right amount, and wrap it if need be
unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN;
if (offset >= hdr.thread_buf_len)
{
offset = CUPRINTF_MAX_LEN;
}
// Write back the new offset for next time and return a pointer to it
((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset;
return myPrintfBuffer + offset;
#else
// Much easier with an atomic operation!
size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer;
offset %= printfBufferLength;
return globalPrintfBuffer + offset;
#endif
}
//
// writePrintfHeader
//
// Inserts the header for containing our UID, fmt position and
// block/thread number. We generate it dynamically to avoid
// issues arising from requiring pre-initialisation.
//
__device__ static void writePrintfHeader(char *ptr, char *fmtptr)
{
if (ptr)
{
cuPrintfHeader header;
header.magic = CUPRINTF_SM11_MAGIC;
header.fmtoffset = (unsigned short)(fmtptr - ptr);
header.blockid = blockIdx.x + gridDim.x*blockIdx.y;
header.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
*(cuPrintfHeader *)(void *)ptr = header;
}
}
//
// cuPrintfStrncpy
//
// This special strncpy outputs an aligned length value, followed by the
// string. It then zero-pads the rest of the string until a 64-aligned
// boundary. The length *includes* the padding. A pointer to the byte
// just after the \0 is returned.
//
// This function could overflow CUPRINTF_MAX_LEN characters in our buffer.
// To avoid it, we must count as we output and truncate where necessary.
//
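// For instance, copying the string "hi" with the default 8-byte
// CUPRINTF_ALIGN_SIZE writes the aligned length (8), then 'h', 'i', '\0' and
// five padding zeros, so the next item starts on an aligned boundary.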
__device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end)
{
// Initialisation and overflow check
if (!dest || !src || (dest >= end))
{
return NULL;
}
// Prepare to write the length specifier. We're guaranteed to have
// at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in
// chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE.
int *lenptr = (int *)(void *)dest;
int len = 0;
dest += CUPRINTF_ALIGN_SIZE;
// Now copy the string
while (n--)
{
if (dest >= end) // Overflow check
{
break;
}
len++;
*dest++ = *src;
if (*src++ == '\0')
{
break;
}
}
// Now write out the padding bytes, and we have our length.
while ((dest < end) && (((size_t)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0))
{
len++;
*dest++ = 0;
}
*lenptr = len;
return (dest < end) ? dest : NULL; // Overflow means return NULL
}
//
// copyArg
//
// This copies a length specifier and then the argument out to the
// data buffer. Templates let the compiler figure all this out at
// compile-time, making life much simpler from the programming
// point of view. I'm assuming all (const char *) is a string, and
// everything else is the variable it points at. I'd love to see
// a better way of doing it, but aside from parsing the format
// string I can't think of one.
//
// The length of the data type is inserted at the beginning (so that
// the display can distinguish between float and double), and the
// pointer to the end of the entry is returned.
//
__device__ static char *copyArg(char *ptr, const char *arg, char *end)
{
// Initialisation check
if (!ptr || !arg)
{
return NULL;
}
// strncpy does all our work. We just terminate.
if ((ptr = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end)) != NULL)
{
*ptr = 0;
}
return ptr;
}
template <typename T>
__device__ static char *copyArg(char *ptr, T &arg, char *end)
{
// Initialisation and overflow check. Alignment rules mean that
// we're at least CUPRINTF_ALIGN_SIZE away from "end", so we only need
// to check that one offset.
if (!ptr || ((ptr+CUPRINTF_ALIGN_SIZE) >= end))
{
return NULL;
}
// Write the length and argument
*(int *)(void *)ptr = sizeof(arg);
ptr += CUPRINTF_ALIGN_SIZE;
*(T *)(void *)ptr = arg;
ptr += CUPRINTF_ALIGN_SIZE;
*ptr = 0;
return ptr;
}
//
// cuPrintf
//
// Templated printf functions to handle multiple arguments.
// Note we return the total amount of data copied, not the number
// of characters output. But then again, who ever looks at the
// return from printf() anyway?
//
// The format is to grab a block of circular buffer space, the
// start of which will hold a header and a pointer to the format
// string. We then write in all the arguments, and finally the
// format string itself. This is to make it easy to prevent
// overflow of our buffer (we support up to 10 arguments, each of
// which can be 12 bytes in length - that means that only the
// format string (or a %s) can actually overflow; so the overflow
// check need only be in the strcpy function.)
//
// The header is written at the very last because that's what
// makes it look like we're done.
//
// Errors, which are basically lack-of-initialisation, are ignored
// in the called functions because NULL pointers are passed around
//
// All printf variants basically do the same thing, setting up the
// buffer, writing all arguments, then finalising the header. For
// clarity, we'll pack the code into some big macros.
#define CUPRINTF_PREAMBLE \
char *start, *end, *bufptr, *fmtstart; \
if((start = getNextPrintfBufPtr()) == NULL) return 0; \
end = start + CUPRINTF_MAX_LEN; \
bufptr = start + sizeof(cuPrintfHeader);
// Posting an argument is easy
#define CUPRINTF_ARG(argname) \
bufptr = copyArg(bufptr, argname, end);
// After args are done, record start-of-fmt and write the fmt and header
#define CUPRINTF_POSTAMBLE \
fmtstart = bufptr; \
end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \
writePrintfHeader(start, end ? fmtstart : NULL); \
return end ? (int)(end - start) : 0;
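// Conceptually, a one-argument call such as cuPrintf("v=%d\n", v) therefore
// expands to roughly:
//
//   start    = getNextPrintfBufPtr();            // claim one CUPRINTF_MAX_LEN slot
//   bufptr   = start + sizeof(cuPrintfHeader);   // leave room for the header
//   bufptr   = copyArg(bufptr, v, end);          // length-prefixed argument
//   fmtstart = bufptr;
//   end      = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); // format string
//   writePrintfHeader(start, fmtstart);          // publish the record last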
__device__ int cuPrintf(const char *fmt)
{
CUPRINTF_PREAMBLE;
CUPRINTF_POSTAMBLE;
}
template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_ARG(arg10);
CUPRINTF_POSTAMBLE;
}
#undef CUPRINTF_PREAMBLE
#undef CUPRINTF_ARG
#undef CUPRINTF_POSTAMBLE
//
// cuPrintfRestrict
//
// Called to restrict output to a given thread/block.
// We store the info in "restrictRules", which is set up at
// init time by the host. It's not the cleanest way to do this
// because it means restrictions will last between
// invocations, but given the output-pointer continuity,
// I feel this is reasonable.
//
__device__ void cuPrintfRestrict(int threadid, int blockid)
{
int thread_count = blockDim.x * blockDim.y * blockDim.z;
if (((threadid < thread_count) && (threadid >= 0)) || (threadid == CUPRINTF_UNRESTRICTED))
{
restrictRules.threadid = threadid;
}
int block_count = gridDim.x * gridDim.y;
if (((blockid < block_count) && (blockid >= 0)) || (blockid == CUPRINTF_UNRESTRICTED))
{
restrictRules.blockid = blockid;
}
}
///////////////////////////////////////////////////////////////////////////////
// HOST SIDE
#include <stdio.h>
static FILE *printf_fp;
static char *printfbuf_start=NULL;
static char *printfbuf_device=NULL;
static int printfbuf_len=0;
//
// outputPrintfData
//
// Our own internal function, which takes a pointer to a data buffer
// and passes it through libc's printf for output.
//
// We receive the format string and a pointer to where the data is
// held. We then run through and print it out.
//
// Returns 0 on failure, 1 on success
//
static int outputPrintfData(char *fmt, char *data)
{
// Format string is prefixed by a length that we don't need
fmt += CUPRINTF_ALIGN_SIZE;
// Now run through it, printing everything we can. We must
// run to every % character, extract only that, and use printf
// to format it.
char *p = strchr(fmt, '%');
while (p != NULL)
{
// Print up to the % character
*p = '\0';
fputs(fmt, printf_fp);
*p = '%'; // Put back the %
// Now handle the format specifier
char *format = p++; // Points to the '%'
p += strcspn(p, "%cdiouxXeEfgGaAnps");
if (*p == '\0') // If no format specifier, print the whole thing
{
fmt = format;
break;
}
// Cut out the format bit and use printf to print it. It's prefixed
// by its length.
int arglen = *(int *)data;
if (arglen > CUPRINTF_MAX_LEN)
{
fputs("Corrupt printf buffer data - aborting\n", printf_fp);
return 0;
}
data += CUPRINTF_ALIGN_SIZE;
char specifier = *p++;
char c = *p; // Store for later
*p = '\0';
switch (specifier)
{
// These all take integer arguments
case 'c':
case 'd':
case 'i':
case 'o':
case 'u':
case 'x':
case 'X':
case 'p':
fprintf(printf_fp, format, *((int *)data));
break;
// These all take double arguments
case 'e':
case 'E':
case 'f':
case 'g':
case 'G':
case 'a':
case 'A':
if (arglen == 4) // Float vs. Double thing
{
fprintf(printf_fp, format, *((float *)data));
}
else
{
fprintf(printf_fp, format, *((double *)data));
}
break;
// Strings are handled in a special way
case 's':
fprintf(printf_fp, format, (char *)data);
break;
// % is special
case '%':
fprintf(printf_fp, "%%");
break;
// Everything else is just printed out as-is
default:
fprintf(printf_fp, "%s", format);
break;
}
data += CUPRINTF_ALIGN_SIZE; // Move on to next argument
*p = c; // Restore what we removed
fmt = p; // Adjust fmt string to be past the specifier
p = strchr(fmt, '%'); // and get the next specifier
}
// Print out the last of the string
fputs(fmt, printf_fp);
return 1;
}
//
// doPrintfDisplay
//
// This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the
// print function above to display them. We've got this separate from
// cudaPrintfDisplay() below so we can handle the SM_10 architecture
// partitioning.
//
static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr)
{
// Grab, piece-by-piece, each output element until we catch
// up with the circular buffer end pointer
int printf_count=0;
char printfbuf_local[CUPRINTF_MAX_LEN+1];
printfbuf_local[CUPRINTF_MAX_LEN] = '\0';
while (bufptr != endptr)
{
// Wrap ourselves at the end-of-buffer
if (bufptr == bufend)
{
bufptr = bufstart;
}
// Adjust our start pointer to within the circular buffer and copy a block.
cudaMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, cudaMemcpyDeviceToHost);
// If the magic number isn't valid, then this write hasn't gone through
// yet and we'll wait until it does (or we're past the end for non-async printfs).
cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local;
if ((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN))
{
//fprintf(printf_fp, "Bad magic number in printf header\n");
break;
}
// Extract all the info and get this printf done
if (headings)
{
fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid);
}
if (hdr->fmtoffset == 0)
{
fprintf(printf_fp, "printf buffer overflow\n");
}
else if (!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader)))
{
break;
}
printf_count++;
// Clear if asked
if (clear)
{
cudaMemset(bufptr, 0, CUPRINTF_MAX_LEN);
}
// Now advance our start location, because we're done, and keep copying
bufptr += CUPRINTF_MAX_LEN;
}
return printf_count;
}
//
// cudaPrintfInit
//
// Takes a buffer length to allocate, creates the memory on the device and
// records it in the device-side symbols that cuPrintf() writes into. The
// buffer is released later by cudaPrintfEnd().
//
extern "C" cudaError_t cudaPrintfInit(size_t bufferLen)
{
// Fix up bufferlen to be a multiple of CUPRINTF_MAX_LEN
bufferLen = (bufferLen < (size_t)CUPRINTF_MAX_LEN) ? CUPRINTF_MAX_LEN : bufferLen;
if ((bufferLen % CUPRINTF_MAX_LEN) > 0)
{
bufferLen += (CUPRINTF_MAX_LEN - (bufferLen % CUPRINTF_MAX_LEN));
}
printfbuf_len = (int)bufferLen;
// Allocate a print buffer on the device and zero it
if (cudaMalloc((void **)&printfbuf_device, printfbuf_len) != cudaSuccess)
{
return cudaErrorInitializationError;
}
cudaMemset(printfbuf_device, 0, printfbuf_len);
printfbuf_start = printfbuf_device; // Where we start reading from
// No restrictions to begin with
cuPrintfRestriction restrict;
restrict.threadid = restrict.blockid = CUPRINTF_UNRESTRICTED;
cudaMemcpyToSymbol(restrictRules, &restrict, sizeof(restrict));
// Initialise the buffer and the respective lengths/pointers.
cudaMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *));
cudaMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *));
cudaMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len));
return cudaSuccess;
}
//
// cudaPrintfEnd
//
// Frees up the memory which we allocated
//
extern "C" void cudaPrintfEnd()
{
if (!printfbuf_start || !printfbuf_device)
{
return;
}
cudaFree(printfbuf_device);
printfbuf_start = printfbuf_device = NULL;
}
//
// cudaPrintfDisplay
//
// Each call to this function dumps the entire current contents
// of the printf buffer to the pre-specified FILE pointer. The
// circular "start" pointer is advanced so that subsequent calls
// dump only new stuff.
//
// In the case of async memory access (via streams), call this
// repeatedly to keep trying to empty the buffer. If it's a sync
// access, then the whole buffer should empty in one go.
//
// Arguments:
// outputFP - File descriptor to output to (NULL => stdout)
// showThreadID - If true, prints [block,thread] before each line
//
extern "C" cudaError_t cudaPrintfDisplay(void *outputFP, bool showThreadID)
{
printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP);
// For now, we force "synchronous" mode which means we're not concurrent
// with kernel execution. This also means we don't need clearOnPrint.
// If you're patching it for async operation, here's where you want it.
bool sync_printfs = true;
bool clearOnPrint = false;
// Initialisation check
if (!printfbuf_start || !printfbuf_device || !printf_fp)
{
return cudaErrorMissingConfiguration;
}
// To determine which architecture we're using, we read the
// first short from the buffer - it'll be the magic number
// relating to the version.
unsigned short magic;
cudaMemcpy(&magic, printfbuf_device, sizeof(unsigned short), cudaMemcpyDeviceToHost);
// For SM_10 architecture, we've split our buffer into one-per-thread.
// That means we must do each thread block separately. It'll require
// extra reading. We also, for now, don't support async printfs because
// that requires tracking one start pointer per thread.
if (magic == CUPRINTF_SM10_MAGIC)
{
sync_printfs = true;
clearOnPrint = false;
int blocklen = 0;
char *blockptr = printfbuf_device;
while (blockptr < (printfbuf_device + printfbuf_len))
{
cuPrintfHeaderSM10 hdr;
cudaMemcpy(&hdr, blockptr, sizeof(hdr), cudaMemcpyDeviceToHost);
// We get our block-size-step from the very first header
if (hdr.thread_buf_len != 0)
{
blocklen = hdr.thread_buf_len;
}
// No magic number means no printfs from this thread
if (hdr.magic != CUPRINTF_SM10_MAGIC)
{
if (blocklen == 0)
{
fprintf(printf_fp, "No printf headers found at all!\n");
break; // No valid headers!
}
blockptr += blocklen;
continue;
}
// "offset" is non-zero then we can print the block contents
if (hdr.offset > 0)
{
// For synchronous printfs, we must print from endptr->bufend, then from start->end
if (sync_printfs)
{
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len);
}
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN);
}
// Move on to the next block and loop again
blockptr += hdr.thread_buf_len;
}
}
// For SM_11 and up, everything is a single buffer and it's simple
else if (magic == CUPRINTF_SM11_MAGIC)
{
// Grab the current "end of circular buffer" pointer.
char *printfbuf_end = NULL;
cudaMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *));
// Adjust our starting and ending pointers to within the block
char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device;
char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device;
// For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular
// buffer wrap carefully because we could miss those past "end".
if (sync_printfs)
{
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len);
}
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr);
printfbuf_start = printfbuf_end;
}
else
;//printf("Bad magic number in cuPrintf buffer header\n");
// If we were synchronous, then we must ensure that the memory is cleared on exit
// otherwise another kernel launch with a different grid size could conflict.
if (sync_printfs)
{
cudaMemset(printfbuf_device, 0, printfbuf_len);
}
return cudaSuccess;
}
// Cleanup
#undef CUPRINTF_MAX_LEN
#undef CUPRINTF_ALIGN_SIZE
#undef CUPRINTF_SM10_MAGIC
#undef CUPRINTF_SM11_MAGIC
#endif
|
e0afea9e3f02d25e6ba9519648effb37bbefea46.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
int NUMBEROFTEST = 1024;
typedef struct{
int length;
char typeOfSort;
}inputData;
typedef struct{
int *pArrayToSort;
}outputData;
__device__ void bubble_sort(int *pArray, int howMany);
__device__ void quick_sort(int * pArray, int start, int end);
__device__ void merge_sort(int* pArray, int length);
__device__ void merge(int * pArray, int * pTempArray, int leftStart, int* mid, int rightEnd);
__device__ void swap(int *x, int *y);
__device__ void gnome_sort(int *arr, int n);
__device__ void comb_sort(int *arr, int n);
__device__ void cocktail_sort(int *arr, int n);
__device__ void insertion_sort(int *arr, int n);
__device__ void odd_even_sort(int *arr, int n);
__device__ void shell_sort(int *arr, int n);
__device__ void selection_sort(int *arr, int n);
__device__ void radix_sort(int* arr, int n);
__device__ int find_largest_number(int arr[], int n);
__device__ int pancake_sort(int* arr, int n);
__device__ void flip(int *arr, int i) ;
__device__ int find_max(int *arr, int n);
__device__ void heap_sort(int *a, int n);
__device__ void build_heap(int *a, int n);
__device__ void satisfy_heap(int *a, int i, int heap_size);
//************************************** READING INPUT FROM FILE **************************************** /
void readingInput(inputData *dataCPU, outputData* dataOut){
int index =0;
int length;
char type;
int ret = fscanf(stdin, "%d %c",&length,&type);
while(ret != EOF){
dataCPU[index].length = length;
dataCPU[index].typeOfSort = type;
dataOut[index].pArrayToSort = (int*)malloc(length * sizeof(int));
int i;
for(i=0; i<length; i++)
fscanf(stdin, "%d", &dataOut[index].pArrayToSort[i]);
index++;
ret = fscanf(stdin, "%d %c",&length,&type);
}
NUMBEROFTEST = index;
}
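// The parser above expects stdin to hold, for each test case, a length, a
// single sort-type character and then that many integers, e.g. (made-up data):
//
// 5 q
// 9 3 7 1 5
// 4 b
// 8 6 4 2
//
// which queues a 5-element quicksort test followed by a 4-element bubble sort
// test. Note that main() sizes its arrays from the initial NUMBEROFTEST (1024),
// so at most 1024 test cases can be read safely.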
__global__ void myKernel(inputData *dataCPU, outputData* dataOut)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
switch(dataCPU[index].typeOfSort){
case 'b' :
printf("Using bubblesort\n");
bubble_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 'q' :
printf("Using quicksort\n");
quick_sort(dataOut[index].pArrayToSort, 0, dataCPU[index].length - 1);
break;
case 'm' :
printf("Using mergesort\n");
merge_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 'o' :
printf("Using odd_even_sort\n");
odd_even_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 'c' :
printf("Using cocktail_sort\n");
cocktail_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 't' :
printf("Using comb_sort\n");
comb_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 'g' :
printf("Using gnome_sort\n");
gnome_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 'i' :
printf("Using insertion_sort\n");
insertion_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 'h' :
printf("Using shell_sort\n");
shell_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 's' :
printf("Using selection_sort\n");
selection_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 'r' :
printf("Using radix_sort\n");
radix_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 'p' :
printf("Using pancake_sort\n");
pancake_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 'u' :
printf("Using heap_sort\n");
heap_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
default:
printf("No Correct Sort Chosen\n");
break;
}
}
//************************************* Writing time in a FILE ******************************************
void writeFile(double time, char* fileName){
FILE *fp1;
fp1 = fopen("NUMCudaResults.txt", "a");
if (fp1 == NULL){
printf("Problem in a file");
return;
}
fprintf(fp1, "%s ", fileName);
fprintf(fp1, "%f seconds\n", time);
fclose(fp1);
}
void launch(inputData *dataCPU, outputData* dataOut, char* fileName){
int threadsPerBlock = 32;
int blocksPerGrid = (NUMBEROFTEST) / threadsPerBlock;
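	// Note: this integer division (and the absence of an index guard in myKernel)
	// assumes NUMBEROFTEST is an exact multiple of threadsPerBlock; any trailing
	// test cases would otherwise never be sorted.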
int output_size = (NUMBEROFTEST) * sizeof(outputData);
int input_size = (NUMBEROFTEST) * sizeof(inputData);
inputData *dataGPUIn, *in_data = (inputData*)malloc(sizeof(inputData)* NUMBEROFTEST);
outputData *dataGPUOut, *out_data = (outputData*)malloc(sizeof(outputData)* NUMBEROFTEST);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
int i =0;
hipEventRecord(start);
for(; i<NUMBEROFTEST;i++){
int* d_array;
hipMalloc((void**)&d_array, dataCPU[i].length*sizeof(int));
hipMemcpy(d_array,dataOut[i].pArrayToSort, dataCPU[i].length*sizeof(int),hipMemcpyHostToDevice);
in_data[i].length = dataCPU[i].length;
in_data[i].typeOfSort = dataCPU[i].typeOfSort;
out_data[i].pArrayToSort = d_array;
}
hipMalloc ( (void**) &dataGPUIn, input_size);
hipMemcpy(dataGPUIn, in_data, input_size, hipMemcpyHostToDevice );
hipMalloc ( (void**) &dataGPUOut, output_size);
hipMemcpy(dataGPUOut, out_data, output_size, hipMemcpyHostToDevice );
	myKernel<<<blocksPerGrid, threadsPerBlock>>>(dataGPUIn, dataGPUOut); // execute the sorting kernel on the device
hipMemcpy(out_data, dataGPUOut, output_size, hipMemcpyDeviceToHost);
i=0;
for(; i<NUMBEROFTEST;i++){
hipMemcpy(dataOut[i].pArrayToSort, out_data[i].pArrayToSort, dataCPU[i].length*sizeof(int), hipMemcpyDeviceToHost);
}
hipFree(dataGPUIn);
hipFree(dataGPUOut);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
writeFile((milliseconds/1000),fileName);
}
int main (int args, char* argv[])
{
inputData *dataCPU = (inputData*)malloc(sizeof(inputData)* NUMBEROFTEST);
outputData *dataOut = (outputData*)malloc(sizeof(outputData)* NUMBEROFTEST);
readingInput(dataCPU,dataOut);
launch(dataCPU,dataOut,argv[1]);
int index=0,i;
for(;index< NUMBEROFTEST; index++) {
printf("ID# %d ", index, dataCPU[index].length);
for(i = 0 ; i < dataCPU[index].length ; i++){
printf("%d ", *(dataOut[index].pArrayToSort + i));
}
printf("\n");
}
return 0;
}
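// Illustrative build-and-run line (the file and input names below are
// placeholders, not part of this project): compile with hipcc and pipe the
// test cases in on stdin, passing a label for the timing log as argv[1], e.g.
//
//   hipcc sort_bench.hip -o sort_bench && ./sort_bench run1 < tests.txt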
/********* https://github.com/eddyerburgh/c-sorting-algorithms******/
__device__ void quick_sort(int arr[], int first_index, int last_index) {
// declaring index variables
int pivotIndex, temp, index_a, index_b;
if (first_index < last_index) {
// assigning first element index as pivot element
pivotIndex = first_index;
index_a = first_index;
index_b = last_index;
// Sorting in Ascending order with quick sort
while (index_a < index_b) {
while (arr[index_a] <= arr[pivotIndex] && index_a < last_index) {
index_a++;
}
while (arr[index_b] > arr[pivotIndex]) {
index_b--;
}
if (index_a < index_b) {
// Swapping operation
temp = arr[index_a];
arr[index_a] = arr[index_b];
arr[index_b] = temp;
}
}
// At the end of first iteration, swap pivot element with index_b element
temp = arr[pivotIndex];
arr[pivotIndex] = arr[index_b];
arr[index_b] = temp;
// Recursive call for quick sort, with partitioning
quick_sort(arr, first_index, index_b - 1);
quick_sort(arr, index_b + 1, last_index);
}
}
__device__ void merge(
int* arr, int* left_arr, int left_count, int* right_arr, int right_count) {
int i, j, k;
i = 0;
j = 0;
k = 0;
while (i < left_count && j < right_count) {
if (left_arr[i] < right_arr[j])
arr[k++] = left_arr[i++];
else
arr[k++] = right_arr[j++];
}
while (i < left_count) arr[k++] = left_arr[i++];
while (j < right_count) arr[k++] = right_arr[j++];
}
__device__ void merge_sort(int arr[], int n) {
int mid, i, *L, *right_arr;
if (n < 2) return;
mid = n / 2;
L = (int*)malloc(mid * sizeof(int));
right_arr = (int*)malloc((n - mid) * sizeof(int));
for (i = 0; i < mid; i++) L[i] = arr[i];
for (i = mid; i < n; i++) right_arr[i - mid] = arr[i];
merge_sort(L, mid);
merge_sort(right_arr, n - mid);
merge(arr, L, mid, right_arr, n - mid);
free(L);
free(right_arr);
}
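// Note that merge_sort (and radix_sort below) allocate scratch space with
// device-side malloc, which comes from the device heap; for large inputs the
// heap limit may need to be raised on the host first (for example with
// hipDeviceSetLimit(hipLimitMallocHeapSize, bytes), assuming that runtime call
// is available) before the kernel is launched.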
__device__ void bubble_sort(int arr[], int n) {
int temp;
int i;
int j;
i = 0;
while (i < n) {
j = 0;
while (j < i) {
if (arr[j] > arr[i]) {
temp = arr[j];
arr[j] = arr[i];
arr[i] = temp;
}
j++;
}
i++;
}
}
__device__ void odd_even_sort(int *arr, int n) {
bool sorted = false; // Initially array is unsorted
int temp;
while (!sorted) {
sorted = true;
// Perform Bubble sort on odd indexed element
for (int i = 1; i <= n - 2; i += 2) {
if (arr[i] > arr[i + 1]) {
temp = arr[i + 1];
arr[i + 1] = arr[i];
arr[i] = temp;
sorted = false;
}
}
// Perform Bubble sort on even indexed element
for (int i = 0; i <= n - 2; i += 2) {
if (arr[i] > arr[i + 1]) {
temp = arr[i + 1];
arr[i + 1] = arr[i];
arr[i] = temp;
sorted = false;
}
}
}
}
__device__ void cocktail_sort(int *arr, int n) {
bool swapped = true;
int i = 0;
int j = n - 1;
while (i < j && swapped) {
swapped = false;
for (int k = i; k < j; k++) {
if (arr[k] > arr[k + 1]) {
int temp = arr[k];
arr[k] = arr[k + 1];
arr[k + 1] = temp;
swapped = true;
}
}
j--;
if (swapped) {
swapped = false;
for (int k = j; k > i; k--) {
if (arr[k] < arr[k - 1]) {
int temp = arr[k];
arr[k] = arr[k - 1];
arr[k - 1] = temp;
swapped = true;
}
}
}
i++;
}
}
__device__ void comb_sort(int *arr, int n) {
  int temp;
  int gap = n;
  int shrink = 3;
  bool swapped = true; // initialised so the loop condition below never reads an indeterminate value
while (gap != 1 || swapped == true) {
gap = (gap / shrink);
if (gap < 1) {
gap = 1;
}
swapped = false;
for (int i = 0; i < n - gap; i++) {
if (arr[i] > arr[i + gap]) {
temp = arr[i + gap];
arr[i + gap] = arr[i];
arr[i] = temp;
swapped = true;
}
}
}
}
__device__ void gnome_sort(int *arr, int n) {
int i = 0;
int temp;
while (i < n) {
if (i == 0 || arr[i] >= arr[i - 1]) {
i++;
} else {
temp = arr[i];
arr[i] = arr[i - 1];
arr[i - 1] = temp;
i--;
}
}
}
__device__ void insertion_sort(int *arr, int n) {
for (int i = 0; i < n; i++) {
int j = i;
while (j > 0 && arr[j - 1] > arr[j]) {
int temp = arr[j];
arr[j] = arr[j - 1];
arr[j - 1] = temp;
j--;
}
}
}
__device__ void shell_sort(int *arr, int n) {
int j;
for (int gap = n / 2; gap > 0; gap /= 2) {
for (int i = gap; i < n; ++i) {
int temp = arr[i];
for (j = i; j >= gap && temp < arr[j - gap]; j -= gap) {
arr[j] = arr[j - gap];
}
arr[j] = temp;
}
}
}
__device__ void selection_sort(int *arr, int n) {
int i, j;
for (j = 0; j < n - 1; j++) {
int iMin = j;
for (i = j + 1; i < n; i++) {
if (arr[i] < arr[iMin]) {
iMin = i;
}
}
if (iMin != j) {
int temp = arr[j];
arr[j] = arr[iMin];
arr[iMin] = temp;
}
}
}
__device__ int find_largest_number(int *arr, int n) {
int i;
int largest_number = -1;
for (i = 0; i < n; i++) {
if (arr[i] > largest_number) largest_number = arr[i];
}
return largest_number;
}
__device__ void radix_sort(int* arr, int n) {
// Base 10 is used
int i;
int *semi_sorted = (int*)malloc(n*sizeof(int));
int significant_digit = 1;
int largest_number = find_largest_number(arr, n);
// Loop until we reach the largest significant digit
while (largest_number / significant_digit > 0) {
int bucket[10] = {0};
// Counts the number of "keys" or digits that will go into each bucket
for (i = 0; i < n; i++) bucket[(arr[i] / significant_digit) % 10]++;
/**
* Add the count of the previous buckets,
* Acquires the indexes after the end of each bucket location in the arr
* Works similar to the count sort algorithm
**/
for (i = 1; i < 10; i++) bucket[i] += bucket[i - 1];
// Use the bucket to fill a "semi_sorted" arr
for (i = n - 1; i >= 0; i--)
semi_sorted[--bucket[(arr[i] / significant_digit) % 10]] = arr[i];
for (i = 0; i < n; i++) arr[i] = semi_sorted[i];
// Move to next significant digit
significant_digit *= 10;
  }
  free(semi_sorted); // release the device-heap scratch buffer allocated above
}
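// Worked example of one counting pass (made-up data {170, 45, 75, 90}, ones
// digit): the counts give bucket[0]=2 and bucket[5]=2; after the running sum
// bucket[0]=2 and bucket[5]=4, so the backwards fill yields {170, 90, 45, 75},
// which the next pass over the tens digit then orders further.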
__device__ void flip(int *arr, int i) {
int temp, start = 0;
while (start < i) {
temp = arr[start];
arr[start] = arr[i];
arr[i] = temp;
start++;
i--;
}
}
__device__ int find_max(int *arr, int n) {
int mi, i;
for (mi = 0, i = 0; i < n; ++i)
if (arr[i] > arr[mi]) mi = i;
return mi;
}
__device__ int pancake_sort(int* arr, int n) {
for (int curr_size = n; curr_size > 1; --curr_size) {
int mi = find_max(arr, curr_size);
if (mi != curr_size - 1) {
flip(arr, mi);
flip(arr, curr_size - 1);
}
  }
  return 0; // declared to return int, so return a value instead of falling off the end
}
__device__ void heap_sort(int a[], int n) {
build_heap(a, n);
int heap_size, i, temp;
heap_size = n - 1;
for (i = heap_size; i >= 0; i--) {
temp = a[0];
a[0] = a[heap_size];
a[heap_size] = temp;
heap_size--;
satisfy_heap(a, 0, heap_size);
}
}
__device__ void build_heap(int a[], int n) {
int i, heap_size;
heap_size = n - 1;
for (i = (n / 2); i >= 0; i--) {
satisfy_heap(a, i, heap_size);
}
}
__device__ void satisfy_heap(int a[], int i, int heap_size) {
int l, r, largest, temp;
l = 2 * i;
r = 2 * i + 1;
if (l <= heap_size && a[l] > a[i]) {
largest = l;
} else {
largest = i;
}
if (r <= heap_size && a[r] > a[largest]) {
largest = r;
}
if (largest != i) {
temp = a[i];
a[i] = a[largest];
a[largest] = temp;
satisfy_heap(a, largest, heap_size);
}
}
| e0afea9e3f02d25e6ba9519648effb37bbefea46.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
int NUMBEROFTEST = 1024;
typedef struct{
int length;
char typeOfSort;
}inputData;
typedef struct{
int *pArrayToSort;
}outputData;
__device__ void bubble_sort(int *pArray, int howMany);
__device__ void quick_sort(int * pArray, int start, int end);
__device__ void merge_sort(int* pArray, int length);
__device__ void merge(int * pArray, int * pTempArray, int leftStart, int* mid, int rightEnd);
__device__ void swap(int *x, int *y);
__device__ void gnome_sort(int *arr, int n);
__device__ void comb_sort(int *arr, int n);
__device__ void cocktail_sort(int *arr, int n);
__device__ void insertion_sort(int *arr, int n);
__device__ void odd_even_sort(int *arr, int n);
__device__ void shell_sort(int *arr, int n);
__device__ void selection_sort(int *arr, int n);
__device__ void radix_sort(int* arr, int n);
__device__ int find_largest_number(int arr[], int n);
__device__ int pancake_sort(int* arr, int n);
__device__ void flip(int *arr, int i) ;
__device__ int find_max(int *arr, int n);
__device__ void heap_sort(int *a, int n);
__device__ void build_heap(int *a, int n);
__device__ void satisfy_heap(int *a, int i, int heap_size);
//************************************** READING INPUT FROM FILE **************************************** /
void readingInput(inputData *dataCPU, outputData* dataOut){
int index =0;
int length;
char type;
int ret = fscanf(stdin, "%d %c",&length,&type);
while(ret != EOF){
dataCPU[index].length = length;
dataCPU[index].typeOfSort = type;
dataOut[index].pArrayToSort = (int*)malloc(length * sizeof(int));
int i;
for(i=0; i<length; i++)
fscanf(stdin, "%d", &dataOut[index].pArrayToSort[i]);
index++;
ret = fscanf(stdin, "%d %c",&length,&type);
}
NUMBEROFTEST = index;
}
__global__ void myKernel(inputData *dataCPU, outputData* dataOut)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
switch(dataCPU[index].typeOfSort){
case 'b' :
printf("Using bubblesort\n");
bubble_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 'q' :
printf("Using quicksort\n");
quick_sort(dataOut[index].pArrayToSort, 0, dataCPU[index].length - 1);
break;
case 'm' :
printf("Using mergesort\n");
merge_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 'o' :
printf("Using odd_even_sort\n");
odd_even_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 'c' :
printf("Using cocktail_sort\n");
cocktail_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 't' :
printf("Using comb_sort\n");
comb_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 'g' :
printf("Using gnome_sort\n");
gnome_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 'i' :
printf("Using insertion_sort\n");
insertion_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 'h' :
printf("Using shell_sort\n");
shell_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 's' :
printf("Using selection_sort\n");
selection_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 'r' :
printf("Using radix_sort\n");
radix_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 'p' :
printf("Using pancake_sort\n");
pancake_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
case 'u' :
printf("Using heap_sort\n");
heap_sort(dataOut[index].pArrayToSort, dataCPU[index].length);
break;
default:
printf("No Correct Sort Chosen\n");
break;
}
}
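// Each thread of the launch handles exactly one test case: the global thread
// index selects which (length, type, array) triple it sorts, and there is no
// index guard, so the grid must supply exactly NUMBEROFTEST threads.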
//************************************* Writing time in a FILE ******************************************
void writeFile(double time, char* fileName){
FILE *fp1;
fp1 = fopen("NUMCudaResults.txt", "a");
if (fp1 == NULL){
printf("Problem in a file");
return;
}
fprintf(fp1, "%s ", fileName);
fprintf(fp1, "%f seconds\n", time);
fclose(fp1);
}
void launch(inputData *dataCPU, outputData* dataOut, char* fileName){
int threadsPerBlock = 32;
int blocksPerGrid = (NUMBEROFTEST) / threadsPerBlock;
int output_size = (NUMBEROFTEST) * sizeof(outputData);
int input_size = (NUMBEROFTEST) * sizeof(inputData);
inputData *dataGPUIn, *in_data = (inputData*)malloc(sizeof(inputData)* NUMBEROFTEST);
outputData *dataGPUOut, *out_data = (outputData*)malloc(sizeof(outputData)* NUMBEROFTEST);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int i =0;
cudaEventRecord(start);
for(; i<NUMBEROFTEST;i++){
int* d_array;
cudaMalloc((void**)&d_array, dataCPU[i].length*sizeof(int));
cudaMemcpy(d_array,dataOut[i].pArrayToSort, dataCPU[i].length*sizeof(int),cudaMemcpyHostToDevice);
in_data[i].length = dataCPU[i].length;
in_data[i].typeOfSort = dataCPU[i].typeOfSort;
out_data[i].pArrayToSort = d_array;
}
cudaMalloc ( (void**) &dataGPUIn, input_size);
cudaMemcpy(dataGPUIn, in_data, input_size, cudaMemcpyHostToDevice );
cudaMalloc ( (void**) &dataGPUOut, output_size);
cudaMemcpy(dataGPUOut, out_data, output_size, cudaMemcpyHostToDevice );
myKernel << <blocksPerGrid, threadsPerBlock >> >(dataGPUIn, dataGPUOut, NUMBEROFTEST); // execute the sorting kernel
cudaMemcpy(out_data, dataGPUOut, output_size, cudaMemcpyDeviceToHost);
i=0;
for(; i<NUMBEROFTEST;i++){
cudaMemcpy(dataOut[i].pArrayToSort, out_data[i].pArrayToSort, dataCPU[i].length*sizeof(int), cudaMemcpyDeviceToHost);
}
cudaFree(dataGPUIn);
cudaFree(dataGPUOut);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
writeFile((milliseconds/1000),fileName);
}
int main (int args, char* argv[])
{
inputData *dataCPU = (inputData*)malloc(sizeof(inputData)* NUMBEROFTEST);
outputData *dataOut = (outputData*)malloc(sizeof(outputData)* NUMBEROFTEST);
readingInput(dataCPU,dataOut);
launch(dataCPU,dataOut,argv[1]);
int index=0,i;
for(;index< NUMBEROFTEST; index++) {
printf("ID# %d ", index, dataCPU[index].length);
for(i = 0 ; i < dataCPU[index].length ; i++){
printf("%d ", *(dataOut[index].pArrayToSort + i));
}
printf("\n");
}
return 0;
}
/********* https://github.com/eddyerburgh/c-sorting-algorithms******/
__device__ void quick_sort(int arr[], int first_index, int last_index) {
// declaring index variables
int pivotIndex, temp, index_a, index_b;
if (first_index < last_index) {
// assigning first element index as pivot element
pivotIndex = first_index;
index_a = first_index;
index_b = last_index;
// Sorting in Ascending order with quick sort
while (index_a < index_b) {
while (arr[index_a] <= arr[pivotIndex] && index_a < last_index) {
index_a++;
}
while (arr[index_b] > arr[pivotIndex]) {
index_b--;
}
if (index_a < index_b) {
// Swapping operation
temp = arr[index_a];
arr[index_a] = arr[index_b];
arr[index_b] = temp;
}
}
// At the end of first iteration, swap pivot element with index_b element
temp = arr[pivotIndex];
arr[pivotIndex] = arr[index_b];
arr[index_b] = temp;
// Recursive call for quick sort, with partitioning
quick_sort(arr, first_index, index_b - 1);
quick_sort(arr, index_b + 1, last_index);
}
}
__device__ void merge(
int* arr, int* left_arr, int left_count, int* right_arr, int right_count) {
int i, j, k;
i = 0;
j = 0;
k = 0;
while (i < left_count && j < right_count) {
if (left_arr[i] < right_arr[j])
arr[k++] = left_arr[i++];
else
arr[k++] = right_arr[j++];
}
while (i < left_count) arr[k++] = left_arr[i++];
while (j < right_count) arr[k++] = right_arr[j++];
}
__device__ void merge_sort(int arr[], int n) {
int mid, i, *L, *right_arr;
if (n < 2) return;
mid = n / 2;
L = (int*)malloc(mid * sizeof(int));
right_arr = (int*)malloc((n - mid) * sizeof(int));
for (i = 0; i < mid; i++) L[i] = arr[i];
for (i = mid; i < n; i++) right_arr[i - mid] = arr[i];
merge_sort(L, mid);
merge_sort(right_arr, n - mid);
merge(arr, L, mid, right_arr, n - mid);
free(L);
free(right_arr);
}
__device__ void bubble_sort(int arr[], int n) {
int temp;
int i;
int j;
i = 0;
while (i < n) {
j = 0;
while (j < i) {
if (arr[j] > arr[i]) {
temp = arr[j];
arr[j] = arr[i];
arr[i] = temp;
}
j++;
}
i++;
}
}
__device__ void odd_even_sort(int *arr, int n) {
bool sorted = false; // Initially array is unsorted
int temp;
while (!sorted) {
sorted = true;
// Perform Bubble sort on odd indexed element
for (int i = 1; i <= n - 2; i += 2) {
if (arr[i] > arr[i + 1]) {
temp = arr[i + 1];
arr[i + 1] = arr[i];
arr[i] = temp;
sorted = false;
}
}
// Perform Bubble sort on even indexed element
for (int i = 0; i <= n - 2; i += 2) {
if (arr[i] > arr[i + 1]) {
temp = arr[i + 1];
arr[i + 1] = arr[i];
arr[i] = temp;
sorted = false;
}
}
}
}
__device__ void cocktail_sort(int *arr, int n) {
bool swapped = true;
int i = 0;
int j = n - 1;
while (i < j && swapped) {
swapped = false;
for (int k = i; k < j; k++) {
if (arr[k] > arr[k + 1]) {
int temp = arr[k];
arr[k] = arr[k + 1];
arr[k + 1] = temp;
swapped = true;
}
}
j--;
if (swapped) {
swapped = false;
for (int k = j; k > i; k--) {
if (arr[k] < arr[k - 1]) {
int temp = arr[k];
arr[k] = arr[k - 1];
arr[k - 1] = temp;
swapped = true;
}
}
}
i++;
}
}
__device__ void comb_sort(int *arr, int n) {
int temp;
int gap = n;
int shrink = 3;
bool swapped = true; // initialised so the loop condition never reads an indeterminate value
while (gap != 1 || swapped == true) {
gap = (gap / shrink);
if (gap < 1) {
gap = 1;
}
swapped = false;
for (int i = 0; i < n - gap; i++) {
if (arr[i] > arr[i + gap]) {
temp = arr[i + gap];
arr[i + gap] = arr[i];
arr[i] = temp;
swapped = true;
}
}
}
}
__device__ void gnome_sort(int *arr, int n) {
int i = 0;
int temp;
while (i < n) {
if (i == 0 || arr[i] >= arr[i - 1]) {
i++;
} else {
temp = arr[i];
arr[i] = arr[i - 1];
arr[i - 1] = temp;
i--;
}
}
}
__device__ void insertion_sort(int *arr, int n) {
for (int i = 0; i < n; i++) {
int j = i;
while (j > 0 && arr[j - 1] > arr[j]) {
int temp = arr[j];
arr[j] = arr[j - 1];
arr[j - 1] = temp;
j--;
}
}
}
__device__ void shell_sort(int *arr, int n) {
int j;
for (int gap = n / 2; gap > 0; gap /= 2) {
for (int i = gap; i < n; ++i) {
int temp = arr[i];
for (j = i; j >= gap && temp < arr[j - gap]; j -= gap) {
arr[j] = arr[j - gap];
}
arr[j] = temp;
}
}
}
__device__ void selection_sort(int *arr, int n) {
int i, j;
for (j = 0; j < n - 1; j++) {
int iMin = j;
for (i = j + 1; i < n; i++) {
if (arr[i] < arr[iMin]) {
iMin = i;
}
}
if (iMin != j) {
int temp = arr[j];
arr[j] = arr[iMin];
arr[iMin] = temp;
}
}
}
__device__ int find_largest_number(int *arr, int n) {
int i;
int largest_number = -1;
for (i = 0; i < n; i++) {
if (arr[i] > largest_number) largest_number = arr[i];
}
return largest_number;
}
__device__ void radix_sort(int* arr, int n) {
// Base 10 is used
int i;
int *semi_sorted = (int*)malloc(n*sizeof(int));
int significant_digit = 1;
int largest_number = find_largest_number(arr, n);
// Loop until we reach the largest significant digit
while (largest_number / significant_digit > 0) {
int bucket[10] = {0};
// Counts the number of "keys" or digits that will go into each bucket
for (i = 0; i < n; i++) bucket[(arr[i] / significant_digit) % 10]++;
/**
* Add the count of the previous buckets,
* Acquires the indexes after the end of each bucket location in the arr
* Works similar to the count sort algorithm
**/
for (i = 1; i < 10; i++) bucket[i] += bucket[i - 1];
// Use the bucket to fill a "semi_sorted" arr
for (i = n - 1; i >= 0; i--)
semi_sorted[--bucket[(arr[i] / significant_digit) % 10]] = arr[i];
for (i = 0; i < n; i++) arr[i] = semi_sorted[i];
// Move to next significant digit
significant_digit *= 10;
}
free(semi_sorted); // release the scratch buffer allocated on the device heap
}
__device__ void flip(int *arr, int i) {
int temp, start = 0;
while (start < i) {
temp = arr[start];
arr[start] = arr[i];
arr[i] = temp;
start++;
i--;
}
}
__device__ int find_max(int *arr, int n) {
int mi, i;
for (mi = 0, i = 0; i < n; ++i)
if (arr[i] > arr[mi]) mi = i;
return mi;
}
__device__ int pancake_sort(int* arr, int n) {
for (int curr_size = n; curr_size > 1; --curr_size) {
int mi = find_max(arr, curr_size);
if (mi != curr_size - 1) {
flip(arr, mi);
flip(arr, curr_size - 1);
}
}
return 0; // result is unused by callers
}
__device__ void heap_sort(int a[], int n) {
build_heap(a, n);
int heap_size, i, temp;
heap_size = n - 1;
for (i = heap_size; i >= 0; i--) {
temp = a[0];
a[0] = a[heap_size];
a[heap_size] = temp;
heap_size--;
satisfy_heap(a, 0, heap_size);
}
}
__device__ void build_heap(int a[], int n) {
int i, heap_size;
heap_size = n - 1;
for (i = (n / 2); i >= 0; i--) {
satisfy_heap(a, i, heap_size);
}
}
__device__ void satisfy_heap(int a[], int i, int heap_size) {
int l, r, largest, temp;
l = 2 * i;
r = 2 * i + 1;
if (l <= heap_size && a[l] > a[i]) {
largest = l;
} else {
largest = i;
}
if (r <= heap_size && a[r] > a[largest]) {
largest = r;
}
if (largest != i) {
temp = a[i];
a[i] = a[largest];
a[largest] = temp;
satisfy_heap(a, largest, heap_size);
}
}
|
438dfc98772d7aae610a4b0ba9a62e0df56c44f2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_common.cuh"
#include "common.h"
__global__ void gpuRecursiveReduce(int *g_idata, int *g_odata,
unsigned int isize)
{
int tid = threadIdx.x;
int *idata = g_idata + blockIdx.x*blockDim.x;
int *odata = &g_odata[blockIdx.x];
//stop condition
if (isize == 2 && tid == 0)
{
g_odata[blockIdx.x] = idata[0] + idata[1];
return;
}
//nested invocation
int istride = isize >> 1;
if (istride > 1 && tid < istride)
{
//in place reduction
idata[tid] += idata[tid + istride];
}
//sync at block level
__syncthreads();
//nested invocation to generate child grids
if (tid == 0)
{
gpuRecursiveReduce << <1, istride >> > (idata, odata, istride);
hipDeviceSynchronize();
}
//sync at block level again
__syncthreads();
}
// ##################################################################
__global__ void gpuRecursiveReduce2(int *g_idata, int *g_odata,
unsigned int isize)
{
int tid = threadIdx.x;
int *idata = g_idata + blockIdx.x*blockDim.x;
int *odata = &g_odata[blockIdx.x];
if (isize == 2 && tid == 0)
{
g_odata[blockIdx.x] = idata[0] + idata[1];
return;
}
int istride = isize >> 4;
if (istride > 1 && tid < istride)
{
idata[tid] += idata[tid + istride*8];
idata[tid] += idata[tid + istride*4];
idata[tid] += idata[tid + istride*2];
idata[tid] += idata[tid + istride]; // This is wrong
}
//__syncthreads();
if (tid == 0)
{
gpuRecursiveReduce2 << <1, istride >> > (idata, odata, istride);
hipDeviceSynchronize();
}
//sync at block level again
__syncthreads();
}
int main(int argc, char ** argv)
{
printf("Running parallel reduction with interleaved pairs kernel \n");
int size = 1 << 22;
int byte_size = size * sizeof(int);
int block_size = 512;
clock_t gpu_start, gpu_end,cpu_start, cpu_end;
int * h_input, *h_ref;
h_input = (int*)malloc(byte_size);
initialize(h_input, size, INIT_RANDOM);
cpu_start = clock();
int cpu_result = reduction_cpu(h_input, size);
cpu_end = clock();
dim3 block(block_size);
dim3 grid(size / block.x);
printf("Kernel launch parameters || grid : %d, block : %d \n", grid.x, block.x);
int temp_array_byte_size = sizeof(int)* grid.x;
h_ref = (int*)malloc(temp_array_byte_size);
int * d_input, *d_temp;
gpuErrchk(hipMalloc((void**)&d_input, byte_size));
gpuErrchk(hipMalloc((void**)&d_temp, temp_array_byte_size));
gpu_start = clock();
gpuErrchk(hipMemset(d_temp, 0, temp_array_byte_size));
gpuErrchk(hipMemcpy(d_input, h_input, byte_size,
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( gpuRecursiveReduce2) , dim3(grid), dim3(block) , 0, 0, d_input, d_temp,block_size);
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipMemcpy(h_ref, d_temp, temp_array_byte_size, hipMemcpyDeviceToHost));
int gpu_result = 0;
for (int i = 0; i < grid.x; i++)
{
gpu_result += h_ref[i];
}
gpu_end = clock();
print_time_using_host_clock(gpu_start, gpu_end);
printf("CPU kernel execution time : %4.6f \n",
(double)((double)(cpu_end - cpu_start) / CLOCKS_PER_SEC));
compare_results(gpu_result, cpu_result);
gpuErrchk(hipFree(d_input));
gpuErrchk(hipFree(d_temp));
free(h_input);
free(h_ref);
hipDeviceReset();
return 0;
}
| 438dfc98772d7aae610a4b0ba9a62e0df56c44f2.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_common.cuh"
#include "common.h"
__global__ void gpuRecursiveReduce(int *g_idata, int *g_odata,
unsigned int isize)
{
int tid = threadIdx.x;
int *idata = g_idata + blockIdx.x*blockDim.x;
int *odata = &g_odata[blockIdx.x];
//stop condition
if (isize == 2 && tid == 0)
{
g_odata[blockIdx.x] = idata[0] + idata[1];
return;
}
//nested invocation
int istride = isize >> 1;
if (istride > 1 && tid < istride)
{
//in place reduction
idata[tid] += idata[tid + istride];
}
//sync at block level
__syncthreads();
//nested invocation to generate child grids
if (tid == 0)
{
gpuRecursiveReduce << <1, istride >> > (idata, odata, istride);
cudaDeviceSynchronize();
}
//sync at block level again
__syncthreads();
}
// ##################################################################
__global__ void gpuRecursiveReduce2(int *g_idata, int *g_odata,
unsigned int isize)
{
int tid = threadIdx.x;
int *idata = g_idata + blockIdx.x*blockDim.x;
int *odata = &g_odata[blockIdx.x];
if (isize == 2 && tid == 0)
{
g_odata[blockIdx.x] = idata[0] + idata[1];
return;
}
int istride = isize >> 4;
if (istride > 1 && tid < istride)
{
idata[tid] += idata[tid + istride*8];
idata[tid] += idata[tid + istride*4];
idata[tid] += idata[tid + istride*2];
idata[tid] += idata[tid + istride]; // This is wrong
}
//__syncthreads();
if (tid == 0)
{
gpuRecursiveReduce2 << <1, istride >> > (idata, odata, istride);
cudaDeviceSynchronize();
}
//sync at block level again
__syncthreads();
}
int main(int argc, char ** argv)
{
printf("Running parallel reduction with interleaved pairs kernel \n");
int size = 1 << 22;
int byte_size = size * sizeof(int);
int block_size = 512;
clock_t gpu_start, gpu_end,cpu_start, cpu_end;
int * h_input, *h_ref;
h_input = (int*)malloc(byte_size);
initialize(h_input, size, INIT_RANDOM);
cpu_start = clock();
int cpu_result = reduction_cpu(h_input, size);
cpu_end = clock();
dim3 block(block_size);
dim3 grid(size / block.x);
printf("Kernel launch parameters || grid : %d, block : %d \n", grid.x, block.x);
int temp_array_byte_size = sizeof(int)* grid.x;
h_ref = (int*)malloc(temp_array_byte_size);
int * d_input, *d_temp;
gpuErrchk(cudaMalloc((void**)&d_input, byte_size));
gpuErrchk(cudaMalloc((void**)&d_temp, temp_array_byte_size));
gpu_start = clock();
gpuErrchk(cudaMemset(d_temp, 0, temp_array_byte_size));
gpuErrchk(cudaMemcpy(d_input, h_input, byte_size,
cudaMemcpyHostToDevice));
gpuRecursiveReduce2 <<< grid, block >>> (d_input, d_temp,block_size);
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpy(h_ref, d_temp, temp_array_byte_size, cudaMemcpyDeviceToHost));
int gpu_result = 0;
for (int i = 0; i < grid.x; i++)
{
gpu_result += h_ref[i];
}
gpu_end = clock();
print_time_using_host_clock(gpu_start, gpu_end);
printf("CPU kernel execution time : %4.6f \n",
(double)((double)(cpu_end - cpu_start) / CLOCKS_PER_SEC));
compare_results(gpu_result, cpu_result);
gpuErrchk(cudaFree(d_input));
gpuErrchk(cudaFree(d_temp));
free(h_input);
free(h_ref);
cudaDeviceReset();
return 0;
}
|
a1b2028850a5d96776be0b14979f6330e00bbab5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file preloaded_multi_sgd.cu
* \brief Multi-sgd optimizers with lrs and wds as mxnet inputs
* \author Clement Fuji Tsang
*/
#include "./preloaded_multi_sgd-inl.h"
#include <hipcub/hipcub.hpp>
namespace mxnet {
namespace op {
NNVM_REGISTER_OP(preloaded_multi_sgd_update)
.set_attr<FCompute>("FCompute<gpu>", PreloadedMultiSGDUpdate<gpu, preloaded_type_identity, 2>);
NNVM_REGISTER_OP(preloaded_multi_sgd_mom_update)
.set_attr<FCompute>("FCompute<gpu>",
PreloadedMultiSGDMomUpdate<gpu, preloaded_type_identity, 3>);
NNVM_REGISTER_OP(preloaded_multi_mp_sgd_update)
.set_attr<FCompute>("FCompute<gpu>",
PreloadedMultiSGDUpdate<gpu, preloaded_single_precision, 3>);
NNVM_REGISTER_OP(preloaded_multi_mp_sgd_mom_update)
.set_attr<FCompute>("FCompute<gpu>",
PreloadedMultiSGDMomUpdate<gpu, preloaded_single_precision, 4>);
} // namespace op
} // namespace mxnet
| a1b2028850a5d96776be0b14979f6330e00bbab5.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file preloaded_multi_sgd.cu
* \brief Multi-sgd optimizers with lrs and wds as mxnet inputs
* \author Clement Fuji Tsang
*/
#include "./preloaded_multi_sgd-inl.h"
#include <cub/cub.cuh>
namespace mxnet {
namespace op {
NNVM_REGISTER_OP(preloaded_multi_sgd_update)
.set_attr<FCompute>("FCompute<gpu>", PreloadedMultiSGDUpdate<gpu, preloaded_type_identity, 2>);
NNVM_REGISTER_OP(preloaded_multi_sgd_mom_update)
.set_attr<FCompute>("FCompute<gpu>",
PreloadedMultiSGDMomUpdate<gpu, preloaded_type_identity, 3>);
NNVM_REGISTER_OP(preloaded_multi_mp_sgd_update)
.set_attr<FCompute>("FCompute<gpu>",
PreloadedMultiSGDUpdate<gpu, preloaded_single_precision, 3>);
NNVM_REGISTER_OP(preloaded_multi_mp_sgd_mom_update)
.set_attr<FCompute>("FCompute<gpu>",
PreloadedMultiSGDMomUpdate<gpu, preloaded_single_precision, 4>);
} // namespace op
} // namespace mxnet
|
9df81bb83424179adcdd7212edff0be85342bce8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <math.h>
#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <boost/log/trivial.hpp>
#include "cudatools/errorHandling.cuh"
#include "surfaceAndVolume.cuh"
#include "marchingCubes.cuh"
#include "common_hip.cuh"
using namespace std;
using namespace commonns;
using namespace thrust;
#define COMPUTE_ADVANCED_STATS 1
// Utility
float calculateSum(float *d_data, int size){
thrust::device_ptr<float> dptr_d_data(d_data);
return thrust::reduce(dptr_d_data, dptr_d_data+size, (float) 0, thrust::plus<float>());
}
float4 calculateFloat4Sum(float4 *d_data, int size){
thrust::device_ptr<float4> dptr_d_data(d_data);
return thrust::reduce(dptr_d_data, dptr_d_data+size, make_float4(0,0,0,0), thrust::plus<float4>());
}
float3 calculateFloat3Sum(float3 *d_data, int size){
thrust::device_ptr<float3> dptr_d_data(d_data);
return thrust::reduce(dptr_d_data, dptr_d_data+size, make_float3(0.0f, 0.0f, 0.0f), thrust::plus<float3>());
}
// Surface
__device__ float calculateSurfaceInCube(float *d_data, Point3D p, Size3D size){
float voxelSurface(0.0f);
Triangle triangles[5];
Func3D<float> levelSet(size, d_data);
Cubes marchingCubes(levelSet);
if(marchingCubes.onIsoSurface(p.getInt3())){
int nOfTriangles = marchingCubes.getVoxelTriangles(p.getInt3(), triangles);
for(uint8_t triId = 0; triId < nOfTriangles; triId++){
voxelSurface += triangles[int(triId)].area();
}
}
return voxelSurface;
}
__global__ void computeSurfaceKernel(float *d_data, float *d_surface, float dh, Size3D size){
// Should allocate the shared memory to the marching cubes!
initSharedMemory();
Point3D p = getThread3D();
FltFunc3D surf(size, d_surface);
if(p >=0 && p < Point3D(size)-1){
surf[p] = pow(dh,2)*calculateSurfaceInCube(d_data, p, size);
} else if(!(p-Point3D(size)-1 > 0) && p < Point3D(size)) {
surf[p] = 0;
}
}
void computeSurface(DevFloatChk& data, DevFloatChk& surface, float dh, Size3D size){
GpuConf3D conf(size, 4);
hipLaunchKernelGGL(( computeSurfaceKernel), dim3(conf.grid()), dim3(conf.block()), MC_LOC_SIZE*64, 0, data.getPtr(), surface.getPtr(), dh, size);
KERNEL_ERROR_CHECK("calculateSurfaceKernel error");
}
float getSurface_global(DevFloatChk& data, DevFloatChk& surface, float dh, Size3D size){
GpuConf3D conf(size, 4);
hipLaunchKernelGGL(( computeSurfaceKernel), dim3(conf.grid()), dim3(conf.block()), MC_LOC_SIZE*64, 0, data.getPtr(), surface.getPtr(), dh, size);
KERNEL_ERROR_CHECK("calculateSurfaceKernel error");
return calculateSum(surface.getPtr(), size.vol());
}
// Volume
__device__ int checkOrientation(float3 A, float3 B){
if(A*B >= 0){
return 1;
} else {
return -1;
}
}
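// Signed volume contributed by one marching-cubes triangle: the tetrahedron spanned by the triangle
// and the origin, with the sign chosen from the triangle orientation and the level-set sign at the
// reference voxel.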
__device__ float computeVolume(float *d_data, Triangle tri, Point3D p, Size3D size){
float3 refVector = p.getFloat3()-tri.A;
FltFunc3D data(size, d_data);
// determine that the reference point (i,j,k) is below or above the surface
int refSign = sgn(data[p]);
// create vectors corresponding to sides of the triangle
float3 bma = tri.B-tri.A;
float3 cma = tri.C-tri.A;
// determine the orientation of s1 x s2 with respect to the surface
float3 cr = cross(bma,cma);
int orientation = checkOrientation(cr, refVector);
float V = (cr*tri.A)/6;
if(refSign == 1){
if(orientation == 1){
return V;
} else {
return -V;
}
} else {
if(orientation == 1){
return -V;
} else {
return V;
}
}
}
__device__ float calculateVolumeForCube(float *d_data, Point3D p, Size3D size){
float volume(0);
Triangle tris[5];
Func3D<float> levelSet(size, d_data);
Cubes marchingCubes(levelSet);
int nTr = marchingCubes.getVoxelTriangles(p.getInt3(), tris);
for(uint8_t triId = 0; triId < nTr; triId++){
Triangle tr = tris[triId];
volume += computeVolume(d_data,tr,Point3D(int(tr.A.x),int(tr.A.y),int(tr.A.z)),size);
}
return volume;
}
__global__ void calculateVolumeKernel(float *d_data, float *d_volume, float dh, Size3D size){
initSharedMemory();
Point3D p = getThread3D();
FltFunc3D vol(size, d_volume);
if(p >= 0 && p < Point3D(size)-1){
vol[p] = pow(dh, 3)*calculateVolumeForCube(d_data, p, size);
} else if((p*1 != p || !(p-Point3D(size)-1 > 0)) && p < Point3D(size)){
vol[p] = 0;
}
}
void launchCalculateVolume(DevFloatChk& data, DevFloatChk& volume, float dh, Size3D size){
GpuConf3D conf(size, 4);
hipLaunchKernelGGL(( calculateVolumeKernel), dim3(conf.grid()), dim3(conf.block()), MC_LOC_SIZE*64, 0, data.getPtr(), volume.getPtr(), dh, size);
KERNEL_ERROR_CHECK("calculateVolumeKernel error");
}
float getVolume_global(DevFloatChk& data, DevFloatChk& volume, float dh, Size3D size){
GpuConf3D conf(size, 4);
hipLaunchKernelGGL(( calculateVolumeKernel), dim3(conf.grid()), dim3(conf.block()), MC_LOC_SIZE*64, 0, data.getPtr(), volume.getPtr(), dh, size);
KERNEL_ERROR_CHECK("calculateVolumeKernel error");
return calculateSum(volume.getPtr(), size.vol());
}
// Center of gravity
__device__ float3 cogContribVoxel(float *d_data, float dh, Point3D p, Size3D size){
float3 preCOG = make_float3(0.0f, 0.0f, 0.0f);
Func3D<float> levelSet(size, d_data);
Cubes marchingCubes(levelSet);
Triangle triangles[5];
int nOfTriangles = marchingCubes.getVoxelTriangles(p.getInt3(), triangles);
for(int i = 0; i < nOfTriangles; i++){
Triangle tri = triangles[i];
float3 cogArea = tri.cog() * tri.area();
preCOG = preCOG + cogArea;
}
return dh*preCOG;
}
__global__ void centerOfGravityKernel(float *d_data, float3 *d_centerOfGravity, float dh, Size3D size){
initSharedMemory();
Point3D p = getThread3D();
Func3D<float3> cog(size, d_centerOfGravity);
if(p >= 0 && p < Point3D(size)-1){
cog[p] = cogContribVoxel(d_data, dh, p, size);
}
}
void launchCenterOfGravity(float *d_data, float3 *d_centerOfGravity, float dh, Size3D size){
GpuConf3D conf(size, 4);
hipLaunchKernelGGL(( centerOfGravityKernel), dim3(conf.grid()), dim3(conf.block()), MC_LOC_SIZE*64, 0, d_data, d_centerOfGravity, dh, size);
KERNEL_ERROR_CHECK("centerOfGravityKernel error");
}
// Global center of gravity...
float3 getCOG(DevFloatChk& data, DeviceChunk<float3>& centerOfGravity, float dh, Size3D gridDims){
launchCenterOfGravity(data.getPtr(), centerOfGravity.getPtr(), dh, gridDims);
float3 preCOG = calculateFloat3Sum(centerOfGravity.getPtr(), gridDims.vol());
return preCOG;
}
// Second moment
__device__ float cubeContribution(float* d_data, float3 cogObj, float dh, Point3D p, Size3D size){
if(!((Point3D(size)-1-p)>0)){
return 0;
}
float cogContribution(0.0f);
Triangle triangles[5];
Func3D<float> levelSet(size, d_data);
Cubes marchingCubes(levelSet);
if(marchingCubes.onIsoSurface(p.getInt3())){
int nOfTriangles = marchingCubes.getVoxelTriangles(p.getInt3(), triangles);
for(uint8_t triId = 0; triId < nOfTriangles; triId++){
Triangle tri = triangles[int(triId)];
float3 cogTri = tri.cog();
float3 R = cogTri-cogObj;
float Rsq = R*R;
float triContrib = Rsq*tri.area();
cogContribution += triContrib;
}
}
return cogContribution;
}
__global__ void secondMomentKernel(float *d_levelSet, float *d_secondMoment, float3* d_cog, int *d_nodeID, float dh, Size3D size){
initSharedMemory();
Point3D p = getThread3D();
IntFunc3D f_nodeID(size, d_nodeID);
FltFunc3D f_secondMoment(size, d_secondMoment);
if(p>=0 && p < Point3D(size)) {
if(p < Point3D(size)-1) {
int nodeID = f_nodeID[p];
if(nodeID == -1) {
f_secondMoment[p] = 0;
} else {
float3 cog = d_cog[nodeID+1];
f_secondMoment[p] = cubeContribution(d_levelSet, cog, dh, p, size);
}
} else {
f_secondMoment[p] = 0;
}
}
}
float getSecondMoment_global(DevFloatChk& levelSet, DevFloatChk& secondMoment, DeviceChunk<float3>& cog, DevIntChk& nodeID, float dh, Size3D gridDims){
GpuConf3D conf(gridDims, 4);
hipLaunchKernelGGL(( secondMomentKernel), dim3(conf.grid()), dim3(conf.block()), MC_LOC_SIZE*64, 0, levelSet.getPtr(), secondMoment.getPtr(), cog.getPtr(), nodeID.getPtr(), dh, gridDims);
KERNEL_ERROR_CHECK("secondMomentKernel error");
return calculateSum(secondMoment.getPtr(), gridDims.vol());
}
CompStats extractComponentStatistics(float* d_statistics, DevIntChk& ccresult, int numOfComponents, commonns::Dimensions dims){
// Step 0.: copy the ccresult array into a fresh working array so the later segmentation steps do not modify the original.
int ccNelements = ccresult.getElements();
DevIntChk cCcresult(ccNelements);
ccresult.copy(cCcresult);
// Step 1.: Sort the ccresult_copy and the statistics array by considering it as ccresult->statistics map and sort it by considering the ccresult as the key
thrust::device_ptr<int> ccresult_copy_ptr = cCcresult.getTPtr();
thrust::device_ptr<float> statistics_ptr(d_statistics);
thrust::sort_by_key(ccresult_copy_ptr, ccresult_copy_ptr + ccNelements, statistics_ptr);
// Step 2.: Parallel reduce the values assigned to the same key to the output arrays
auto resultComponentIds = unique_ptr<DevIntChk>(new DevIntChk(ccNelements));
auto resultComponentContributions = unique_ptr<DevFloatChk>(new DevFloatChk(ccNelements));
thrust::reduce_by_key(
ccresult_copy_ptr, ccresult_copy_ptr + dims.s(),
statistics_ptr,
resultComponentIds.get()->getTPtr(),
resultComponentContributions.get()->getTPtr());
CompStats ret;
ret.first = std::move(resultComponentIds);
ret.second = std::move(resultComponentContributions);
return ret;
}
CompStatsF3 extractComponentStatistics(float3* d_statistics, DevIntChk& ccresult, int numOfComponents, commonns::Dimensions dims){
// Step 0.: copy the ccresult array into a fresh working array so the later segmentation steps do not modify the original.
int ccNelements = ccresult.getElements();
DevIntChk cCcresult(ccNelements);
ccresult.copy(cCcresult);
// Step 1.: Sort the ccresult_copy and the statistics array by considering it as ccresult->statistics map and sort it by considering the ccresult as the key
thrust::device_ptr<int> ccresult_copy_ptr = cCcresult.getTPtr();
thrust::device_ptr<float3> statistics_ptr(d_statistics);
thrust::sort_by_key(ccresult_copy_ptr, ccresult_copy_ptr + ccNelements, statistics_ptr);
// Step 2.: Parallel reduce the values assigned to the same key to the output arrays
auto resultComponentIds = unique_ptr<DevIntChk>(new DevIntChk(ccNelements));
auto resultComponentContributions = unique_ptr<DeviceChunk<float3> >(new DeviceChunk<float3>(ccNelements));
thrust::reduce_by_key(
ccresult_copy_ptr, ccresult_copy_ptr + dims.s(),
statistics_ptr,
resultComponentIds.get()->getTPtr(),
resultComponentContributions.get()->getTPtr(),
thrust::equal_to<int>(),
thrust::plus<float3>());
CompStatsF3 ret;
ret.first = std::move(resultComponentIds);
ret.second = std::move(resultComponentContributions);
return ret;
}
void printStats(CurveProps& curParams, int nComps, ostream& stream){
HostFloatChk hSurf(nComps), hVol(nComps), hSm(nComps);
HostChunk<float3> hCog(nComps);
curParams.surfContribs.second->copyHostN(hSurf, nComps);
curParams.volContribs.second->copyHostN(hVol, nComps);
curParams.cogContribs.second->copyHostN(hCog, nComps);
curParams.smContribs.second->copyHostN(hSm, nComps);
BOOST_LOG_TRIVIAL(info) << "Components: (n=" << nComps-1 << ")";
for(int i = 1; i < nComps; i++){
float cSurf = hSurf[i];
float cVol = hVol[i];
float3 cCog = hCog[i];
float pPlasma = pow(cSurf, 3.0f/2.0f)/cVol;
float cSm = hSm[i];
BOOST_LOG_TRIVIAL(info) <<
"\t(" << i <<
"): volume=" << cVol <<
", surface=" << cSurf <<
", plasma=" << pPlasma <<
", center of gravity=(" << cCog.x << "," << cCog.y << "," << cCog.z <<
") second moment=" << cSm;
}
BOOST_LOG_TRIVIAL(info) << "Total: " << "volume=" << curParams.vol << ", surface=" << curParams.surf;
}
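// Gathers all per-component statistics of the current level set: surface and volume (always),
// plus center of gravity and second moment when COMPUTE_ADVANCED_STATS is enabled.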
CurveProps computeStats(unique_ptr<DevFloatChk>& ls1, unique_ptr<DevIntChk>& ccResult, int nComps, GridParams gParams){
DevFloatChk aux(gParams.gridSize.vol());
CurveProps curParams;
// Surface
curParams.surf = getSurface_global(*ls1, aux, gParams.gridRes, gParams.gridSize);
curParams.surfContribs = extractComponentStatistics(aux.getPtr(), *ccResult, nComps, gParams.gridSize.getd());
//float* surfC = curParams.surfContribs.second->getPtr();
//fill(device, surfC, surfC+curParams.surfContribs.second->getElements(), curParams.surf);
// Volume
curParams.vol = getVolume_global(*ls1, aux, gParams.gridRes, gParams.gridSize);
curParams.volContribs = extractComponentStatistics(aux.getPtr(), *ccResult, nComps, gParams.gridSize.getd());
//float* volC = curParams.volContribs.second->getPtr();
//fill(device, volC, volC+curParams.volContribs.second->getElements(), curParams.vol);
if(COMPUTE_ADVANCED_STATS){
// CoG
DeviceChunk<float3> auxf3(gParams.gridSize.vol());
curParams.cog = getCOG(*ls1, auxf3, gParams.gridRes, gParams.gridSize);
curParams.cogContribs = extractComponentStatistics(auxf3.getPtr(), *ccResult, nComps, gParams.gridSize.getd());
// Second moment
curParams.sm = getSecondMoment_global(*ls1, aux, *(curParams.cogContribs.second), *ccResult, gParams.gridRes, gParams.gridSize);
curParams.smContribs = extractComponentStatistics(aux.getPtr(), *ccResult, nComps, gParams.gridSize.getd());
}
return curParams;
}
| 9df81bb83424179adcdd7212edff0be85342bce8.cu | #include <iostream>
#include <stdio.h>
#include <math.h>
#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <boost/log/trivial.hpp>
#include "cudatools/errorHandling.cuh"
#include "surfaceAndVolume.cuh"
#include "marchingCubes.cuh"
#include "common.cuh"
using namespace std;
using namespace commonns;
using namespace thrust;
#define COMPUTE_ADVANCED_STATS 1
// Utility
float calculateSum(float *d_data, int size){
thrust::device_ptr<float> dptr_d_data(d_data);
return thrust::reduce(dptr_d_data, dptr_d_data+size, (float) 0, thrust::plus<float>());
}
float4 calculateFloat4Sum(float4 *d_data, int size){
thrust::device_ptr<float4> dptr_d_data(d_data);
return thrust::reduce(dptr_d_data, dptr_d_data+size, make_float4(0,0,0,0), thrust::plus<float4>());
}
float3 calculateFloat3Sum(float3 *d_data, int size){
thrust::device_ptr<float3> dptr_d_data(d_data);
return thrust::reduce(dptr_d_data, dptr_d_data+size, make_float3(0.0f, 0.0f, 0.0f), thrust::plus<float3>());
}
// Surface
__device__ float calculateSurfaceInCube(float *d_data, Point3D p, Size3D size){
float voxelSurface(0.0f);
Triangle triangles[5];
Func3D<float> levelSet(size, d_data);
Cubes marchingCubes(levelSet);
if(marchingCubes.onIsoSurface(p.getInt3())){
int nOfTriangles = marchingCubes.getVoxelTriangles(p.getInt3(), triangles);
for(uint8_t triId = 0; triId < nOfTriangles; triId++){
voxelSurface += triangles[int(triId)].area();
}
}
return voxelSurface;
}
__global__ void computeSurfaceKernel(float *d_data, float *d_surface, float dh, Size3D size){
// Should allocate the shared memory to the marching cubes!
initSharedMemory();
Point3D p = getThread3D();
FltFunc3D surf(size, d_surface);
if(p >=0 && p < Point3D(size)-1){
surf[p] = pow(dh,2)*calculateSurfaceInCube(d_data, p, size);
} else if(!(p-Point3D(size)-1 > 0) && p < Point3D(size)) {
surf[p] = 0;
}
}
void computeSurface(DevFloatChk& data, DevFloatChk& surface, float dh, Size3D size){
GpuConf3D conf(size, 4);
computeSurfaceKernel<<<conf.grid(), conf.block(), MC_LOC_SIZE*64>>>(data.getPtr(), surface.getPtr(), dh, size);
KERNEL_ERROR_CHECK("calculateSurfaceKernel error");
}
float getSurface_global(DevFloatChk& data, DevFloatChk& surface, float dh, Size3D size){
GpuConf3D conf(size, 4);
computeSurfaceKernel<<<conf.grid(), conf.block(), MC_LOC_SIZE*64>>>(data.getPtr(), surface.getPtr(), dh, size);
KERNEL_ERROR_CHECK("calculateSurfaceKernel error");
return calculateSum(surface.getPtr(), size.vol());
}
// Volume
__device__ int checkOrientation(float3 A, float3 B){
if(A*B >= 0){
return 1;
} else {
return -1;
}
}
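// Signed volume contributed by one marching-cubes triangle: the tetrahedron spanned by the triangle
// and the origin, with the sign chosen from the triangle orientation and the level-set sign at the
// reference voxel.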
__device__ float computeVolume(float *d_data, Triangle tri, Point3D p, Size3D size){
float3 refVector = p.getFloat3()-tri.A;
FltFunc3D data(size, d_data);
// determine that the reference point (i,j,k) is below or above the surface
int refSign = sgn(data[p]);
// create vectors corresponding to sides of the triangle
float3 bma = tri.B-tri.A;
float3 cma = tri.C-tri.A;
// determine the orientation of s1 x s2 with respect to the surface
float3 cr = cross(bma,cma);
int orientation = checkOrientation(cr, refVector);
float V = (cr*tri.A)/6;
if(refSign == 1){
if(orientation == 1){
return V;
} else {
return -V;
}
} else {
if(orientation == 1){
return -V;
} else {
return V;
}
}
}
__device__ float calculateVolumeForCube(float *d_data, Point3D p, Size3D size){
float volume(0);
Triangle tris[5];
Func3D<float> levelSet(size, d_data);
Cubes marchingCubes(levelSet);
int nTr = marchingCubes.getVoxelTriangles(p.getInt3(), tris);
for(uint8_t triId = 0; triId < nTr; triId++){
Triangle tr = tris[triId];
volume += computeVolume(d_data,tr,Point3D(int(tr.A.x),int(tr.A.y),int(tr.A.z)),size);
}
return volume;
}
__global__ void calculateVolumeKernel(float *d_data, float *d_volume, float dh, Size3D size){
initSharedMemory();
Point3D p = getThread3D();
FltFunc3D vol(size, d_volume);
if(p >= 0 && p < Point3D(size)-1){
vol[p] = pow(dh, 3)*calculateVolumeForCube(d_data, p, size);
} else if((p*1 != p || !(p-Point3D(size)-1 > 0)) && p < Point3D(size)){
vol[p] = 0;
}
}
void launchCalculateVolume(DevFloatChk& data, DevFloatChk& volume, float dh, Size3D size){
GpuConf3D conf(size, 4);
calculateVolumeKernel<<<conf.grid(), conf.block(), MC_LOC_SIZE*64>>>(data.getPtr(), volume.getPtr(), dh, size);
KERNEL_ERROR_CHECK("calculateVolumeKernel error");
}
float getVolume_global(DevFloatChk& data, DevFloatChk& volume, float dh, Size3D size){
GpuConf3D conf(size, 4);
calculateVolumeKernel<<<conf.grid(), conf.block(), MC_LOC_SIZE*64>>>(data.getPtr(), volume.getPtr(), dh, size);
KERNEL_ERROR_CHECK("calculateVolumeKernel error");
return calculateSum(volume.getPtr(), size.vol());
}
// Center of gravity
__device__ float3 cogContribVoxel(float *d_data, float dh, Point3D p, Size3D size){
float3 preCOG = make_float3(0.0f, 0.0f, 0.0f);
Func3D<float> levelSet(size, d_data);
Cubes marchingCubes(levelSet);
Triangle triangles[5];
int nOfTriangles = marchingCubes.getVoxelTriangles(p.getInt3(), triangles);
for(int i = 0; i < nOfTriangles; i++){
Triangle tri = triangles[i];
float3 cogArea = tri.cog() * tri.area();
preCOG = preCOG + cogArea;
}
return dh*preCOG;
}
__global__ void centerOfGravityKernel(float *d_data, float3 *d_centerOfGravity, float dh, Size3D size){
initSharedMemory();
Point3D p = getThread3D();
Func3D<float3> cog(size, d_centerOfGravity);
if(p >= 0 && p < Point3D(size)-1){
cog[p] = cogContribVoxel(d_data, dh, p, size);
}
}
void launchCenterOfGravity(float *d_data, float3 *d_centerOfGravity, float dh, Size3D size){
GpuConf3D conf(size, 4);
centerOfGravityKernel<<<conf.grid(), conf.block(), MC_LOC_SIZE*64>>>(d_data, d_centerOfGravity, dh, size);
KERNEL_ERROR_CHECK("centerOfGravityKernel error");
}
// Global center of gravity...
float3 getCOG(DevFloatChk& data, DeviceChunk<float3>& centerOfGravity, float dh, Size3D gridDims){
launchCenterOfGravity(data.getPtr(), centerOfGravity.getPtr(), dh, gridDims);
float3 preCOG = calculateFloat3Sum(centerOfGravity.getPtr(), gridDims.vol());
return preCOG;
}
// Second moment
__device__ float cubeContribution(float* d_data, float3 cogObj, float dh, Point3D p, Size3D size){
if(!((Point3D(size)-1-p)>0)){
return 0;
}
float cogContribution(0.0f);
Triangle triangles[5];
Func3D<float> levelSet(size, d_data);
Cubes marchingCubes(levelSet);
if(marchingCubes.onIsoSurface(p.getInt3())){
int nOfTriangles = marchingCubes.getVoxelTriangles(p.getInt3(), triangles);
for(uint8_t triId = 0; triId < nOfTriangles; triId++){
Triangle tri = triangles[int(triId)];
float3 cogTri = tri.cog();
float3 R = cogTri-cogObj;
float Rsq = R*R;
float triContrib = Rsq*tri.area();
cogContribution += triContrib;
}
}
return cogContribution;
}
__global__ void secondMomentKernel(float *d_levelSet, float *d_secondMoment, float3* d_cog, int *d_nodeID, float dh, Size3D size){
initSharedMemory();
Point3D p = getThread3D();
IntFunc3D f_nodeID(size, d_nodeID);
FltFunc3D f_secondMoment(size, d_secondMoment);
if(p>=0 && p < Point3D(size)) {
if(p < Point3D(size)-1) {
int nodeID = f_nodeID[p];
if(nodeID == -1) {
f_secondMoment[p] = 0;
} else {
float3 cog = d_cog[nodeID+1];
f_secondMoment[p] = cubeContribution(d_levelSet, cog, dh, p, size);
}
} else {
f_secondMoment[p] = 0;
}
}
}
float getSecondMoment_global(DevFloatChk& levelSet, DevFloatChk& secondMoment, DeviceChunk<float3>& cog, DevIntChk& nodeID, float dh, Size3D gridDims){
GpuConf3D conf(gridDims, 4);
secondMomentKernel<<<conf.grid(), conf.block(), MC_LOC_SIZE*64>>>(levelSet.getPtr(), secondMoment.getPtr(), cog.getPtr(), nodeID.getPtr(), dh, gridDims);
KERNEL_ERROR_CHECK("secondMomentKernel error");
return calculateSum(secondMoment.getPtr(), gridDims.vol());
}
CompStats extractComponentStatistics(float* d_statistics, DevIntChk& ccresult, int numOfComponents, commonns::Dimensions dims){
// Step 0.: copy the ccresult array into a fresh working array so the later segmentation steps do not modify the original.
int ccNelements = ccresult.getElements();
DevIntChk cCcresult(ccNelements);
ccresult.copy(cCcresult);
// Step 1.: Sort the ccresult_copy and the statistics array by considering it as ccresult->statistics map and sort it by considering the ccresult as the key
thrust::device_ptr<int> ccresult_copy_ptr = cCcresult.getTPtr();
thrust::device_ptr<float> statistics_ptr(d_statistics);
thrust::sort_by_key(ccresult_copy_ptr, ccresult_copy_ptr + ccNelements, statistics_ptr);
// Step 2.: Parallel reduce the values assigned to the same key to the output arrays
auto resultComponentIds = unique_ptr<DevIntChk>(new DevIntChk(ccNelements));
auto resultComponentContributions = unique_ptr<DevFloatChk>(new DevFloatChk(ccNelements));
thrust::reduce_by_key(
ccresult_copy_ptr, ccresult_copy_ptr + dims.s(),
statistics_ptr,
resultComponentIds.get()->getTPtr(),
resultComponentContributions.get()->getTPtr());
CompStats ret;
ret.first = std::move(resultComponentIds);
ret.second = std::move(resultComponentContributions);
return ret;
}
CompStatsF3 extractComponentStatistics(float3* d_statistics, DevIntChk& ccresult, int numOfComponents, commonns::Dimensions dims){
// Step 0.: copy the ccresult array into a fresh working array so the later segmentation steps do not modify the original.
int ccNelements = ccresult.getElements();
DevIntChk cCcresult(ccNelements);
ccresult.copy(cCcresult);
// Step 1.: Sort the ccresult_copy and the statistics array by considering it as ccresult->statistics map and sort it by considering the ccresult as the key
thrust::device_ptr<int> ccresult_copy_ptr = cCcresult.getTPtr();
thrust::device_ptr<float3> statistics_ptr(d_statistics);
thrust::sort_by_key(ccresult_copy_ptr, ccresult_copy_ptr + ccNelements, statistics_ptr);
// Step 2.: Parallel reduce the values assigned to the same key to the output arrays
auto resultComponentIds = unique_ptr<DevIntChk>(new DevIntChk(ccNelements));
auto resultComponentContributions = unique_ptr<DeviceChunk<float3> >(new DeviceChunk<float3>(ccNelements));
thrust::reduce_by_key(
ccresult_copy_ptr, ccresult_copy_ptr + dims.s(),
statistics_ptr,
resultComponentIds.get()->getTPtr(),
resultComponentContributions.get()->getTPtr(),
thrust::equal_to<int>(),
thrust::plus<float3>());
CompStatsF3 ret;
ret.first = std::move(resultComponentIds);
ret.second = std::move(resultComponentContributions);
return ret;
}
void printStats(CurveProps& curParams, int nComps, ostream& stream){
HostFloatChk hSurf(nComps), hVol(nComps), hSm(nComps);
HostChunk<float3> hCog(nComps);
curParams.surfContribs.second->copyHostN(hSurf, nComps);
curParams.volContribs.second->copyHostN(hVol, nComps);
curParams.cogContribs.second->copyHostN(hCog, nComps);
curParams.smContribs.second->copyHostN(hSm, nComps);
BOOST_LOG_TRIVIAL(info) << "Components: (n=" << nComps-1 << ")";
for(int i = 1; i < nComps; i++){
float cSurf = hSurf[i];
float cVol = hVol[i];
float3 cCog = hCog[i];
float pPlasma = pow(cSurf, 3.0f/2.0f)/cVol;
float cSm = hSm[i];
BOOST_LOG_TRIVIAL(info) <<
"\t(" << i <<
"): volume=" << cVol <<
", surface=" << cSurf <<
", plasma=" << pPlasma <<
", center of gravity=(" << cCog.x << "," << cCog.y << "," << cCog.z <<
") second moment=" << cSm;
}
BOOST_LOG_TRIVIAL(info) << "Total: " << "volume=" << curParams.vol << ", surface=" << curParams.surf;
}
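// Gathers all per-component statistics of the current level set: surface and volume (always),
// plus center of gravity and second moment when COMPUTE_ADVANCED_STATS is enabled.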
CurveProps computeStats(unique_ptr<DevFloatChk>& ls1, unique_ptr<DevIntChk>& ccResult, int nComps, GridParams gParams){
DevFloatChk aux(gParams.gridSize.vol());
CurveProps curParams;
// Surface
curParams.surf = getSurface_global(*ls1, aux, gParams.gridRes, gParams.gridSize);
curParams.surfContribs = extractComponentStatistics(aux.getPtr(), *ccResult, nComps, gParams.gridSize.getd());
//float* surfC = curParams.surfContribs.second->getPtr();
//fill(device, surfC, surfC+curParams.surfContribs.second->getElements(), curParams.surf);
// Volume
curParams.vol = getVolume_global(*ls1, aux, gParams.gridRes, gParams.gridSize);
curParams.volContribs = extractComponentStatistics(aux.getPtr(), *ccResult, nComps, gParams.gridSize.getd());
//float* volC = curParams.volContribs.second->getPtr();
//fill(device, volC, volC+curParams.volContribs.second->getElements(), curParams.vol);
if(COMPUTE_ADVANCED_STATS){
// CoG
DeviceChunk<float3> auxf3(gParams.gridSize.vol());
curParams.cog = getCOG(*ls1, auxf3, gParams.gridRes, gParams.gridSize);
curParams.cogContribs = extractComponentStatistics(auxf3.getPtr(), *ccResult, nComps, gParams.gridSize.getd());
// Second moment
curParams.sm = getSecondMoment_global(*ls1, aux, *(curParams.cogContribs.second), *ccResult, gParams.gridRes, gParams.gridSize);
curParams.smContribs = extractComponentStatistics(aux.getPtr(), *ccResult, nComps, gParams.gridSize.getd());
}
return curParams;
}
|
47f1005f0ec7dc739a47e9c924160fe16c0d1daa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file ex_particle_OPENMP_seq.c
* @author Michael Trotter & Matt Goodrum
* @brief Particle filter implementation in C/OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#define PI acos(-1)
#define BLOCK_X 16
#define BLOCK_Y 16
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
const int threads_per_block = 128;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) + tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
float elapsed_time(long long start_time, long long end_time) {
return (float) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
void check_error(hipError_t e) {
if (e != hipSuccess) {
printf("\nCUDA error: %s\n", hipGetErrorString(e));
exit(1);
}
}
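/**
* Device-side sequential scan over the CDF: returns the index of the first entry >= value,
* or the last index if no entry qualifies.
*/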
__device__ int findIndexSeq(double * CDF, int lengthCDF, double value)
{
int index = -1;
int x;
for(x = 0; x < lengthCDF; x++)
{
if(CDF[x] >= value)
{
index = x;
break;
}
}
if(index == -1)
return lengthCDF-1;
return index;
}
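/**
* Device-side binary search over the CDF: returns the index of the first entry >= value within
* [beginIndex, endIndex], or -1 when no such index is located.
*/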
__device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value)
{
if(endIndex < beginIndex)
return -1;
int middleIndex;
while(endIndex > beginIndex)
{
middleIndex = beginIndex + ((endIndex-beginIndex)/2);
if(CDF[middleIndex] >= value)
{
if(middleIndex == 0)
return middleIndex;
else if(CDF[middleIndex-1] < value)
return middleIndex;
else if(CDF[middleIndex-1] == value)
{
while(middleIndex >= 0 && CDF[middleIndex] == value)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if(CDF[middleIndex] > value)
endIndex = middleIndex-1;
else
beginIndex = middleIndex+1;
}
return -1;
}
/*****************************
* CUDA Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: Nparticles
*****************************/
__global__ void kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, int Nparticles){
int block_id = blockIdx.x;// + gridDim.x * blockIdx.y;
int i = blockDim.x * block_id + threadIdx.x;
if(i < Nparticles){
int index = -1;
int x;
for(x = 0; x < Nparticles; x++){
if(CDF[x] >= u[i]){
index = x;
break;
}
}
if(index == -1){
index = Nparticles-1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
}
}
/**
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
double roundDouble(double value){
int newValue = (int)(value);
if(value - newValue < .5)
return newValue;
else
return newValue + 1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
double randu(int * seed, int index)
{
int num = A*seed[index] + C;
seed[index] = num % M;
return fabs(seed[index]/((double) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
double randn(int * seed, int index){
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2*PI*v);
double rt = -2*log(u);
return sqrt(rt)*cosine;
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius)
{
int diameter = radius*2 - 1;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2));
if(distance < radius)
disk[x*diameter + y] = 1;
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error)
{
int startX = posX - error;
while(startX < 0)
startX++;
int startY = posY - error;
while(startY < 0)
startY++;
int endX = posX + error;
while(endX > dimX)
endX--;
int endY = posY + error;
while(endY > dimY)
endY--;
int x,y;
for(x = startX; x < endX; x++){
for(y = startY; y < endY; y++){
double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) );
if(distance < error)
matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix)
{
int x, y, z;
for(z = 0; z < dimZ; z++){
for(x = 0; x < dimX; x++){
for(y = 0; y < dimY; y++){
if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, double * neighbors, int radius){
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius*2 -1;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(se[x*diameter + y]){
neighbors[neighY*2] = (int)(y - center);
neighbors[neighY*2 + 1] = (int)(x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the backgrounf intensity is known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){
int k;
int max_size = IszX*IszY*Nfr;
/*get object centers*/
int x0 = (int)roundDouble(IszY/2.0);
int y0 = (int)roundDouble(IszX/2.0);
I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for(k = 1; k < Nfr; k++){
xk = abs(x0 + (k-1));
yk = abs(y0 - 2*(k-1));
pos = yk * IszY * Nfr + xk *Nfr + k;
if(pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for(x = 0; x < IszX; x++){
for(y = 0; y < IszY; y++){
for(k = 0; k < Nfr; k++){
I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Determines the likelihood sum based on the formula: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 100
* @param I The 3D matrix
* @param ind The current ind array
* @param numOnes The length of ind array
* @return A double representing the sum
*/
double calcLikelihoodSum(int * I, int * ind, int numOnes){
double likelihoodSum = 0.0;
int y;
for(y = 0; y < numOnes; y++)
likelihoodSum += (pow((double)(I[ind[y]] - 100),2) - pow((double)(I[ind[y]]-228),2))/50.0;
return likelihoodSum;
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(double * CDF, int lengthCDF, double value){
int index = -1;
int x;
for(x = 0; x < lengthCDF; x++){
if(CDF[x] >= value){
index = x;
break;
}
}
if(index == -1){
return lengthCDF-1;
}
return index;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){
int max_size = IszX*IszY*Nfr;
long long start = get_time();
//original particle centroid
double xe = roundDouble(IszY/2.0);
double ye = roundDouble(IszX/2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius*2 - 1;
int * disk = (int *)malloc(diameter*diameter*sizeof(int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(disk[x*diameter + y] == 1)
countOnes++;
}
}
double * objxy = (double *)malloc(countOnes*2*sizeof(double));
getneighbors(disk, countOnes, objxy, radius);
long long get_neighbors = get_time();
printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors));
//initial weights are all equal (1/Nparticles)
double * weights = (double *)malloc(sizeof(double)*Nparticles);
for(x = 0; x < Nparticles; x++){
weights[x] = 1/((double)(Nparticles));
}
long long get_weights = get_time();
printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights));
//initial likelihood to 0.0
double * likelihood = (double *)malloc(sizeof(double)*Nparticles);
double * arrayX = (double *)malloc(sizeof(double)*Nparticles);
double * arrayY = (double *)malloc(sizeof(double)*Nparticles);
double * xj = (double *)malloc(sizeof(double)*Nparticles);
double * yj = (double *)malloc(sizeof(double)*Nparticles);
double * CDF = (double *)malloc(sizeof(double)*Nparticles);
//GPU copies of arrays
double * arrayX_GPU;
double * arrayY_GPU;
double * xj_GPU;
double * yj_GPU;
double * CDF_GPU;
int * ind = (int*)malloc(sizeof(int)*countOnes);
double * u = (double *)malloc(sizeof(double)*Nparticles);
double * u_GPU;
//CUDA memory allocation
check_error(hipMalloc((void **) &arrayX_GPU, sizeof(double)*Nparticles));
check_error(hipMalloc((void **) &arrayY_GPU, sizeof(double)*Nparticles));
check_error(hipMalloc((void **) &xj_GPU, sizeof(double)*Nparticles));
check_error(hipMalloc((void **) &yj_GPU, sizeof(double)*Nparticles));
check_error(hipMalloc((void **) &CDF_GPU, sizeof(double)*Nparticles));
check_error(hipMalloc((void **) &u_GPU, sizeof(double)*Nparticles));
for(x = 0; x < Nparticles; x++){
arrayX[x] = xe;
arrayY[x] = ye;
}
int k;
//double * Ik = (double *)malloc(sizeof(double)*IszX*IszY);
int indX, indY;
for(k = 1; k < Nfr; k++){
long long set_arrays = get_time();
//printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, set_arrays));
//apply motion model
//draws sample from motion model (random walk). The only prior information
//is that the object moves about twice as fast in the y direction as in the x direction
for(x = 0; x < Nparticles; x++){
arrayX[x] = arrayX[x] + 1.0 + 5.0*randn(seed, x);
arrayY[x] = arrayY[x] - 2.0 + 2.0*randn(seed, x);
}
//particle filter likelihood
long long error = get_time();
printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error));
for(x = 0; x < Nparticles; x++){
//compute the likelihood: remember our assumption is that you know
// the foreground and the background image intensity distributions.
// Notice that we consider here a likelihood ratio, instead of
// p(z|x). It is possible in this case. Why? A homework exercise for you.
//calc ind
for(y = 0; y < countOnes; y++){
indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
indY = roundDouble(arrayY[x]) + objxy[y*2];
ind[y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
if(ind[y] >= max_size)
ind[y] = 0;
}
likelihood[x] = calcLikelihoodSum(I, ind, countOnes);
likelihood[x] = likelihood[x]/countOnes;
}
long long likelihood_time = get_time();
printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time));
// update & normalize weights
// using equation (63) of Arulampalam Tutorial
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
}
long long exponential = get_time();
printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential));
double sumWeights = 0;
for(x = 0; x < Nparticles; x++){
sumWeights += weights[x];
}
long long sum_time = get_time();
printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time));
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x]/sumWeights;
}
long long normalize = get_time();
printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize));
xe = 0;
ye = 0;
// estimate the object location by expected values
for(x = 0; x < Nparticles; x++){
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
long long move_time = get_time();
printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time));
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) );
printf("%lf\n", distance);
//display(hold off for now)
//pause(hold off for now)
//resampling
CDF[0] = weights[0];
for(x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
long long cum_sum = get_time();
printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum));
double u1 = (1/((double)(Nparticles)))*randu(seed, 0);
for(x = 0; x < Nparticles; x++){
u[x] = u1 + x/((double)(Nparticles));
}
long long u_time = get_time();
printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time));
long long start_copy = get_time();
//CUDA memory copying from CPU memory to GPU memory
hipMemcpy(arrayX_GPU, arrayX, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(arrayY_GPU, arrayY, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(xj_GPU, xj, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(yj_GPU, yj, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(CDF_GPU, CDF, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
hipMemcpy(u_GPU, u, sizeof(double)*Nparticles, hipMemcpyHostToDevice);
long long end_copy = get_time();
//Set number of threads
int num_blocks = ceil((double) Nparticles/(double) threads_per_block);
//KERNEL FUNCTION CALL
hipLaunchKernelGGL(( kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, Nparticles);
hipDeviceSynchronize();
long long start_copy_back = get_time();
//CUDA memory copying back from GPU to CPU memory
hipMemcpy(yj, yj_GPU, sizeof(double)*Nparticles, hipMemcpyDeviceToHost);
hipMemcpy(xj, xj_GPU, sizeof(double)*Nparticles, hipMemcpyDeviceToHost);
long long end_copy_back = get_time();
printf("SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy));
printf("CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back));
printf("SENDING BACK FROM GPU TOOK: %lf\n", elapsed_time(start_copy_back, end_copy_back));
long long xyj_time = get_time();
printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time));
for(x = 0; x < Nparticles; x++){
//reassign arrayX and arrayY
arrayX[x] = xj[x];
arrayY[x] = yj[x];
weights[x] = 1/((double)(Nparticles));
}
long long reset = get_time();
printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset));
}
//CUDA freeing of memory
hipFree(u_GPU);
hipFree(CDF_GPU);
hipFree(yj_GPU);
hipFree(xj_GPU);
hipFree(arrayY_GPU);
hipFree(arrayX_GPU);
//free memory
free(disk);
free(objxy);
free(weights);
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(u);
free(ind);
}
int main(int argc, char * argv[]){
char* usage = "naive.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
//check number of arguments
if(argc != 9)
{
printf("%s\n", usage);
return 0;
}
//check args delimiters
if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) {
printf( "%s\n",usage );
return 0;
}
int IszX, IszY, Nfr, Nparticles;
//converting a string to an integer
if( sscanf( argv[2], "%d", &IszX ) == EOF ) {
printf("ERROR: dimX input is incorrect");
return 0;
}
if( IszX <= 0 ) {
printf("dimX must be > 0\n");
return 0;
}
//converting a string to an integer
if( sscanf( argv[4], "%d", &IszY ) == EOF ) {
printf("ERROR: dimY input is incorrect");
return 0;
}
if( IszY <= 0 ) {
printf("dimY must be > 0\n");
return 0;
}
//converting a string to an integer
if( sscanf( argv[6], "%d", &Nfr ) == EOF ) {
printf("ERROR: Number of frames input is incorrect");
return 0;
}
if( Nfr <= 0 ) {
printf("number of frames must be > 0\n");
return 0;
}
//converting a string to an integer
if( sscanf( argv[8], "%d", &Nparticles ) == EOF ) {
printf("ERROR: Number of particles input is incorrect");
return 0;
}
if( Nparticles <= 0 ) {
printf("Number of particles must be > 0\n");
return 0;
}
//establish seed
int * seed = (int *)malloc(sizeof(int)*Nparticles);
int i;
for(i = 0; i < Nparticles; i++)
seed[i] = time(0)*i;
//malloc matrix
int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
long long start = get_time();
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
free(seed);
free(I);
return 0;
}
| 47f1005f0ec7dc739a47e9c924160fe16c0d1daa.cu | /**
* @file ex_particle_OPENMP_seq.c
* @author Michael Trotter & Matt Goodrum
* @brief Particle filter implementation in CUDA (ported from the C/OpenMP version)
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#define PI acos(-1)
#define BLOCK_X 16
#define BLOCK_Y 16
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
const int threads_per_block = 128;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) + tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
float elapsed_time(long long start_time, long long end_time) {
return (float) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
void check_error(cudaError e) {
if (e != cudaSuccess) {
printf("\nCUDA error: %s\n", cudaGetErrorString(e));
exit(1);
}
}
__device__ int findIndexSeq(double * CDF, int lengthCDF, double value)
{
int index = -1;
int x;
for(x = 0; x < lengthCDF; x++)
{
if(CDF[x] >= value)
{
index = x;
break;
}
}
if(index == -1)
return lengthCDF-1;
return index;
}
__device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value)
{
if(endIndex < beginIndex)
return -1;
int middleIndex;
while(endIndex > beginIndex)
{
middleIndex = beginIndex + ((endIndex-beginIndex)/2);
if(CDF[middleIndex] >= value)
{
if(middleIndex == 0)
return middleIndex;
else if(CDF[middleIndex-1] < value)
return middleIndex;
else if(CDF[middleIndex-1] == value)
{
while(CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if(CDF[middleIndex] > value)
endIndex = middleIndex-1;
else
beginIndex = middleIndex+1;
}
return -1;
}
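/*
 * A small worked example of the binary search above, assuming CDF = {0.1, 0.25, 0.6, 1.0}
 * and value = 0.3: the first probe at index 1 sees 0.25 < 0.3 and moves beginIndex to 2,
 * the second probe at index 2 sees 0.6 >= 0.3 with CDF[1] < 0.3, so the function returns 2.
 */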
/*****************************
* CUDA Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: Nparticles
*****************************/
__global__ void kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, int Nparticles){
int block_id = blockIdx.x;// + gridDim.x * blockIdx.y;
int i = blockDim.x * block_id + threadIdx.x;
if(i < Nparticles){
int index = -1;
int x;
for(x = 0; x < Nparticles; x++){
if(CDF[x] >= u[i]){
index = x;
break;
}
}
if(index == -1){
index = Nparticles-1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
}
}
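/*
 * Note: each thread scans the CDF linearly, so one resampling pass costs O(Nparticles)
 * work per particle; findIndexBin above provides a binary-search alternative that this
 * kernel does not use.
 */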
/**
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
double roundDouble(double value){
int newValue = (int)(value);
if(value - newValue < .5)
return newValue;
	else
		return newValue + 1; /* round up when the fractional part is at least .5 */
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
if(array3D[x * *dimY * *dimZ+y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
double randu(int * seed, int index)
{
int num = A*seed[index] + C;
seed[index] = num % M;
return fabs(seed[index]/((double) M));
}
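/*
 * Worked example, assuming a seed entry of 1: num = 1103515245*1 + 12345 = 1103527590,
 * which is already below M = 2147483647, so the stored seed becomes 1103527590 and the
 * returned value is 1103527590/2147483647, roughly 0.514.
 */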
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
double randn(int * seed, int index){
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2*PI*v);
double rt = -2*log(u);
return sqrt(rt)*cosine;
}
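/*
 * Worked example, assuming the two uniform draws are u = 0.5 and v = 0.5:
 * rt = -2*log(0.5) = 1.386, sqrt(rt) = 1.177 and cos(2*PI*0.5) = -1, so the
 * returned normal sample is about -1.177.
 */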
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius)
{
int diameter = radius*2 - 1;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2));
if(distance < radius)
disk[x*diameter + y] = 1;
}
}
}
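/*
 * Worked example for the radius used by particleFilter (radius = 5, diameter = 9):
 * the corner cell (0,0) lies sqrt((0-4)^2 + (0-4)^2) = 5.66 from the center cell (4,4),
 * so it stays 0, while every cell within distance 5 of the center is set to 1,
 * producing the disk-shaped structuring element.
 */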
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error)
{
int startX = posX - error;
while(startX < 0)
startX++;
int startY = posY - error;
while(startY < 0)
startY++;
int endX = posX + error;
while(endX > dimX)
endX--;
int endY = posY + error;
while(endY > dimY)
endY--;
int x,y;
for(x = startX; x < endX; x++){
for(y = startY; y < endY; y++){
double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) );
if(distance < error)
matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix)
{
int x, y, z;
for(z = 0; z < dimZ; z++){
for(x = 0; x < dimX; x++){
for(y = 0; y < dimY; y++){
if(matrix[x*dimY*dimZ + y*dimZ + z] == 1){
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, double * neighbors, int radius){
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius*2 -1;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(se[x*diameter + y]){
neighbors[neighY*2] = (int)(y - center);
neighbors[neighY*2 + 1] = (int)(x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the backgrounf intensity is known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){
int k;
int max_size = IszX*IszY*Nfr;
/*get object centers*/
int x0 = (int)roundDouble(IszY/2.0);
int y0 = (int)roundDouble(IszX/2.0);
I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for(k = 1; k < Nfr; k++){
xk = abs(x0 + (k-1));
yk = abs(y0 - 2*(k-1));
pos = yk * IszY * Nfr + xk *Nfr + k;
if(pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for(x = 0; x < IszX; x++){
for(y = 0; y < IszY; y++){
for(k = 0; k < Nfr; k++){
I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Determines the likelihood sum based on the formula: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2) / 50
* @param I The 3D matrix
* @param ind The current ind array
* @param numOnes The length of ind array
* @return A double representing the sum
*/
double calcLikelihoodSum(int * I, int * ind, int numOnes){
double likelihoodSum = 0.0;
int y;
for(y = 0; y < numOnes; y++)
likelihoodSum += (pow((double)(I[ind[y]] - 100),2) - pow((double)(I[ind[y]]-228),2))/50.0;
return likelihoodSum;
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(double * CDF, int lengthCDF, double value){
int index = -1;
int x;
for(x = 0; x < lengthCDF; x++){
if(CDF[x] >= value){
index = x;
break;
}
}
if(index == -1){
return lengthCDF-1;
}
return index;
}
/**
* The implementation of the particle filter using CUDA for many frames
* @see http://openmp.org/wp/ (OpenMP version this CUDA port is derived from)
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){
int max_size = IszX*IszY*Nfr;
long long start = get_time();
//original particle centroid
double xe = roundDouble(IszY/2.0);
double ye = roundDouble(IszX/2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius*2 - 1;
int * disk = (int *)malloc(diameter*diameter*sizeof(int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(disk[x*diameter + y] == 1)
countOnes++;
}
}
double * objxy = (double *)malloc(countOnes*2*sizeof(double));
getneighbors(disk, countOnes, objxy, radius);
long long get_neighbors = get_time();
printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors));
//initial weights are all equal (1/Nparticles)
double * weights = (double *)malloc(sizeof(double)*Nparticles);
for(x = 0; x < Nparticles; x++){
weights[x] = 1/((double)(Nparticles));
}
long long get_weights = get_time();
printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights));
//initial likelihood to 0.0
double * likelihood = (double *)malloc(sizeof(double)*Nparticles);
double * arrayX = (double *)malloc(sizeof(double)*Nparticles);
double * arrayY = (double *)malloc(sizeof(double)*Nparticles);
double * xj = (double *)malloc(sizeof(double)*Nparticles);
double * yj = (double *)malloc(sizeof(double)*Nparticles);
double * CDF = (double *)malloc(sizeof(double)*Nparticles);
//GPU copies of arrays
double * arrayX_GPU;
double * arrayY_GPU;
double * xj_GPU;
double * yj_GPU;
double * CDF_GPU;
int * ind = (int*)malloc(sizeof(int)*countOnes);
double * u = (double *)malloc(sizeof(double)*Nparticles);
double * u_GPU;
//CUDA memory allocation
check_error(cudaMalloc((void **) &arrayX_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &arrayY_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &xj_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &yj_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &CDF_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &u_GPU, sizeof(double)*Nparticles));
for(x = 0; x < Nparticles; x++){
arrayX[x] = xe;
arrayY[x] = ye;
}
int k;
//double * Ik = (double *)malloc(sizeof(double)*IszX*IszY);
int indX, indY;
for(k = 1; k < Nfr; k++){
long long set_arrays = get_time();
//printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, set_arrays));
//apply motion model
//draws sample from motion model (random walk). The only prior information
//is that the object moves about twice as fast in the y direction as in the x direction
for(x = 0; x < Nparticles; x++){
arrayX[x] = arrayX[x] + 1.0 + 5.0*randn(seed, x);
arrayY[x] = arrayY[x] - 2.0 + 2.0*randn(seed, x);
}
//particle filter likelihood
long long error = get_time();
printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error));
for(x = 0; x < Nparticles; x++){
//compute the likelihood: remember our assumption is that you know
// the foreground and the background image intensity distributions.
// Notice that we consider here a likelihood ratio, instead of
// p(z|x). It is possible in this case. Why? A homework exercise for you.
//calc ind
for(y = 0; y < countOnes; y++){
indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
indY = roundDouble(arrayY[x]) + objxy[y*2];
ind[y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
if(ind[y] >= max_size)
ind[y] = 0;
}
likelihood[x] = calcLikelihoodSum(I, ind, countOnes);
likelihood[x] = likelihood[x]/countOnes;
}
long long likelihood_time = get_time();
printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time));
// update & normalize weights
// using equation (63) of Arulampalam Tutorial
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
}
long long exponential = get_time();
printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential));
double sumWeights = 0;
for(x = 0; x < Nparticles; x++){
sumWeights += weights[x];
}
long long sum_time = get_time();
printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time));
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x]/sumWeights;
}
long long normalize = get_time();
printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize));
xe = 0;
ye = 0;
// estimate the object location by expected values
for(x = 0; x < Nparticles; x++){
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
long long move_time = get_time();
printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time));
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) );
printf("%lf\n", distance);
//display(hold off for now)
//pause(hold off for now)
//resampling
CDF[0] = weights[0];
for(x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
long long cum_sum = get_time();
printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum));
double u1 = (1/((double)(Nparticles)))*randu(seed, 0);
for(x = 0; x < Nparticles; x++){
u[x] = u1 + x/((double)(Nparticles));
}
long long u_time = get_time();
printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time));
long long start_copy = get_time();
//CUDA memory copying from CPU memory to GPU memory
cudaMemcpy(arrayX_GPU, arrayX, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(arrayY_GPU, arrayY, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(xj_GPU, xj, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(yj_GPU, yj, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(CDF_GPU, CDF, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(u_GPU, u, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
long long end_copy = get_time();
//Set number of threads
int num_blocks = ceil((double) Nparticles/(double) threads_per_block);
//KERNEL FUNCTION CALL
kernel <<< num_blocks, threads_per_block >>> (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, Nparticles);
cudaThreadSynchronize();
long long start_copy_back = get_time();
//CUDA memory copying back from GPU to CPU memory
cudaMemcpy(yj, yj_GPU, sizeof(double)*Nparticles, cudaMemcpyDeviceToHost);
cudaMemcpy(xj, xj_GPU, sizeof(double)*Nparticles, cudaMemcpyDeviceToHost);
long long end_copy_back = get_time();
printf("SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy));
printf("CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back));
printf("SENDING BACK FROM GPU TOOK: %lf\n", elapsed_time(start_copy_back, end_copy_back));
long long xyj_time = get_time();
printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time));
for(x = 0; x < Nparticles; x++){
//reassign arrayX and arrayY
arrayX[x] = xj[x];
arrayY[x] = yj[x];
weights[x] = 1/((double)(Nparticles));
}
long long reset = get_time();
printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset));
}
//CUDA freeing of memory
cudaFree(u_GPU);
cudaFree(CDF_GPU);
cudaFree(yj_GPU);
cudaFree(xj_GPU);
cudaFree(arrayY_GPU);
cudaFree(arrayX_GPU);
//free memory
free(disk);
free(objxy);
free(weights);
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(u);
free(ind);
}
int main(int argc, char * argv[]){
char* usage = "naive.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
//check number of arguments
if(argc != 9)
{
printf("%s\n", usage);
return 0;
}
//check args delimiters
if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) {
printf( "%s\n",usage );
return 0;
}
int IszX, IszY, Nfr, Nparticles;
//converting a string to an integer
if( sscanf( argv[2], "%d", &IszX ) == EOF ) {
printf("ERROR: dimX input is incorrect");
return 0;
}
if( IszX <= 0 ) {
printf("dimX must be > 0\n");
return 0;
}
//converting a string to an integer
if( sscanf( argv[4], "%d", &IszY ) == EOF ) {
printf("ERROR: dimY input is incorrect");
return 0;
}
if( IszY <= 0 ) {
printf("dimY must be > 0\n");
return 0;
}
//converting a string to an integer
if( sscanf( argv[6], "%d", &Nfr ) == EOF ) {
printf("ERROR: Number of frames input is incorrect");
return 0;
}
if( Nfr <= 0 ) {
printf("number of frames must be > 0\n");
return 0;
}
//converting a string to an integer
if( sscanf( argv[8], "%d", &Nparticles ) == EOF ) {
printf("ERROR: Number of particles input is incorrect");
return 0;
}
if( Nparticles <= 0 ) {
printf("Number of particles must be > 0\n");
return 0;
}
//establish seed
int * seed = (int *)malloc(sizeof(int)*Nparticles);
int i;
for(i = 0; i < Nparticles; i++)
seed[i] = time(0)*i;
//malloc matrix
int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
long long start = get_time();
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
free(seed);
free(I);
return 0;
}
|
02ced532d32646b8f3ded4b6648ba9510ae1e9e7.hip | // !!! This is a file automatically generated by hipify!!!
//#include "hip/hip_runtime.h"
//#include "device_launch_parameters.h"
//#include <stdio.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
// device kernel
__global__
void helloWorldDevice() {
printf("Hello world from device %d!\n", threadIdx.x);
}
int main() {
printf("Hello world from host!\n");
// run kernel in 3 instances
hipLaunchKernelGGL(( helloWorldDevice) , dim3(1), dim3(3), 0, 0, );
hipDeviceSynchronize();
}
| 02ced532d32646b8f3ded4b6648ba9510ae1e9e7.cu |
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//#include <stdio.h>
#include <stdio.h>
#include <cuda_runtime.h>
// device kernel
__global__
void helloWorldDevice() {
printf("Hello world from device %d!\n", threadIdx.x);
}
int main() {
printf("Hello world from host!\n");
// run kernel in 3 instances
helloWorldDevice <<<1, 3>>> ();
cudaDeviceSynchronize();
}
|
e894b2906dd2b799a73c48de34df43e527f2ce16.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <string>
#include <time.h>
#include <math.h>
unsigned myrand() {
unsigned a = rand()<<10;
unsigned b = rand();
return a + b;
}
__global__ void cuckooHash(
unsigned* hashTable,
unsigned* a, unsigned* b,
unsigned* entry,
unsigned* function,
unsigned* collision,
unsigned n_function, unsigned n, unsigned p)
{
unsigned k = blockDim.x * blockIdx.x + threadIdx.x;
unsigned num = function[k];
unsigned hashValue = ((a[num] * entry[k] + b[num]) % p) % n;
if (collision[k] == 1 || hashTable[hashValue] == 0xffffffff) {
hashTable[hashValue] = entry[k];
function[k] = (num + 1) % n_function;
}
}
__global__ void detectCollision(
unsigned* hashTable,
unsigned* a, unsigned* b,
unsigned* entry,
unsigned* function,
unsigned* collision,
unsigned n_function, unsigned n, unsigned p)
{
unsigned k = blockDim.x * blockIdx.x + threadIdx.x;
unsigned num = (function[k] - 1) % n_function;
unsigned hashValue = ((a[num] * entry[k] + b[num]) % p) % n;
if (hashTable[hashValue] != entry[k]) {
collision[k] = 1;
} else {
collision[k] = 0;
}
}
__global__ void lookup(
unsigned* hashTable,
unsigned* a, unsigned* b,
unsigned* searchEntry,
unsigned* dict,
unsigned n_function, unsigned n, unsigned p)
{
unsigned k = blockDim.x * blockIdx.x + threadIdx.x;
for (unsigned i = 0; i < n_function; i++) {
unsigned hashValue = ((a[i] * searchEntry[k] + b[i]) % p) % n;
if (hashTable[hashValue] == searchEntry[k]) {
dict[k] = 1;
break;
}
}
}
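/*
 * All three kernels above use the same hash of the form h(key) = ((a*key + b) % p) % n.
 * A small worked example, assuming a = 3, b = 7, key = 42, p = 62353171 and a table of
 * n = 8 slots: a*key + b = 133, 133 % 62353171 = 133 and 133 % 8 = 5, so this hash
 * function probes slot 5 for that key.
 */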
void generate_a_b(unsigned n_function, unsigned* a, unsigned* b) {
for (unsigned i = 0; i < n_function; i++) {
a[i] = rand() % 10;
b[i] = rand() % 10;
if (i != 0) {
while (a[i] == a[i - 1] || b[i] == b[i - 1]) {
a[i] = rand() % 10;
b[i] = rand() % 10;
}
}
}
/////////////For task 5
//a[0] = 232;
//b[0] = 0;
//////////DEBUG
std::cout << "a: ";
for (unsigned i = 0; i < n_function; i++) {
std::cout << a[i] << " ";
}
std::cout << std::endl;
std::cout << "b: ";
for (unsigned i = 0; i < n_function; i++) {
std::cout << b[i] << " ";
}
std::cout << std::endl;
}
int main() {
//std::cout << myrand() << std::endl;
//std::cout << myrand() << std::endl;
//std::cout << myrand() << std::endl;
//std::cout << myrand() << std::endl;
//std::cout << myrand() << std::endl;
//std::cout << myrand() << std::endl;
//std::cout << myrand() << std::endl;
//std::cout << myrand() << std::endl;
//std::cout << myrand() << std::endl;
hipError_t err = hipSuccess;
unsigned task, n_function;
unsigned N;
unsigned entryLength;
unsigned p = 62353171; // a large prime
unsigned *entry;
unsigned *collision;
unsigned *hashTable;
unsigned *function;
unsigned *a, *b;
unsigned *dict;
unsigned *searchEntry;
unsigned *d_entry = NULL;
unsigned *d_collision = NULL;
unsigned *d_hashTable = NULL;
unsigned *d_function = NULL;
unsigned *d_a = NULL;
unsigned *d_b = NULL;
unsigned *d_dict = NULL;
unsigned *d_searchEntry = NULL;
unsigned limit;
unsigned blockNum;
unsigned blockSize = 512;
unsigned flag = 0;
unsigned iteration = 0;
unsigned test;
unsigned testNum;
unsigned testHashValue;
clock_t startTime;
clock_t endTime;
unsigned sum = 0;
std::cout << "How many hash functions?" << std::endl;
std::cin >> n_function;
std::cout << "Which task?" << std::endl;
std::cin >> task;
srand(time(NULL));
switch (task)
{
case 1:
N = pow(2, 25); //33554432
limit = ceil(4 * log10((double)N));
unsigned s;
std::cout << "Input s:";
std::cin >> s;
entryLength = pow(2, s);
for (unsigned z = 0; z < 5; z++) {
blockNum = ceil((double)entryLength / blockSize);
entry = new unsigned[entryLength];
std::cout << "Generating random numbers between 0~10000000..." << std::endl;
for (unsigned i = 0; i < entryLength; i++) {
entry[i] = myrand() % 10000000;
}
std::cout << "Generating a,b..." << std::endl;
a = new unsigned[n_function];
b = new unsigned[n_function];
generate_a_b(n_function, a, b);
//for (unsigned i = 0; i < n_function; i++) {
// a[i] = rand() % 10;
// b[i] = rand() % 10;
// if (i != 0) {
// while (a[i]==a[i-1] || b[i]==b[i-1]){
// a[i] = rand() % 10;
// b[i] = rand() % 10;
// }
// }
//}
std::cout << "Initilizing hashTable..." << std::endl;
hashTable = new unsigned[N];
memset(hashTable, 0xffffffff, N * sizeof(unsigned));
std::cout << "Initilizing collisionTable..." << std::endl;
collision = new unsigned[entryLength];
memset(collision, 0, entryLength * sizeof(unsigned));
std::cout << "Initilizing functionIndex..." << std::endl;
function = new unsigned[entryLength];
memset(function, 0, entryLength * sizeof(unsigned));
std::cout << "Allocating device memory..." << std::endl;
err = hipMalloc((void**)&d_entry, entryLength * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating entry[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_a, n_function * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating a[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_b, n_function * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating b[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_collision, entryLength * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating collision[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_function, entryLength * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating functionIndex[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_hashTable, N * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating hashTable[] failed" << std::endl;
goto Error;
}
std::cout << "Copying memory from host to device..." << std::endl;
err = hipMemcpy(d_hashTable, hashTable, N * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy hashTable" << std::endl;
goto Error;
}
err = hipMemcpy(d_a, a, n_function * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy a[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_b, b, n_function * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy b[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_entry, entry, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy entry[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_function, function, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy functionIndex[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_collision, collision, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy collision[]" << std::endl;
goto Error;
}
iteration = 0;
startTime = clock();
do {
flag = 0;
//Restarting hash
if (iteration == limit) {
iteration = 0;
std::cout << ".........Rehash........." << std::endl;
generate_a_b(n_function, a, b);
memset(hashTable, 0xffffffff, N * sizeof(unsigned));
memset(function, 0, entryLength * sizeof(unsigned));
memset(collision, 0, entryLength * sizeof(unsigned));
std::cout << "Recopying memory from host to device..." << std::endl;
err = hipMemcpy(d_hashTable, hashTable, N * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy hashTable" << std::endl;
goto Error;
}
err = hipMemcpy(d_a, a, n_function * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy a[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_b, b, n_function * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy b[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_entry, entry, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy entry[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_function, function, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy functionIndex[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_collision, collision, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy collision[]" << std::endl;
goto Error;
}
}
iteration++;
cuckooHash << < blockNum, blockSize >> > (d_hashTable,
d_a, d_b,
d_entry,
d_function,
d_collision,
n_function, N, p);
detectCollision << < blockNum, blockSize >> > (d_hashTable,
d_a, d_b,
d_entry,
d_function,
d_collision,
n_function, N, p);
std::cout << "Finish hash " << iteration << " times" << std::endl;
// Copy collision back
err = hipMemcpy(collision, d_collision, entryLength * sizeof(unsigned), hipMemcpyDeviceToHost);
if (err != hipSuccess) {
std::cout << "Copy collision failed from device to host" << std::endl;
std::cout << hipGetErrorString(err) << std::endl;
goto Error;
}
for (unsigned i = 0; i < entryLength; i++) {
flag += collision[i];
}
std::cout << flag << " collisions" << std::endl;
} while (flag != 0);
endTime = clock();
std::cout << "Hash Done!" << std::endl;
std::cout << "time:" << (double)(endTime - startTime) / CLOCKS_PER_SEC << "s" << std::endl;
std::cout << std::endl;
//////////test
//hipMemcpy(function, d_function, entryLength * sizeof(unsigned), hipMemcpyDeviceToHost);
//hipMemcpy(hashTable, d_hashTable, N * sizeof(unsigned), hipMemcpyDeviceToHost);
//std::cout << "Checking Hash..." << std::endl;
//for (unsigned i = 0; i < 10; i++) {
// test = myrand() % entryLength;
// testNum = (function[test] - 1) % n_function;
// testHashValue = ((a[testNum] * entry[test] + b[testNum]) % p) % N;
// std::cout << "--" << entry[test] << " " << testNum << " ";
// if (hashTable[testHashValue] == entry[test]) {
// std::cout << "correct" << std::endl;
// } else {
// std::cout << "incorrect" << std::endl;
// }
//}
}
break;
case 2:
N = pow(2, 25); //33554432
limit = ceil(4 * log10((double)N));
unsigned ii;
std::cout << "Input i:";
std::cin >> ii;
entryLength = pow(2, 24);
for (unsigned z = 0; z < 5; z++) {
blockNum = ceil((double)entryLength / blockSize);
entry = new unsigned[entryLength];
std::cout << "Generating random numbers between 0~10000000..." << std::endl;
for (unsigned i = 0; i < entryLength; i++) {
entry[i] = myrand() % 10000000;
}
std::cout << "Generating a,b..." << std::endl;
a = new unsigned[n_function];
b = new unsigned[n_function];
generate_a_b(n_function, a, b);
//for (unsigned i = 0; i < n_function; i++) {
// a[i] = rand() % 10;
// b[i] = rand() % 10;
// if (i != 0) {
// while (a[i]==a[i-1] || b[i]==b[i-1]){
// a[i] = rand() % 10;
// b[i] = rand() % 10;
// }
// }
//}
std::cout << "Initilizing hashTable..." << std::endl;
hashTable = new unsigned[N];
memset(hashTable, 0xffffffff, N * sizeof(unsigned));
std::cout << "Initilizing collisionTable..." << std::endl;
collision = new unsigned[entryLength];
memset(collision, 0, entryLength * sizeof(unsigned));
std::cout << "Initilizing functionIndex..." << std::endl;
function = new unsigned[entryLength];
memset(function, 0, entryLength * sizeof(unsigned));
std::cout << "Allocating device memory..." << std::endl;
err = hipMalloc((void**)&d_entry, entryLength * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating entry[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_a, n_function * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating a[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_b, n_function * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating b[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_collision, entryLength * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating collision[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_function, entryLength * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating functionIndex[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_hashTable, N * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating hashTable[] failed" << std::endl;
goto Error;
}
std::cout << "Copying memory from host to device..." << std::endl;
err = hipMemcpy(d_hashTable, hashTable, N * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy hashTable" << std::endl;
goto Error;
}
err = hipMemcpy(d_a, a, n_function * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy a[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_b, b, n_function * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy b[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_entry, entry, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy entry[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_function, function, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy functionIndex[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_collision, collision, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy collision[]" << std::endl;
goto Error;
}
iteration = 0;
do {
flag = 0;
//Restarting hash
if (iteration == limit) {
iteration = 0;
std::cout << ".........Rehash........." << std::endl;
generate_a_b(n_function, a, b);
memset(hashTable, 0xffffffff, N * sizeof(unsigned));
memset(function, 0, entryLength * sizeof(unsigned));
memset(collision, 0, entryLength * sizeof(unsigned));
std::cout << "Recopying memory from host to device..." << std::endl;
err = hipMemcpy(d_hashTable, hashTable, N * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy hashTable" << std::endl;
goto Error;
}
err = hipMemcpy(d_a, a, n_function * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy a[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_b, b, n_function * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy b[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_entry, entry, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy entry[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_function, function, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy functionIndex[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_collision, collision, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy collision[]" << std::endl;
goto Error;
}
}
iteration++;
hipLaunchKernelGGL(( cuckooHash) , dim3(blockNum), dim3(blockSize) , 0, 0, d_hashTable,
d_a, d_b,
d_entry,
d_function,
d_collision,
n_function, N, p);
hipLaunchKernelGGL(( detectCollision) , dim3(blockNum), dim3(blockSize) , 0, 0, d_hashTable,
d_a, d_b,
d_entry,
d_function,
d_collision,
n_function, N, p);
std::cout << "Finish hash " << iteration << " times" << std::endl;
// Copy collision back
err = hipMemcpy(collision, d_collision, entryLength * sizeof(unsigned), hipMemcpyDeviceToHost);
if (err != hipSuccess) {
std::cout << "Copy collision failed from device to host" << std::endl;
goto Error;
}
for (unsigned i = 0; i < entryLength; i++) {
flag += collision[i];
}
std::cout << flag << " collisions" << std::endl;
} while (flag != 0);
std::cout << "Hash Done!" << std::endl;
//////////////////////////////////
///////// Look up part
std::cout << "Initilizing lookup part..." << std::endl;
hipMemcpy(hashTable, d_hashTable, N * sizeof(unsigned), hipMemcpyDeviceToHost);
searchEntry = new unsigned[entryLength];
dict = new unsigned[entryLength];
//add random key in to search entry
for (unsigned i = 0; i < entryLength; i++) {
//std::cout << myrand() << std::endl;
if (i < entryLength*(1 - ii*0.1)) {
unsigned randIdx = myrand() % entryLength;
//std::cout << randIdx << " ";
searchEntry[i] = entry[randIdx];
} else {
searchEntry[i] = myrand();
}
}
//std::cout << std::endl;
//store if find
memset(dict, 0, entryLength * sizeof(unsigned));
//allocate cuda memory for search
err = hipMalloc((void**)&d_searchEntry, entryLength * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "allocate searchEntry fail" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_dict, entryLength * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "allocate dict fail" << std::endl;
goto Error;
}
//copy data from host to device
err = hipMemcpy(d_searchEntry, searchEntry, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "Fail to copy search Entry to device" << std::endl;
goto Error;
}
err = hipMemcpy(d_dict, dict, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "Fail to copy dict to device" << std::endl;
goto Error;
}
std::cout << "Start lookup keys..." << std::endl;
///////////////DEBUG
//sum = 0;
//for (unsigned i = 0; i < entryLength; i++) {
// for (unsigned j = 0; j < n_function; j++) {
// unsigned hashValue = ((a[j] * searchEntry[i] + b[j]) % p) % N;
// if (hashTable[hashValue] == searchEntry[i]) {
// sum++;
// break;
// }
// }
//}
//std::cout << "--------------" << sum/entryLength << std::endl;
startTime = clock();
hipLaunchKernelGGL(( lookup) , dim3(blockNum), dim3(blockSize) , 0, 0, d_hashTable,
d_a, d_b,
d_searchEntry,
d_dict,
n_function, N, p);
endTime = clock();
std::cout << "Lookup done" << std::endl;
//Copy dict back to host
err = hipMemcpy(dict, d_dict, entryLength * sizeof(unsigned), hipMemcpyDeviceToHost);
if (err != hipSuccess) {
std::cout << "Fail to copy dict back to device" << std::endl;
goto Error;
}
sum = 0;
for (unsigned i = 0; i < entryLength; i++) {
sum += dict[i];
}
std::cout << "Hash Hit: " << (double)sum / (double)entryLength * 100 << "%";
std::cout << " with " << (double)(endTime - startTime) / CLOCKS_PER_SEC << "s" << std::endl;
std::cout << std::endl;
}
break;
case 3:
//N = pow(2, 25); //33554432
float alpha;
std::cout << "input alpha: ";
std::cin >> alpha;
entryLength = pow(2, 24);
std::cout << entryLength << " ";
N = alpha*entryLength;
std::cout << N << " ";
limit = ceil(4 * log10((double)N));
std::cout << limit << std::endl;
for (unsigned z = 0; z < 5; z++) {
blockNum = ceil((double)entryLength / blockSize);
entry = new unsigned[entryLength];
std::cout << "Generating random numbers between 0~10000000..." << std::endl;
for (unsigned i = 0; i < entryLength; i++) {
entry[i] = myrand() % 10000000;
}
std::cout << "Generating a,b..." << std::endl;
a = new unsigned[n_function];
b = new unsigned[n_function];
generate_a_b(n_function, a, b);
//for (unsigned i = 0; i < n_function; i++) {
// a[i] = rand() % 10;
// b[i] = rand() % 10;
// if (i != 0) {
// while (a[i]==a[i-1] || b[i]==b[i-1]){
// a[i] = rand() % 10;
// b[i] = rand() % 10;
// }
// }
//}
std::cout << "Initilizing hashTable..." << std::endl;
hashTable = new unsigned[N];
memset(hashTable, 0xffffffff, N * sizeof(unsigned));
std::cout << "Initilizing collisionTable..." << std::endl;
collision = new unsigned[entryLength];
memset(collision, 0, entryLength * sizeof(unsigned));
std::cout << "Initilizing functionIndex..." << std::endl;
function = new unsigned[entryLength];
memset(function, 0, entryLength * sizeof(unsigned));
std::cout << "Allocating device memory..." << std::endl;
err = hipMalloc((void**)&d_entry, entryLength * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating entry[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_a, n_function * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating a[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_b, n_function * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating b[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_collision, entryLength * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating collision[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_function, entryLength * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating functionIndex[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_hashTable, N * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating hashTable[] failed" << std::endl;
goto Error;
}
std::cout << "Copying memory from host to device..." << std::endl;
err = hipMemcpy(d_hashTable, hashTable, N * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy hashTable" << std::endl;
goto Error;
}
err = hipMemcpy(d_a, a, n_function * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy a[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_b, b, n_function * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy b[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_entry, entry, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy entry[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_function, function, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy functionIndex[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_collision, collision, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy collision[]" << std::endl;
goto Error;
}
iteration = 0;
startTime = clock();
do {
flag = 0;
//Restarting hash
if (iteration == limit) {
iteration = 0;
std::cout << ".........Rehash........." << std::endl;
generate_a_b(n_function, a, b);
memset(hashTable, 0xffffffff, N * sizeof(unsigned));
memset(function, 0, entryLength * sizeof(unsigned));
memset(collision, 0, entryLength * sizeof(unsigned));
std::cout << "Recopying memory from host to device..." << std::endl;
err = hipMemcpy(d_hashTable, hashTable, N * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy hashTable" << std::endl;
goto Error;
}
err = hipMemcpy(d_a, a, n_function * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy a[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_b, b, n_function * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy b[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_entry, entry, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy entry[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_function, function, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy functionIndex[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_collision, collision, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy collision[]" << std::endl;
goto Error;
}
}
iteration++;
cuckooHash << < blockNum, blockSize >> > (d_hashTable,
d_a, d_b,
d_entry,
d_function,
d_collision,
n_function, N, p);
detectCollision << < blockNum, blockSize >> > (d_hashTable,
d_a, d_b,
d_entry,
d_function,
d_collision,
n_function, N, p);
std::cout << "Finish hash " << iteration << " times" << std::endl;
// Copy collision back
err = hipMemcpy(collision, d_collision, entryLength * sizeof(unsigned), hipMemcpyDeviceToHost);
if (err != hipSuccess) {
std::cout << "Copy collision failed from device to host" << std::endl;
std::cout << hipGetErrorString(err) << std::endl;
goto Error;
}
for (unsigned i = 0; i < entryLength; i++) {
flag += collision[i];
}
std::cout << flag << " collisions" << std::endl;
} while (flag != 0);
endTime = clock();
std::cout << "Hash Done!" << std::endl;
std::cout << "time:" << (double)(endTime - startTime) / CLOCKS_PER_SEC << "s" << std::endl;
std::cout << std::endl;
////////////test
//hipMemcpy(function, d_function, entryLength * sizeof(unsigned), hipMemcpyDeviceToHost);
//hipMemcpy(hashTable, d_hashTable, N * sizeof(unsigned), hipMemcpyDeviceToHost);
//std::cout << "Checking Hash..." << std::endl;
//for (unsigned i = 0; i < 10; i++) {
// test = myrand() % entryLength;
// testNum = (function[test] - 1) % n_function;
// testHashValue = ((a[testNum] * entry[test] + b[testNum]) % p) % N;
// std::cout << "--" << entry[test] << " " << testNum << " ";
// if (hashTable[testHashValue] == entry[test]) {
// std::cout << "correct" << std::endl;
// } else {
// std::cout << "incorrect" << std::endl;
// }
//}
}
break;
case 4:
float beta;
std::cout << "input bound coefficient: ";
std::cin >> beta;
entryLength = pow(2, 24);
std::cout << entryLength << " ";
N = 1.2*entryLength;
std::cout << N << " ";
limit = ceil(beta * log10((double)N));
std::cout << limit << std::endl;
for (unsigned z = 0; z < 5; z++) {
blockNum = ceil((double)entryLength / blockSize);
entry = new unsigned[entryLength];
std::cout << "Generating random numbers between 0~10000000..." << std::endl;
for (unsigned i = 0; i < entryLength; i++) {
entry[i] = myrand() % 10000000;
}
std::cout << "Generating a,b..." << std::endl;
a = new unsigned[n_function];
b = new unsigned[n_function];
generate_a_b(n_function, a, b);
//for (unsigned i = 0; i < n_function; i++) {
// a[i] = rand() % 10;
// b[i] = rand() % 10;
// if (i != 0) {
// while (a[i]==a[i-1] || b[i]==b[i-1]){
// a[i] = rand() % 10;
// b[i] = rand() % 10;
// }
// }
//}
std::cout << "Initilizing hashTable..." << std::endl;
hashTable = new unsigned[N];
memset(hashTable, 0xffffffff, N * sizeof(unsigned));
std::cout << "Initilizing collisionTable..." << std::endl;
collision = new unsigned[entryLength];
memset(collision, 0, entryLength * sizeof(unsigned));
std::cout << "Initilizing functionIndex..." << std::endl;
function = new unsigned[entryLength];
memset(function, 0, entryLength * sizeof(unsigned));
std::cout << "Allocating device memory..." << std::endl;
err = hipMalloc((void**)&d_entry, entryLength * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating entry[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_a, n_function * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating a[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_b, n_function * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating b[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_collision, entryLength * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating collision[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_function, entryLength * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating functionIndex[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_hashTable, N * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating hashTable[] failed" << std::endl;
goto Error;
}
std::cout << "Copying memory from host to device..." << std::endl;
err = hipMemcpy(d_hashTable, hashTable, N * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy hashTable" << std::endl;
goto Error;
}
err = hipMemcpy(d_a, a, n_function * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy a[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_b, b, n_function * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy b[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_entry, entry, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy entry[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_function, function, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy functionIndex[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_collision, collision, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy collision[]" << std::endl;
goto Error;
}
iteration = 0;
startTime = clock();
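			// Repeat insert + collision-detection passes; if 'limit' passes go by without all
			// entries settling, everything is rehashed with freshly drawn (a, b).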
do {
flag = 0;
//Restarting hash
if (iteration == limit) {
iteration = 0;
std::cout << ".........Rehash........." << std::endl;
generate_a_b(n_function, a, b);
memset(hashTable, 0xffffffff, N * sizeof(unsigned));
memset(function, 0, entryLength * sizeof(unsigned));
memset(collision, 0, entryLength * sizeof(unsigned));
std::cout << "Recopying memory from host to device..." << std::endl;
err = hipMemcpy(d_hashTable, hashTable, N * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy hashTable" << std::endl;
goto Error;
}
err = hipMemcpy(d_a, a, n_function * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy a[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_b, b, n_function * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy b[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_entry, entry, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy entry[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_function, function, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy functionIndex[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_collision, collision, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy collision[]" << std::endl;
goto Error;
}
}
iteration++;
cuckooHash << < blockNum, blockSize >> > (d_hashTable,
d_a, d_b,
d_entry,
d_function,
d_collision,
n_function, N, p);
detectCollision << < blockNum, blockSize >> > (d_hashTable,
d_a, d_b,
d_entry,
d_function,
d_collision,
n_function, N, p);
std::cout << "Finish hash " << iteration << " times" << std::endl;
				// Copy collision back
				err = hipMemcpy(collision, d_collision, entryLength * sizeof(unsigned), hipMemcpyDeviceToHost);
				if (err != hipSuccess) {
					std::cout << "Copy collision failed from device to host" << std::endl;
std::cout << hipGetErrorString(err) << std::endl;
goto Error;
}
for (unsigned i = 0; i < entryLength; i++) {
flag += collision[i];
}
std::cout << flag << " collisions" << std::endl;
} while (flag != 0);
endTime = clock();
std::cout << "Hash Done!" << std::endl;
std::cout << "time:" << (double)(endTime - startTime) / CLOCKS_PER_SEC << "s" << std::endl;
std::cout << std::endl;
//////////test
//hipMemcpy(function, d_function, entryLength * sizeof(unsigned), hipMemcpyDeviceToHost);
//hipMemcpy(hashTable, d_hashTable, N * sizeof(unsigned), hipMemcpyDeviceToHost);
//std::cout << "Checking Hash..." << std::endl;
//for (unsigned i = 0; i < 10; i++) {
// test = myrand() % entryLength;
// testNum = (function[test] - 1) % n_function;
// testHashValue = ((a[testNum] * entry[test] + b[testNum]) % p) % N;
// std::cout << "--" << entry[test] << " " << testNum << " ";
// if (hashTable[testHashValue] == entry[test]) {
// std::cout << "correct" << std::endl;
// } else {
// std::cout << "incorrect" << std::endl;
// }
//}
}
break;
case 5:
entryLength = pow(2, 24);
std::cout << entryLength << " ";
N = 1.2*entryLength;
std::cout << N << " ";
limit = ceil(6 * log10((double)N));
std::cout << limit << std::endl;
for (unsigned z = 0; z < 5; z++) {
blockNum = ceil((double)entryLength / blockSize);
entry = new unsigned[entryLength];
std::cout << "Generating random numbers between 0~10000000..." << std::endl;
for (unsigned i = 0; i < entryLength; i++) {
entry[i] = myrand() % 10000000;
}
std::cout << "Generating a,b..." << std::endl;
a = new unsigned[n_function];
b = new unsigned[n_function];
generate_a_b(n_function, a, b);
//for (unsigned i = 0; i < n_function; i++) {
// a[i] = rand() % 10;
// b[i] = rand() % 10;
// if (i != 0) {
// while (a[i]==a[i-1] || b[i]==b[i-1]){
// a[i] = rand() % 10;
// b[i] = rand() % 10;
// }
// }
//}
std::cout << "Initilizing hashTable..." << std::endl;
hashTable = new unsigned[N];
memset(hashTable, 0xffffffff, N * sizeof(unsigned));
std::cout << "Initilizing collisionTable..." << std::endl;
collision = new unsigned[entryLength];
memset(collision, 0, entryLength * sizeof(unsigned));
std::cout << "Initilizing functionIndex..." << std::endl;
function = new unsigned[entryLength];
memset(function, 0, entryLength * sizeof(unsigned));
std::cout << "Allocating device memory..." << std::endl;
err = hipMalloc((void**)&d_entry, entryLength * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating entry[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_a, n_function * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating a[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_b, n_function * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating b[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_collision, entryLength * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating collision[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_function, entryLength * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating functionIndex[] failed" << std::endl;
goto Error;
}
err = hipMalloc((void**)&d_hashTable, N * sizeof(unsigned));
if (err != hipSuccess) {
std::cout << "-->Allocating hashTable[] failed" << std::endl;
goto Error;
}
std::cout << "Copying memory from host to device..." << std::endl;
err = hipMemcpy(d_hashTable, hashTable, N * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy hashTable" << std::endl;
goto Error;
}
err = hipMemcpy(d_a, a, n_function * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy a[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_b, b, n_function * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy b[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_entry, entry, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy entry[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_function, function, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy functionIndex[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_collision, collision, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy collision[]" << std::endl;
goto Error;
}
iteration = 0;
startTime = clock();
do {
flag = 0;
//Restarting hash
if (iteration == limit) {
iteration = 0;
std::cout << ".........Rehash........." << std::endl;
generate_a_b(n_function, a, b);
memset(hashTable, 0xffffffff, N * sizeof(unsigned));
memset(function, 0, entryLength * sizeof(unsigned));
memset(collision, 0, entryLength * sizeof(unsigned));
std::cout << "Recopying memory from host to device..." << std::endl;
err = hipMemcpy(d_hashTable, hashTable, N * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy hashTable" << std::endl;
goto Error;
}
err = hipMemcpy(d_a, a, n_function * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy a[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_b, b, n_function * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy b[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_entry, entry, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy entry[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_function, function, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy functionIndex[]" << std::endl;
goto Error;
}
err = hipMemcpy(d_collision, collision, entryLength * sizeof(unsigned), hipMemcpyHostToDevice);
if (err != hipSuccess) {
std::cout << "-->Fail to copy collision[]" << std::endl;
goto Error;
}
}
iteration++;
cuckooHash << < blockNum, blockSize >> > (d_hashTable,
d_a, d_b,
d_entry,
d_function,
d_collision,
n_function, N, p);
detectCollision << < blockNum, blockSize >> > (d_hashTable,
d_a, d_b,
d_entry,
d_function,
d_collision,
n_function, N, p);
std::cout << "Finish hash " << iteration << " times" << std::endl;
				// Copy collision back
				err = hipMemcpy(collision, d_collision, entryLength * sizeof(unsigned), hipMemcpyDeviceToHost);
				if (err != hipSuccess) {
					std::cout << "Copy collision failed from device to host" << std::endl;
std::cout << hipGetErrorString(err) << std::endl;
goto Error;
}
for (unsigned i = 0; i < entryLength; i++) {
flag += collision[i];
}
std::cout << flag << " collisions" << std::endl;
} while (flag != 0);
endTime = clock();
std::cout << "Hash Done!" << std::endl;
std::cout << "time:" << (double)(endTime - startTime) / CLOCKS_PER_SEC << "s" << std::endl;
std::cout << std::endl;
//////////test
//hipMemcpy(function, d_function, entryLength * sizeof(unsigned), hipMemcpyDeviceToHost);
//hipMemcpy(hashTable, d_hashTable, N * sizeof(unsigned), hipMemcpyDeviceToHost);
//std::cout << "Checking Hash..." << std::endl;
//for (unsigned i = 0; i < 10; i++) {
// test = myrand() % entryLength;
// testNum = (function[test] - 1) % n_function;
// testHashValue = ((a[testNum] * entry[test] + b[testNum]) % p) % N;
// std::cout << "--" << entry[test] << " " << testNum << " ";
// if (hashTable[testHashValue] == entry[test]) {
// std::cout << "correct" << std::endl;
// } else {
// std::cout << "incorrect" << std::endl;
// }
//}
}
break;
default:
std::cout << "No such task. Please run again." << std::endl;
goto Error;
break;
}
Error:
	// hipFree takes the device pointer itself; passing &d_x frees nothing (a host address) and leaks the device memory
	hipFree(d_a);
	hipFree(d_b);
	hipFree(d_collision);
	hipFree(d_dict);
	hipFree(d_entry);
	hipFree(d_function);
	hipFree(d_searchEntry);
	hipFree(d_hashTable);
system("pause");
return 0;
}
| e894b2906dd2b799a73c48de34df43e527f2ce16.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <string>
#include <time.h>
#include <math.h>
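// Widen the random range by combining two rand() calls (rand() alone typically yields at most 15 bits).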
unsigned myrand() {
unsigned a = rand()<<10;
unsigned b = rand();
return a + b;
}
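// One parallel cuckoo-insertion pass: each thread hashes its entry with its current hash
// function h(x) = ((a*x + b) % p) % n and, if the entry was marked as colliding or the slot
// looks empty, writes it there (concurrent writers race, last one wins) and advances
// function[k] so a later re-insertion tries the next hash function. detectCollision is run
// afterwards to find entries that were overwritten.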
__global__ void cuckooHash(
unsigned* hashTable,
unsigned* a, unsigned* b,
unsigned* entry,
unsigned* function,
unsigned* collision,
unsigned n_function, unsigned n, unsigned p)
{
unsigned k = blockDim.x * blockIdx.x + threadIdx.x;
unsigned num = function[k];
unsigned hashValue = ((a[num] * entry[k] + b[num]) % p) % n;
if (collision[k] == 1 || hashTable[hashValue] == 0xffffffff) {
hashTable[hashValue] = entry[k];
function[k] = (num + 1) % n_function;
}
}
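// Re-read the slot this entry last hashed to: if another thread's value sits there, flag the
// entry in collision[] so the next cuckooHash pass re-inserts it.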
__global__ void detectCollision(
unsigned* hashTable,
unsigned* a, unsigned* b,
unsigned* entry,
unsigned* function,
unsigned* collision,
unsigned n_function, unsigned n, unsigned p)
{
unsigned k = blockDim.x * blockIdx.x + threadIdx.x;
unsigned num = (function[k] - 1) % n_function;
unsigned hashValue = ((a[num] * entry[k] + b[num]) % p) % n;
if (hashTable[hashValue] != entry[k]) {
collision[k] = 1;
} else {
collision[k] = 0;
}
}
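// Membership query: probe all n_function candidate slots for each key in searchEntry and set
// dict[k] = 1 on a hit.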
__global__ void lookup(
unsigned* hashTable,
unsigned* a, unsigned* b,
unsigned* searchEntry,
unsigned* dict,
unsigned n_function, unsigned n, unsigned p)
{
unsigned k = blockDim.x * blockIdx.x + threadIdx.x;
for (unsigned i = 0; i < n_function; i++) {
unsigned hashValue = ((a[i] * searchEntry[k] + b[i]) % p) % n;
if (hashTable[hashValue] == searchEntry[k]) {
dict[k] = 1;
break;
}
}
}
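// Draw coefficients for the hash family h_i(x) = ((a_i * x + b_i) % p) % N, forcing each
// (a, b) pair to differ from the one drawn just before it.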
void generate_a_b(unsigned n_function, unsigned* a, unsigned* b) {
for (unsigned i = 0; i < n_function; i++) {
a[i] = rand() % 10;
b[i] = rand() % 10;
if (i != 0) {
while (a[i] == a[i - 1] || b[i] == b[i - 1]) {
a[i] = rand() % 10;
b[i] = rand() % 10;
}
}
}
/////////////For task 5
//a[0] = 232;
//b[0] = 0;
//////////DEBUG
std::cout << "a: ";
for (unsigned i = 0; i < n_function; i++) {
std::cout << a[i] << " ";
}
std::cout << std::endl;
std::cout << "b: ";
for (unsigned i = 0; i < n_function; i++) {
std::cout << b[i] << " ";
}
std::cout << std::endl;
}
int main() {
//std::cout << myrand() << std::endl;
//std::cout << myrand() << std::endl;
//std::cout << myrand() << std::endl;
//std::cout << myrand() << std::endl;
//std::cout << myrand() << std::endl;
//std::cout << myrand() << std::endl;
//std::cout << myrand() << std::endl;
//std::cout << myrand() << std::endl;
//std::cout << myrand() << std::endl;
cudaError_t err = cudaSuccess;
unsigned task, n_function;
unsigned N;
unsigned entryLength;
unsigned p = 62353171; // a large prime
unsigned *entry;
unsigned *collision;
unsigned *hashTable;
unsigned *function;
unsigned *a, *b;
unsigned *dict;
unsigned *searchEntry;
unsigned *d_entry = NULL;
unsigned *d_collision = NULL;
unsigned *d_hashTable = NULL;
unsigned *d_function = NULL;
unsigned *d_a = NULL;
unsigned *d_b = NULL;
unsigned *d_dict = NULL;
unsigned *d_searchEntry = NULL;
unsigned limit;
unsigned blockNum;
unsigned blockSize = 512;
unsigned flag = 0;
unsigned iteration = 0;
unsigned test;
unsigned testNum;
unsigned testHashValue;
clock_t startTime;
clock_t endTime;
unsigned sum = 0;
std::cout << "How many hash functions?" << std::endl;
std::cin >> n_function;
std::cout << "Which task?" << std::endl;
std::cin >> task;
srand(time(NULL));
switch (task)
{
case 1:
N = pow(2, 25); //33554432
limit = ceil(4 * log10((double)N));
unsigned s;
std::cout << "Input s:";
std::cin >> s;
entryLength = pow(2, s);
for (unsigned z = 0; z < 5; z++) {
blockNum = ceil((double)entryLength / blockSize);
entry = new unsigned[entryLength];
std::cout << "Generating random numbers between 0~10000000..." << std::endl;
for (unsigned i = 0; i < entryLength; i++) {
entry[i] = myrand() % 10000000;
}
std::cout << "Generating a,b..." << std::endl;
a = new unsigned[n_function];
b = new unsigned[n_function];
generate_a_b(n_function, a, b);
//for (unsigned i = 0; i < n_function; i++) {
// a[i] = rand() % 10;
// b[i] = rand() % 10;
// if (i != 0) {
// while (a[i]==a[i-1] || b[i]==b[i-1]){
// a[i] = rand() % 10;
// b[i] = rand() % 10;
// }
// }
//}
std::cout << "Initilizing hashTable..." << std::endl;
hashTable = new unsigned[N];
memset(hashTable, 0xffffffff, N * sizeof(unsigned));
std::cout << "Initilizing collisionTable..." << std::endl;
collision = new unsigned[entryLength];
memset(collision, 0, entryLength * sizeof(unsigned));
std::cout << "Initilizing functionIndex..." << std::endl;
function = new unsigned[entryLength];
memset(function, 0, entryLength * sizeof(unsigned));
std::cout << "Allocating device memory..." << std::endl;
err = cudaMalloc((void**)&d_entry, entryLength * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating entry[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_a, n_function * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating a[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_b, n_function * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating b[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_collision, entryLength * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating collision[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_function, entryLength * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating functionIndex[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_hashTable, N * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating hashTable[] failed" << std::endl;
goto Error;
}
std::cout << "Copying memory from host to device..." << std::endl;
err = cudaMemcpy(d_hashTable, hashTable, N * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy hashTable" << std::endl;
goto Error;
}
err = cudaMemcpy(d_a, a, n_function * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy a[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_b, b, n_function * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy b[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_entry, entry, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy entry[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_function, function, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy functionIndex[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_collision, collision, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy collision[]" << std::endl;
goto Error;
}
iteration = 0;
startTime = clock();
do {
flag = 0;
//Restarting hash
if (iteration == limit) {
iteration = 0;
std::cout << ".........Rehash........." << std::endl;
generate_a_b(n_function, a, b);
memset(hashTable, 0xffffffff, N * sizeof(unsigned));
memset(function, 0, entryLength * sizeof(unsigned));
memset(collision, 0, entryLength * sizeof(unsigned));
std::cout << "Recopying memory from host to device..." << std::endl;
err = cudaMemcpy(d_hashTable, hashTable, N * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy hashTable" << std::endl;
goto Error;
}
err = cudaMemcpy(d_a, a, n_function * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy a[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_b, b, n_function * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy b[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_entry, entry, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy entry[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_function, function, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy functionIndex[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_collision, collision, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy collision[]" << std::endl;
goto Error;
}
}
iteration++;
cuckooHash << < blockNum, blockSize >> > (d_hashTable,
d_a, d_b,
d_entry,
d_function,
d_collision,
n_function, N, p);
detectCollision << < blockNum, blockSize >> > (d_hashTable,
d_a, d_b,
d_entry,
d_function,
d_collision,
n_function, N, p);
std::cout << "Finish hash " << iteration << " times" << std::endl;
				// Copy collision back
				err = cudaMemcpy(collision, d_collision, entryLength * sizeof(unsigned), cudaMemcpyDeviceToHost);
				if (err != cudaSuccess) {
					std::cout << "Copy collision failed from device to host" << std::endl;
std::cout << cudaGetErrorString(err) << std::endl;
goto Error;
}
for (unsigned i = 0; i < entryLength; i++) {
flag += collision[i];
}
std::cout << flag << " collisions" << std::endl;
} while (flag != 0);
endTime = clock();
std::cout << "Hash Done!" << std::endl;
std::cout << "time:" << (double)(endTime - startTime) / CLOCKS_PER_SEC << "s" << std::endl;
std::cout << std::endl;
//////////test
//cudaMemcpy(function, d_function, entryLength * sizeof(unsigned), cudaMemcpyDeviceToHost);
//cudaMemcpy(hashTable, d_hashTable, N * sizeof(unsigned), cudaMemcpyDeviceToHost);
//std::cout << "Checking Hash..." << std::endl;
//for (unsigned i = 0; i < 10; i++) {
// test = myrand() % entryLength;
// testNum = (function[test] - 1) % n_function;
// testHashValue = ((a[testNum] * entry[test] + b[testNum]) % p) % N;
// std::cout << "--" << entry[test] << " " << testNum << " ";
// if (hashTable[testHashValue] == entry[test]) {
// std::cout << "correct" << std::endl;
// } else {
// std::cout << "incorrect" << std::endl;
// }
//}
}
break;
case 2:
N = pow(2, 25); //33554432
limit = ceil(4 * log10((double)N));
unsigned ii;
std::cout << "Input i:";
std::cin >> ii;
entryLength = pow(2, 24);
for (unsigned z = 0; z < 5; z++) {
blockNum = ceil((double)entryLength / blockSize);
entry = new unsigned[entryLength];
std::cout << "Generating random numbers between 0~10000000..." << std::endl;
for (unsigned i = 0; i < entryLength; i++) {
entry[i] = myrand() % 10000000;
}
std::cout << "Generating a,b..." << std::endl;
a = new unsigned[n_function];
b = new unsigned[n_function];
generate_a_b(n_function, a, b);
//for (unsigned i = 0; i < n_function; i++) {
// a[i] = rand() % 10;
// b[i] = rand() % 10;
// if (i != 0) {
// while (a[i]==a[i-1] || b[i]==b[i-1]){
// a[i] = rand() % 10;
// b[i] = rand() % 10;
// }
// }
//}
std::cout << "Initilizing hashTable..." << std::endl;
hashTable = new unsigned[N];
memset(hashTable, 0xffffffff, N * sizeof(unsigned));
std::cout << "Initilizing collisionTable..." << std::endl;
collision = new unsigned[entryLength];
memset(collision, 0, entryLength * sizeof(unsigned));
std::cout << "Initilizing functionIndex..." << std::endl;
function = new unsigned[entryLength];
memset(function, 0, entryLength * sizeof(unsigned));
std::cout << "Allocating device memory..." << std::endl;
err = cudaMalloc((void**)&d_entry, entryLength * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating entry[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_a, n_function * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating a[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_b, n_function * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating b[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_collision, entryLength * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating collision[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_function, entryLength * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating functionIndex[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_hashTable, N * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating hashTable[] failed" << std::endl;
goto Error;
}
std::cout << "Copying memory from host to device..." << std::endl;
err = cudaMemcpy(d_hashTable, hashTable, N * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy hashTable" << std::endl;
goto Error;
}
err = cudaMemcpy(d_a, a, n_function * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy a[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_b, b, n_function * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy b[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_entry, entry, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy entry[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_function, function, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy functionIndex[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_collision, collision, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy collision[]" << std::endl;
goto Error;
}
iteration = 0;
do {
flag = 0;
//Restarting hash
if (iteration == limit) {
iteration = 0;
std::cout << ".........Rehash........." << std::endl;
generate_a_b(n_function, a, b);
memset(hashTable, 0xffffffff, N * sizeof(unsigned));
memset(function, 0, entryLength * sizeof(unsigned));
memset(collision, 0, entryLength * sizeof(unsigned));
std::cout << "Recopying memory from host to device..." << std::endl;
err = cudaMemcpy(d_hashTable, hashTable, N * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy hashTable" << std::endl;
goto Error;
}
err = cudaMemcpy(d_a, a, n_function * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy a[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_b, b, n_function * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy b[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_entry, entry, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy entry[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_function, function, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy functionIndex[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_collision, collision, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy collision[]" << std::endl;
goto Error;
}
}
iteration++;
cuckooHash <<< blockNum, blockSize >>> (d_hashTable,
d_a, d_b,
d_entry,
d_function,
d_collision,
n_function, N, p);
detectCollision <<< blockNum, blockSize >>> (d_hashTable,
d_a, d_b,
d_entry,
d_function,
d_collision,
n_function, N, p);
std::cout << "Finish hash " << iteration << " times" << std::endl;
				// Copy collision back
				err = cudaMemcpy(collision, d_collision, entryLength * sizeof(unsigned), cudaMemcpyDeviceToHost);
				if (err != cudaSuccess) {
					std::cout << "Copy collision failed from device to host" << std::endl;
goto Error;
}
for (unsigned i = 0; i < entryLength; i++) {
flag += collision[i];
}
std::cout << flag << " collisions" << std::endl;
} while (flag != 0);
std::cout << "Hash Done!" << std::endl;
//////////////////////////////////
///////// Look up part
std::cout << "Initilizing lookup part..." << std::endl;
cudaMemcpy(hashTable, d_hashTable, N * sizeof(unsigned), cudaMemcpyDeviceToHost);
searchEntry = new unsigned[entryLength];
dict = new unsigned[entryLength];
//add random key in to search entry
for (unsigned i = 0; i < entryLength; i++) {
//std::cout << myrand() << std::endl;
if (i < entryLength*(1 - ii*0.1)) {
unsigned randIdx = myrand() % entryLength;
//std::cout << randIdx << " ";
searchEntry[i] = entry[randIdx];
} else {
searchEntry[i] = myrand();
}
}
//std::cout << std::endl;
//store if find
memset(dict, 0, entryLength * sizeof(unsigned));
//allocate cuda memory for search
err = cudaMalloc((void**)&d_searchEntry, entryLength * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "allocate searchEntry fail" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_dict, entryLength * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "allocate dict fail" << std::endl;
goto Error;
}
//copy data from host to device
err = cudaMemcpy(d_searchEntry, searchEntry, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "Fail to copy search Entry to device" << std::endl;
goto Error;
}
err = cudaMemcpy(d_dict, dict, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "Fail to copy dict to device" << std::endl;
goto Error;
}
std::cout << "Start lookup keys..." << std::endl;
///////////////DEBUG
//sum = 0;
//for (unsigned i = 0; i < entryLength; i++) {
// for (unsigned j = 0; j < n_function; j++) {
// unsigned hashValue = ((a[j] * searchEntry[i] + b[j]) % p) % N;
// if (hashTable[hashValue] == searchEntry[i]) {
// sum++;
// break;
// }
// }
//}
//std::cout << "--------------" << sum/entryLength << std::endl;
startTime = clock();
lookup <<< blockNum, blockSize >>> (d_hashTable,
d_a, d_b,
d_searchEntry,
d_dict,
n_function, N, p);
endTime = clock();
std::cout << "Lookup done" << std::endl;
//Copy dict back to host
err = cudaMemcpy(dict, d_dict, entryLength * sizeof(unsigned), cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
std::cout << "Fail to copy dict back to device" << std::endl;
goto Error;
}
sum = 0;
for (unsigned i = 0; i < entryLength; i++) {
sum += dict[i];
}
std::cout << "Hash Hit: " << (double)sum / (double)entryLength * 100 << "%";
std::cout << " with " << (double)(endTime - startTime) / CLOCKS_PER_SEC << "s" << std::endl;
std::cout << std::endl;
}
break;
case 3:
//N = pow(2, 25); //33554432
float alpha;
std::cout << "input alpha: ";
std::cin >> alpha;
entryLength = pow(2, 24);
std::cout << entryLength << " ";
N = alpha*entryLength;
std::cout << N << " ";
limit = ceil(4 * log10((double)N));
std::cout << limit << std::endl;
for (unsigned z = 0; z < 5; z++) {
blockNum = ceil((double)entryLength / blockSize);
entry = new unsigned[entryLength];
std::cout << "Generating random numbers between 0~10000000..." << std::endl;
for (unsigned i = 0; i < entryLength; i++) {
entry[i] = myrand() % 10000000;
}
std::cout << "Generating a,b..." << std::endl;
a = new unsigned[n_function];
b = new unsigned[n_function];
generate_a_b(n_function, a, b);
//for (unsigned i = 0; i < n_function; i++) {
// a[i] = rand() % 10;
// b[i] = rand() % 10;
// if (i != 0) {
// while (a[i]==a[i-1] || b[i]==b[i-1]){
// a[i] = rand() % 10;
// b[i] = rand() % 10;
// }
// }
//}
std::cout << "Initilizing hashTable..." << std::endl;
hashTable = new unsigned[N];
memset(hashTable, 0xffffffff, N * sizeof(unsigned));
std::cout << "Initilizing collisionTable..." << std::endl;
collision = new unsigned[entryLength];
memset(collision, 0, entryLength * sizeof(unsigned));
std::cout << "Initilizing functionIndex..." << std::endl;
function = new unsigned[entryLength];
memset(function, 0, entryLength * sizeof(unsigned));
std::cout << "Allocating device memory..." << std::endl;
err = cudaMalloc((void**)&d_entry, entryLength * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating entry[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_a, n_function * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating a[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_b, n_function * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating b[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_collision, entryLength * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating collision[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_function, entryLength * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating functionIndex[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_hashTable, N * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating hashTable[] failed" << std::endl;
goto Error;
}
std::cout << "Copying memory from host to device..." << std::endl;
err = cudaMemcpy(d_hashTable, hashTable, N * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy hashTable" << std::endl;
goto Error;
}
err = cudaMemcpy(d_a, a, n_function * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy a[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_b, b, n_function * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy b[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_entry, entry, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy entry[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_function, function, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy functionIndex[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_collision, collision, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy collision[]" << std::endl;
goto Error;
}
iteration = 0;
startTime = clock();
do {
flag = 0;
//Restarting hash
if (iteration == limit) {
iteration = 0;
std::cout << ".........Rehash........." << std::endl;
generate_a_b(n_function, a, b);
memset(hashTable, 0xffffffff, N * sizeof(unsigned));
memset(function, 0, entryLength * sizeof(unsigned));
memset(collision, 0, entryLength * sizeof(unsigned));
std::cout << "Recopying memory from host to device..." << std::endl;
err = cudaMemcpy(d_hashTable, hashTable, N * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy hashTable" << std::endl;
goto Error;
}
err = cudaMemcpy(d_a, a, n_function * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy a[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_b, b, n_function * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy b[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_entry, entry, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy entry[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_function, function, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy functionIndex[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_collision, collision, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy collision[]" << std::endl;
goto Error;
}
}
iteration++;
cuckooHash << < blockNum, blockSize >> > (d_hashTable,
d_a, d_b,
d_entry,
d_function,
d_collision,
n_function, N, p);
detectCollision << < blockNum, blockSize >> > (d_hashTable,
d_a, d_b,
d_entry,
d_function,
d_collision,
n_function, N, p);
std::cout << "Finish hash " << iteration << " times" << std::endl;
				// Copy collision back
				err = cudaMemcpy(collision, d_collision, entryLength * sizeof(unsigned), cudaMemcpyDeviceToHost);
				if (err != cudaSuccess) {
					std::cout << "Copy collision failed from device to host" << std::endl;
std::cout << cudaGetErrorString(err) << std::endl;
goto Error;
}
for (unsigned i = 0; i < entryLength; i++) {
flag += collision[i];
}
std::cout << flag << " collisions" << std::endl;
} while (flag != 0);
endTime = clock();
std::cout << "Hash Done!" << std::endl;
std::cout << "time:" << (double)(endTime - startTime) / CLOCKS_PER_SEC << "s" << std::endl;
std::cout << std::endl;
////////////test
//cudaMemcpy(function, d_function, entryLength * sizeof(unsigned), cudaMemcpyDeviceToHost);
//cudaMemcpy(hashTable, d_hashTable, N * sizeof(unsigned), cudaMemcpyDeviceToHost);
//std::cout << "Checking Hash..." << std::endl;
//for (unsigned i = 0; i < 10; i++) {
// test = myrand() % entryLength;
// testNum = (function[test] - 1) % n_function;
// testHashValue = ((a[testNum] * entry[test] + b[testNum]) % p) % N;
// std::cout << "--" << entry[test] << " " << testNum << " ";
// if (hashTable[testHashValue] == entry[test]) {
// std::cout << "correct" << std::endl;
// } else {
// std::cout << "incorrect" << std::endl;
// }
//}
}
break;
case 4:
float beta;
std::cout << "input bound coefficient: ";
std::cin >> beta;
entryLength = pow(2, 24);
std::cout << entryLength << " ";
N = 1.2*entryLength;
std::cout << N << " ";
limit = ceil(beta * log10((double)N));
std::cout << limit << std::endl;
for (unsigned z = 0; z < 5; z++) {
blockNum = ceil((double)entryLength / blockSize);
entry = new unsigned[entryLength];
std::cout << "Generating random numbers between 0~10000000..." << std::endl;
for (unsigned i = 0; i < entryLength; i++) {
entry[i] = myrand() % 10000000;
}
std::cout << "Generating a,b..." << std::endl;
a = new unsigned[n_function];
b = new unsigned[n_function];
generate_a_b(n_function, a, b);
//for (unsigned i = 0; i < n_function; i++) {
// a[i] = rand() % 10;
// b[i] = rand() % 10;
// if (i != 0) {
// while (a[i]==a[i-1] || b[i]==b[i-1]){
// a[i] = rand() % 10;
// b[i] = rand() % 10;
// }
// }
//}
std::cout << "Initilizing hashTable..." << std::endl;
hashTable = new unsigned[N];
memset(hashTable, 0xffffffff, N * sizeof(unsigned));
std::cout << "Initilizing collisionTable..." << std::endl;
collision = new unsigned[entryLength];
memset(collision, 0, entryLength * sizeof(unsigned));
std::cout << "Initilizing functionIndex..." << std::endl;
function = new unsigned[entryLength];
memset(function, 0, entryLength * sizeof(unsigned));
std::cout << "Allocating device memory..." << std::endl;
err = cudaMalloc((void**)&d_entry, entryLength * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating entry[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_a, n_function * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating a[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_b, n_function * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating b[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_collision, entryLength * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating collision[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_function, entryLength * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating functionIndex[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_hashTable, N * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating hashTable[] failed" << std::endl;
goto Error;
}
std::cout << "Copying memory from host to device..." << std::endl;
err = cudaMemcpy(d_hashTable, hashTable, N * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy hashTable" << std::endl;
goto Error;
}
err = cudaMemcpy(d_a, a, n_function * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy a[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_b, b, n_function * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy b[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_entry, entry, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy entry[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_function, function, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy functionIndex[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_collision, collision, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy collision[]" << std::endl;
goto Error;
}
iteration = 0;
startTime = clock();
do {
flag = 0;
//Restarting hash
if (iteration == limit) {
iteration = 0;
std::cout << ".........Rehash........." << std::endl;
generate_a_b(n_function, a, b);
memset(hashTable, 0xffffffff, N * sizeof(unsigned));
memset(function, 0, entryLength * sizeof(unsigned));
memset(collision, 0, entryLength * sizeof(unsigned));
std::cout << "Recopying memory from host to device..." << std::endl;
err = cudaMemcpy(d_hashTable, hashTable, N * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy hashTable" << std::endl;
goto Error;
}
err = cudaMemcpy(d_a, a, n_function * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy a[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_b, b, n_function * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy b[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_entry, entry, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy entry[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_function, function, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy functionIndex[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_collision, collision, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy collision[]" << std::endl;
goto Error;
}
}
iteration++;
cuckooHash << < blockNum, blockSize >> > (d_hashTable,
d_a, d_b,
d_entry,
d_function,
d_collision,
n_function, N, p);
detectCollision << < blockNum, blockSize >> > (d_hashTable,
d_a, d_b,
d_entry,
d_function,
d_collision,
n_function, N, p);
std::cout << "Finish hash " << iteration << " times" << std::endl;
				// Copy collision back
				err = cudaMemcpy(collision, d_collision, entryLength * sizeof(unsigned), cudaMemcpyDeviceToHost);
				if (err != cudaSuccess) {
					std::cout << "Copy collision failed from device to host" << std::endl;
std::cout << cudaGetErrorString(err) << std::endl;
goto Error;
}
for (unsigned i = 0; i < entryLength; i++) {
flag += collision[i];
}
std::cout << flag << " collisions" << std::endl;
} while (flag != 0);
endTime = clock();
std::cout << "Hash Done!" << std::endl;
std::cout << "time:" << (double)(endTime - startTime) / CLOCKS_PER_SEC << "s" << std::endl;
std::cout << std::endl;
//////////test
//cudaMemcpy(function, d_function, entryLength * sizeof(unsigned), cudaMemcpyDeviceToHost);
//cudaMemcpy(hashTable, d_hashTable, N * sizeof(unsigned), cudaMemcpyDeviceToHost);
//std::cout << "Checking Hash..." << std::endl;
//for (unsigned i = 0; i < 10; i++) {
// test = myrand() % entryLength;
// testNum = (function[test] - 1) % n_function;
// testHashValue = ((a[testNum] * entry[test] + b[testNum]) % p) % N;
// std::cout << "--" << entry[test] << " " << testNum << " ";
// if (hashTable[testHashValue] == entry[test]) {
// std::cout << "correct" << std::endl;
// } else {
// std::cout << "incorrect" << std::endl;
// }
//}
}
break;
case 5:
entryLength = pow(2, 24);
std::cout << entryLength << " ";
N = 1.2*entryLength;
std::cout << N << " ";
limit = ceil(6 * log10((double)N));
std::cout << limit << std::endl;
for (unsigned z = 0; z < 5; z++) {
blockNum = ceil((double)entryLength / blockSize);
entry = new unsigned[entryLength];
std::cout << "Generating random numbers between 0~10000000..." << std::endl;
for (unsigned i = 0; i < entryLength; i++) {
entry[i] = myrand() % 10000000;
}
std::cout << "Generating a,b..." << std::endl;
a = new unsigned[n_function];
b = new unsigned[n_function];
generate_a_b(n_function, a, b);
//for (unsigned i = 0; i < n_function; i++) {
// a[i] = rand() % 10;
// b[i] = rand() % 10;
// if (i != 0) {
// while (a[i]==a[i-1] || b[i]==b[i-1]){
// a[i] = rand() % 10;
// b[i] = rand() % 10;
// }
// }
//}
std::cout << "Initilizing hashTable..." << std::endl;
hashTable = new unsigned[N];
memset(hashTable, 0xffffffff, N * sizeof(unsigned));
std::cout << "Initilizing collisionTable..." << std::endl;
collision = new unsigned[entryLength];
memset(collision, 0, entryLength * sizeof(unsigned));
std::cout << "Initilizing functionIndex..." << std::endl;
function = new unsigned[entryLength];
memset(function, 0, entryLength * sizeof(unsigned));
std::cout << "Allocating device memory..." << std::endl;
err = cudaMalloc((void**)&d_entry, entryLength * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating entry[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_a, n_function * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating a[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_b, n_function * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating b[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_collision, entryLength * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating collision[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_function, entryLength * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating functionIndex[] failed" << std::endl;
goto Error;
}
err = cudaMalloc((void**)&d_hashTable, N * sizeof(unsigned));
if (err != cudaSuccess) {
std::cout << "-->Allocating hashTable[] failed" << std::endl;
goto Error;
}
std::cout << "Copying memory from host to device..." << std::endl;
err = cudaMemcpy(d_hashTable, hashTable, N * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy hashTable" << std::endl;
goto Error;
}
err = cudaMemcpy(d_a, a, n_function * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy a[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_b, b, n_function * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy b[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_entry, entry, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy entry[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_function, function, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy functionIndex[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_collision, collision, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy collision[]" << std::endl;
goto Error;
}
iteration = 0;
startTime = clock();
do {
flag = 0;
//Restarting hash
if (iteration == limit) {
iteration = 0;
std::cout << ".........Rehash........." << std::endl;
generate_a_b(n_function, a, b);
memset(hashTable, 0xffffffff, N * sizeof(unsigned));
memset(function, 0, entryLength * sizeof(unsigned));
memset(collision, 0, entryLength * sizeof(unsigned));
std::cout << "Recopying memory from host to device..." << std::endl;
err = cudaMemcpy(d_hashTable, hashTable, N * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy hashTable" << std::endl;
goto Error;
}
err = cudaMemcpy(d_a, a, n_function * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy a[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_b, b, n_function * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy b[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_entry, entry, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy entry[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_function, function, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy functionIndex[]" << std::endl;
goto Error;
}
err = cudaMemcpy(d_collision, collision, entryLength * sizeof(unsigned), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
std::cout << "-->Fail to copy collision[]" << std::endl;
goto Error;
}
}
iteration++;
cuckooHash << < blockNum, blockSize >> > (d_hashTable,
d_a, d_b,
d_entry,
d_function,
d_collision,
n_function, N, p);
detectCollision << < blockNum, blockSize >> > (d_hashTable,
d_a, d_b,
d_entry,
d_function,
d_collision,
n_function, N, p);
std::cout << "Finish hash " << iteration << " times" << std::endl;
				// Copy collision back
				err = cudaMemcpy(collision, d_collision, entryLength * sizeof(unsigned), cudaMemcpyDeviceToHost);
				if (err != cudaSuccess) {
					std::cout << "Copy collision failed from device to host" << std::endl;
std::cout << cudaGetErrorString(err) << std::endl;
goto Error;
}
for (unsigned i = 0; i < entryLength; i++) {
flag += collision[i];
}
std::cout << flag << " collisions" << std::endl;
} while (flag != 0);
endTime = clock();
std::cout << "Hash Done!" << std::endl;
std::cout << "time:" << (double)(endTime - startTime) / CLOCKS_PER_SEC << "s" << std::endl;
std::cout << std::endl;
//////////test
//cudaMemcpy(function, d_function, entryLength * sizeof(unsigned), cudaMemcpyDeviceToHost);
//cudaMemcpy(hashTable, d_hashTable, N * sizeof(unsigned), cudaMemcpyDeviceToHost);
//std::cout << "Checking Hash..." << std::endl;
//for (unsigned i = 0; i < 10; i++) {
// test = myrand() % entryLength;
// testNum = (function[test] - 1) % n_function;
// testHashValue = ((a[testNum] * entry[test] + b[testNum]) % p) % N;
// std::cout << "--" << entry[test] << " " << testNum << " ";
// if (hashTable[testHashValue] == entry[test]) {
// std::cout << "correct" << std::endl;
// } else {
// std::cout << "incorrect" << std::endl;
// }
//}
}
break;
default:
std::cout << "No such task. Please run again." << std::endl;
goto Error;
break;
}
Error:
	// cudaFree takes the device pointer itself; passing &d_x frees nothing (a host address) and leaks the device memory
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_collision);
	cudaFree(d_dict);
	cudaFree(d_entry);
	cudaFree(d_function);
	cudaFree(d_searchEntry);
	cudaFree(d_hashTable);
system("pause");
return 0;
}
|
fafab49cac99dcbc32b897b3c0ba13d2989e65b8.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/all.h>
#include <torch/python.h>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
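// Bilinear sampling helper: fetches the four texels around (x, y) in value[B][G][C] and
// returns their bilinearly weighted sum; coordinates falling outside the feature map yield 0.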
template <typename scalar_t>
__device__ __forceinline__ scalar_t get_pixel_and_weights(
const torch::PackedTensorAccessor32<scalar_t, 5, torch::RestrictPtrTraits> value,
scalar_t x, scalar_t y, int H, int W, const int B, const int C, const int G){
// scalar_t ltx = floor(x+0.00001);
// scalar_t lty = floor(y+0.00001);
// scalar_t rtx = ceil(x+0.00001);
// scalar_t rty = floor(y+0.00001);
// scalar_t lbx = floor(x+0.00001);
// scalar_t lby = ceil(y+0.00001);
// scalar_t rbx = ceil(x+0.00001);
// scalar_t rby = ceil(y+0.00001);
scalar_t lower_x = floor(x+0.00001);
scalar_t upper_x = lower_x + 1;
scalar_t lower_y = floor(y+0.00001);
scalar_t upper_y = lower_y + 1;
// printf("Coord: %d %d %d %d\n",int(ltx), int(lty), int(rtx), int(rty));
if (lower_x<0 || upper_x>(W-1) || lower_y<0 || upper_y>(H-1)){
return 0;
}
scalar_t lt, rt, lb, rb;
lt = value[B][G][C][int(lower_y)][int(lower_x)];
rt = value[B][G][C][int(lower_y)][int(upper_x)];
lb = value[B][G][C][int(upper_y)][int(lower_x)];
rb = value[B][G][C][int(upper_y)][int(upper_x)];
scalar_t w1,w2,w3,w4;
w1 = (upper_x - x) * (upper_y - y);
w2 = (x - lower_x) * (upper_y - y);
w3 = (upper_x - x) * (y - lower_y);
w4 = (x - lower_x) * (y - lower_y);
// printf("Value:\t%f %f %f %f\nWeight:\t%f %f %f %f\n", lt, rt, lb, rb, w1, w2, w3, w4);
// return {lt, rt, lb, rb, w1, w2, w3, w4};
return lt*w1 + rt*w2 + lb*w3 + rb*w4;
}
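// gridDim is (C, B, G): one block per (channel, batch, group). Threads stride over the output
// pixels; for each pixel the masked bilinear samples of the Pmax offset points are accumulated.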
template <typename scalar_t>
__global__ void dcn_cuda_kernel(
const torch::PackedTensorAccessor32<scalar_t, 5, torch::RestrictPtrTraits> value,
const torch::PackedTensorAccessor32<scalar_t, 6, torch::RestrictPtrTraits> grid,
const torch::PackedTensorAccessor32<scalar_t, 5, torch::RestrictPtrTraits> mask,
torch::PackedTensorAccessor32<scalar_t, 5, torch::RestrictPtrTraits> result,
const int H, const int W, const int Hout, const int Wout, const int Pmax){
	// channel, batch and group are mapped to block indices here; would folding channels into threads be faster?
const int B = blockIdx.y;
const int C = blockIdx.x;
const int G = blockIdx.z;
const int N = blockDim.x;
const int n_thread = threadIdx.x;
int i,j;
scalar_t x, y, v=0;
// std::vector<scalar_t> buff;
	for (i=n_thread; i<Hout*Wout; i=i+N){
		x = i%Wout;
		y = i/Wout;
		v = 0; // reset the accumulator for each output pixel; otherwise sums from earlier pixels handled by this thread carry over
		for (j=0;j<Pmax;j++){
v += get_pixel_and_weights(value, grid[B][y][x][G][j][0]*(W-1), grid[B][y][x][G][j][1]*(H-1), H, W, B, C, G) * mask[B][y][x][G][j];
}
// printf("%f %d %d %d\n", v, B, C, G);
// v = buff[0]*buff[4] + buff[1]*buff[5] + buff[2]*buff[6] + buff[3]*buff[7];
result[B][G][C][y][x] = v;
}
}
torch::Tensor dcn_cuda(torch::Tensor value, torch::Tensor grid, torch::Tensor mask){
// we use the grid within range 0-1
//
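	// Expected layouts, inferred from the size() reads below: value [B, G, C, H, W],
	// grid [B, Hout, Wout, G, P, 2] with coordinates normalized to [0, 1], mask [B, Hout, Wout, G, P].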
AT_ASSERTM(value.size(0)==grid.size(0), "Batch size of value and grid should be the same");
AT_ASSERTM(mask.size(0)==grid.size(0), "Batch size of mask and grid should be the same");
AT_ASSERTM(value.size(1)==grid.size(3), "Group number of value and grid should be the same");
AT_ASSERTM(value.size(1)==mask.size(3), "Group number of value and mask should be the same");
AT_ASSERTM(mask.size(4)==grid.size(4), "Point number of mask and grid should be the same");
AT_ASSERTM(mask.size(1)==grid.size(1), "Height of mask and grid should be the same");
AT_ASSERTM(mask.size(2)==grid.size(2), "Width of mask and grid should be the same");
const int B = value.size(0);
const int G = value.size(1);
const int C = value.size(2);
const int H = value.size(3);
const int W = value.size(4);
const int Hout = grid.size(1);
const int Wout = grid.size(2);
const int P = grid.size(4);
const int threads = 1024;
const dim3 blocks(C,B,G);
auto result = torch::zeros({B, G, C, Hout, Wout}, value.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(value.type(), "generate grid sample", ([&] {
hipLaunchKernelGGL(( dcn_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
value.packed_accessor32<scalar_t, 5, torch::RestrictPtrTraits>(),
grid.packed_accessor32<scalar_t, 6, torch::RestrictPtrTraits>(),
mask.packed_accessor32<scalar_t, 5, torch::RestrictPtrTraits>(),
result.packed_accessor32<scalar_t, 5, torch::RestrictPtrTraits>(),
H, W, Hout, Wout, P);
}));
return result;
}
| fafab49cac99dcbc32b897b3c0ba13d2989e65b8.cu | #include <torch/all.h>
#include <torch/python.h>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
template <typename scalar_t>
__device__ __forceinline__ scalar_t get_pixel_and_weights(
const torch::PackedTensorAccessor32<scalar_t, 5, torch::RestrictPtrTraits> value,
scalar_t x, scalar_t y, int H, int W, const int B, const int C, const int G){
// scalar_t ltx = floor(x+0.00001);
// scalar_t lty = floor(y+0.00001);
// scalar_t rtx = ceil(x+0.00001);
// scalar_t rty = floor(y+0.00001);
// scalar_t lbx = floor(x+0.00001);
// scalar_t lby = ceil(y+0.00001);
// scalar_t rbx = ceil(x+0.00001);
// scalar_t rby = ceil(y+0.00001);
scalar_t lower_x = floor(x+0.00001);
scalar_t upper_x = lower_x + 1;
scalar_t lower_y = floor(y+0.00001);
scalar_t upper_y = lower_y + 1;
// printf("Coord: %d %d %d %d\n",int(ltx), int(lty), int(rtx), int(rty));
if (lower_x<0 || upper_x>(W-1) || lower_y<0 || upper_y>(H-1)){
return 0;
}
scalar_t lt, rt, lb, rb;
lt = value[B][G][C][int(lower_y)][int(lower_x)];
rt = value[B][G][C][int(lower_y)][int(upper_x)];
lb = value[B][G][C][int(upper_y)][int(lower_x)];
rb = value[B][G][C][int(upper_y)][int(upper_x)];
scalar_t w1,w2,w3,w4;
w1 = (upper_x - x) * (upper_y - y);
w2 = (x - lower_x) * (upper_y - y);
w3 = (upper_x - x) * (y - lower_y);
w4 = (x - lower_x) * (y - lower_y);
// printf("Value:\t%f %f %f %f\nWeight:\t%f %f %f %f\n", lt, rt, lb, rb, w1, w2, w3, w4);
// return {lt, rt, lb, rb, w1, w2, w3, w4};
return lt*w1 + rt*w2 + lb*w3 + rb*w4;
}
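// gridDim is (C, B, G): one block per (channel, batch, group). Threads stride over the output
// pixels; for each pixel the masked bilinear samples of the Pmax offset points are accumulated.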
template <typename scalar_t>
__global__ void dcn_cuda_kernel(
const torch::PackedTensorAccessor32<scalar_t, 5, torch::RestrictPtrTraits> value,
const torch::PackedTensorAccessor32<scalar_t, 6, torch::RestrictPtrTraits> grid,
const torch::PackedTensorAccessor32<scalar_t, 5, torch::RestrictPtrTraits> mask,
torch::PackedTensorAccessor32<scalar_t, 5, torch::RestrictPtrTraits> result,
const int H, const int W, const int Hout, const int Wout, const int Pmax){
	// channel, batch and group are mapped to block indices here; would folding channels into threads be faster?
const int B = blockIdx.y;
const int C = blockIdx.x;
const int G = blockIdx.z;
const int N = blockDim.x;
const int n_thread = threadIdx.x;
int i,j;
scalar_t x, y, v=0;
// std::vector<scalar_t> buff;
	for (i=n_thread; i<Hout*Wout; i=i+N){
		x = i%Wout;
		y = i/Wout;
		v = 0; // reset the accumulator for each output pixel; otherwise sums from earlier pixels handled by this thread carry over
		for (j=0;j<Pmax;j++){
v += get_pixel_and_weights(value, grid[B][y][x][G][j][0]*(W-1), grid[B][y][x][G][j][1]*(H-1), H, W, B, C, G) * mask[B][y][x][G][j];
}
// printf("%f %d %d %d\n", v, B, C, G);
// v = buff[0]*buff[4] + buff[1]*buff[5] + buff[2]*buff[6] + buff[3]*buff[7];
result[B][G][C][y][x] = v;
}
}
torch::Tensor dcn_cuda(torch::Tensor value, torch::Tensor grid, torch::Tensor mask){
// we use the grid within range 0-1
//
AT_ASSERTM(value.size(0)==grid.size(0), "Batch size of value and grid should be the same");
AT_ASSERTM(mask.size(0)==grid.size(0), "Batch size of mask and grid should be the same");
AT_ASSERTM(value.size(1)==grid.size(3), "Group number of value and grid should be the same");
AT_ASSERTM(value.size(1)==mask.size(3), "Group number of value and mask should be the same");
AT_ASSERTM(mask.size(4)==grid.size(4), "Point number of mask and grid should be the same");
AT_ASSERTM(mask.size(1)==grid.size(1), "Height of mask and grid should be the same");
AT_ASSERTM(mask.size(2)==grid.size(2), "Width of mask and grid should be the same");
const int B = value.size(0);
const int G = value.size(1);
const int C = value.size(2);
const int H = value.size(3);
const int W = value.size(4);
const int Hout = grid.size(1);
const int Wout = grid.size(2);
const int P = grid.size(4);
const int threads = 1024;
const dim3 blocks(C,B,G);
auto result = torch::zeros({B, G, C, Hout, Wout}, value.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(value.type(), "generate grid sample", ([&] {
dcn_cuda_kernel<scalar_t><<<blocks, threads>>>(
value.packed_accessor32<scalar_t, 5, torch::RestrictPtrTraits>(),
grid.packed_accessor32<scalar_t, 6, torch::RestrictPtrTraits>(),
mask.packed_accessor32<scalar_t, 5, torch::RestrictPtrTraits>(),
result.packed_accessor32<scalar_t, 5, torch::RestrictPtrTraits>(),
H, W, Hout, Wout, P);
}));
return result;
}
|
274f22af7527c17e16754902cb2486f3da2d1874.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
void init(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
a[i] = i;
}
}
__global__
void doubleElements(int *a, int N)
{
int i;
i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
{
a[i] *= 2;
}
}
bool checkElementsAreDoubled(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
if (a[i] != i*2) return false;
}
return true;
}
int main()
{
int N = 1000;
int *a;
size_t size = N * sizeof(int);
/*
* Use `hipMallocManaged` to allocate pointer `a` available
* on both the host and the device.
*/
hipMallocManaged(&a, size);
init(a, N);
size_t threads_per_block = 256;
// to allocate the right number of blocks
size_t number_of_blocks = (N + threads_per_block - 1) / threads_per_block;
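  // The expression above is integer ceiling division, so every element gets a
  // thread even when N is not a multiple of the block size: with N = 1000 and
  // 256 threads per block it yields (1000 + 255) / 256 = 4 blocks (1024
  // threads), and the `if (i < N)` guard in the kernel idles the extra 24.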
hipLaunchKernelGGL(( doubleElements), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, a, N);
hipDeviceSynchronize();
bool areDoubled = checkElementsAreDoubled(a, N);
printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");
/*
* Use `hipFree` to free memory allocated
* with `hipMallocManaged`.
*/
hipFree(a);
} | 274f22af7527c17e16754902cb2486f3da2d1874.cu | #include <stdio.h>
void init(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
a[i] = i;
}
}
__global__
void doubleElements(int *a, int N)
{
int i;
i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
{
a[i] *= 2;
}
}
bool checkElementsAreDoubled(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
if (a[i] != i*2) return false;
}
return true;
}
int main()
{
int N = 1000;
int *a;
size_t size = N * sizeof(int);
/*
* Use `cudaMallocManaged` to allocate pointer `a` available
* on both the host and the device.
*/
cudaMallocManaged(&a, size);
init(a, N);
size_t threads_per_block = 256;
// to allocate the right number of blocks
size_t number_of_blocks = (N + threads_per_block - 1) / threads_per_block;
doubleElements<<<number_of_blocks, threads_per_block>>>(a, N);
cudaDeviceSynchronize();
bool areDoubled = checkElementsAreDoubled(a, N);
printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");
/*
* Use `cudaFree` to free memory allocated
* with `cudaMallocManaged`.
*/
cudaFree(a);
} |
fbb51cbe8b3b0ac753d56accf61ef9c25b663605.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialDilatedMaxPooling.cu"
#else
#include "../common.h"
static inline void THNN_(SpatialDilatedMaxPooling_shapeCheck)(
THCState *state,
THCTensor *input, THCTensor *gradOutput, THCIndexTensor *indices,
int kH, int kW, int dH, int dW, int padH, int padW,
int dilationH, int dilationW, bool ceil_mode) {
THArgCheck(kW > 0 && kH > 0, 5,
"kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
THArgCheck(dW > 0 && dH > 0, 8,
"stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
THArgCheck(dilationH > 0 && dilationW > 0, 12,
"dilation should be greater than zero, but got dilationH: %d dilationW: %d",
dilationH, dilationW);
int ndim = input->dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
int batchSize = 1;
if (ndim == 4) {
batchSize = input->size(0);
dimf++;
dimh++;
dimw++;
}
THCUNN_argCheck(state, !input->is_empty() && (ndim == 3 || ndim == 4), 2, input,
"non-empty 3D or 4D input tensor expected but got: %s");
THArgCheck(kW/2 >= padW && kH/2 >= padH, 2,
"pad should be smaller than half of kernel size, but got "
"padW = %d, padH = %d, kW = %d, kH = %d",
padW, padH, kW, kH);
int64_t nInputPlane = input->size(dimh-1);
int64_t nInputRows = input->size(dimh);
int64_t nInputCols = input->size(dimw);
int64_t nOutputRows, nOutputCols;
int64_t nOutputPlane = nInputPlane;
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
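  // The formulas above are the usual pooling output-size rule,
  //   outSize = floor_or_ceil((inSize - effectiveKernel + 2*pad) / stride) + 1
  // with effectiveKernel = dilation*(kernel-1) + 1. For example, nInputCols=7,
  // kW=3, dW=2, padW=1, dilationW=1 in floor mode gives (7-3+2)/2 + 1 = 4.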
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
if (nOutputCols < 1 || nOutputRows < 1)
THError("Given input size: (%dx%dx%d). "
"Calculated output size: (%dx%dx%d). Output size is too small",
nInputPlane,nInputRows,nInputCols,nInputPlane,nOutputRows,nOutputCols);
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane);
THCUNN_check_dim_size(state, gradOutput, ndim, dimh, nOutputRows);
THCUNN_check_dim_size(state, gradOutput, ndim, dimw, nOutputCols);
}
if (indices != NULL) {
THCUNN_check_dim_size_indices(state, indices, 4, 0, batchSize);
THCUNN_check_dim_size_indices(state, indices, 4, 1, nOutputPlane);
THCUNN_check_dim_size_indices(state, indices, 4, 2, nOutputRows);
THCUNN_check_dim_size_indices(state, indices, 4, 3, nOutputCols);
}
}
void THNN_(SpatialDilatedMaxPooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCIndexTensor *indices,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH,
bool ceil_mode)
{
THCUNN_assertSameGPU(state, 3, input, output, indices);
THNN_(SpatialDilatedMaxPooling_shapeCheck)
(state, input, NULL, NULL, kH, kW, dH, dW,
padH, padW, dilationH, dilationW, ceil_mode);
int64_t nInputCols, nInputRows, nInputPlane, batchSize;
int64_t nOutputCols, nOutputRows;
if (input->dim() == 3) {
nInputCols = input->size(2);
nInputRows = input->size(1);
nInputPlane = input->size(0);
batchSize = 1;
}
else
{
nInputCols = input->size(3);
nInputRows = input->size(2);
nInputPlane = input->size(1);
batchSize = input->size(0);
}
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
input = THCTensor_(newContiguous)(state, input);
real* input_data = THCTensor_(data)(state, input);
THCTensor_(resize4d)(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols);
THCUNN_resizeAs_indices(state, indices, output);
THCIndex_t* indices_data = THCIndexTensor_(data)(state, indices);
real* output_data = THCTensor_(data)(state, output);
int count = THCTensor_(nElement)(state, output);
hipLaunchKernelGGL(( MaxPoolForward<real, accreal>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
count, input_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW, dilationH, dilationW, output_data, indices_data);
THCudaCheck(hipGetLastError());
if(input->dim() == 3)
THCTensor_(resize3d)(state, output, nInputPlane, nOutputRows, nOutputCols);
THCTensor_(free)(state, input);
}
void THNN_(SpatialDilatedMaxPooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCIndexTensor *indices,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH,
bool ceil_mode)
{
THCUNN_assertSameGPU(state, 4, input, gradOutput, indices, gradInput);
THNN_(SpatialDilatedMaxPooling_shapeCheck)
(state, input, gradOutput, indices, kH, kW, dH, dW,
padH, padW, dilationH, dilationW, ceil_mode);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int64_t nInputCols, nInputRows, nInputPlane, batchSize;
int64_t nOutputCols, nOutputRows;
if (THTensor_nDimensionLegacyAll(input) == 3) {
nInputCols = input->size(2);
nInputRows = input->size(1);
nInputPlane = input->size(0);
batchSize = 1;
}
else
{
nInputCols = input->size(3);
nInputRows = input->size(2);
nInputPlane = input->size(1);
batchSize = input->size(0);
}
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
int count = THCTensor_(nElement)(state, input);
dim3 grid;
int imgcount = nInputCols * nInputRows;
const int blocks = (imgcount + BACKWARD_THREADS - 1) / BACKWARD_THREADS;
grid.x = blocks;
grid.y = batchSize;
grid.z = nInputPlane;
uint64_t maxGridY = THCState_getCurrentDeviceProperties(state)->maxGridSize[1];
uint64_t maxGridZ = THCState_getCurrentDeviceProperties(state)->maxGridSize[2];
if (maxGridY < grid.y) grid.y = maxGridY;
if (maxGridZ < grid.z) grid.z = maxGridZ;
hipLaunchKernelGGL(( MaxPoolBackward<real, accreal>) , dim3(grid), dim3(BACKWARD_THREADS), 0, THCState_getCurrentStream(state) ,
count,
THCTensor_(data)(state, gradOutput),
THCIndexTensor_(data)(state, indices),
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
THCTensor_(data)(state, gradInput));
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, gradOutput);
// clean
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
#endif
| fbb51cbe8b3b0ac753d56accf61ef9c25b663605.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialDilatedMaxPooling.cu"
#else
#include "../common.h"
static inline void THNN_(SpatialDilatedMaxPooling_shapeCheck)(
THCState *state,
THCTensor *input, THCTensor *gradOutput, THCIndexTensor *indices,
int kH, int kW, int dH, int dW, int padH, int padW,
int dilationH, int dilationW, bool ceil_mode) {
THArgCheck(kW > 0 && kH > 0, 5,
"kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
THArgCheck(dW > 0 && dH > 0, 8,
"stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
THArgCheck(dilationH > 0 && dilationW > 0, 12,
"dilation should be greater than zero, but got dilationH: %d dilationW: %d",
dilationH, dilationW);
int ndim = input->dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
int batchSize = 1;
if (ndim == 4) {
batchSize = input->size(0);
dimf++;
dimh++;
dimw++;
}
THCUNN_argCheck(state, !input->is_empty() && (ndim == 3 || ndim == 4), 2, input,
"non-empty 3D or 4D input tensor expected but got: %s");
THArgCheck(kW/2 >= padW && kH/2 >= padH, 2,
"pad should be smaller than half of kernel size, but got "
"padW = %d, padH = %d, kW = %d, kH = %d",
padW, padH, kW, kH);
int64_t nInputPlane = input->size(dimh-1);
int64_t nInputRows = input->size(dimh);
int64_t nInputCols = input->size(dimw);
int64_t nOutputRows, nOutputCols;
int64_t nOutputPlane = nInputPlane;
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
if (nOutputCols < 1 || nOutputRows < 1)
THError("Given input size: (%dx%dx%d). "
"Calculated output size: (%dx%dx%d). Output size is too small",
nInputPlane,nInputRows,nInputCols,nInputPlane,nOutputRows,nOutputCols);
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane);
THCUNN_check_dim_size(state, gradOutput, ndim, dimh, nOutputRows);
THCUNN_check_dim_size(state, gradOutput, ndim, dimw, nOutputCols);
}
if (indices != NULL) {
THCUNN_check_dim_size_indices(state, indices, 4, 0, batchSize);
THCUNN_check_dim_size_indices(state, indices, 4, 1, nOutputPlane);
THCUNN_check_dim_size_indices(state, indices, 4, 2, nOutputRows);
THCUNN_check_dim_size_indices(state, indices, 4, 3, nOutputCols);
}
}
void THNN_(SpatialDilatedMaxPooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCIndexTensor *indices,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH,
bool ceil_mode)
{
THCUNN_assertSameGPU(state, 3, input, output, indices);
THNN_(SpatialDilatedMaxPooling_shapeCheck)
(state, input, NULL, NULL, kH, kW, dH, dW,
padH, padW, dilationH, dilationW, ceil_mode);
int64_t nInputCols, nInputRows, nInputPlane, batchSize;
int64_t nOutputCols, nOutputRows;
if (input->dim() == 3) {
nInputCols = input->size(2);
nInputRows = input->size(1);
nInputPlane = input->size(0);
batchSize = 1;
}
else
{
nInputCols = input->size(3);
nInputRows = input->size(2);
nInputPlane = input->size(1);
batchSize = input->size(0);
}
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
input = THCTensor_(newContiguous)(state, input);
real* input_data = THCTensor_(data)(state, input);
THCTensor_(resize4d)(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols);
THCUNN_resizeAs_indices(state, indices, output);
THCIndex_t* indices_data = THCIndexTensor_(data)(state, indices);
real* output_data = THCTensor_(data)(state, output);
int count = THCTensor_(nElement)(state, output);
MaxPoolForward<real, accreal> <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>
(count, input_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW, dilationH, dilationW, output_data, indices_data);
THCudaCheck(cudaGetLastError());
if(input->dim() == 3)
THCTensor_(resize3d)(state, output, nInputPlane, nOutputRows, nOutputCols);
THCTensor_(free)(state, input);
}
void THNN_(SpatialDilatedMaxPooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCIndexTensor *indices,
int kW, int kH,
int dW, int dH,
int padW, int padH,
int dilationW, int dilationH,
bool ceil_mode)
{
THCUNN_assertSameGPU(state, 4, input, gradOutput, indices, gradInput);
THNN_(SpatialDilatedMaxPooling_shapeCheck)
(state, input, gradOutput, indices, kH, kW, dH, dW,
padH, padW, dilationH, dilationW, ceil_mode);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int64_t nInputCols, nInputRows, nInputPlane, batchSize;
int64_t nOutputCols, nOutputRows;
if (THTensor_nDimensionLegacyAll(input) == 3) {
nInputCols = input->size(2);
nInputRows = input->size(1);
nInputPlane = input->size(0);
batchSize = 1;
}
else
{
nInputCols = input->size(3);
nInputRows = input->size(2);
nInputPlane = input->size(1);
batchSize = input->size(0);
}
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - (dilationW * (kW - 1) + 1) + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - (dilationH * (kH - 1) + 1) + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
int count = THCTensor_(nElement)(state, input);
dim3 grid;
int imgcount = nInputCols * nInputRows;
const int blocks = (imgcount + BACKWARD_THREADS - 1) / BACKWARD_THREADS;
grid.x = blocks;
grid.y = batchSize;
grid.z = nInputPlane;
uint64_t maxGridY = THCState_getCurrentDeviceProperties(state)->maxGridSize[1];
uint64_t maxGridZ = THCState_getCurrentDeviceProperties(state)->maxGridSize[2];
if (maxGridY < grid.y) grid.y = maxGridY;
if (maxGridZ < grid.z) grid.z = maxGridZ;
MaxPoolBackward<real, accreal> <<< grid, BACKWARD_THREADS, 0, THCState_getCurrentStream(state) >>>
(count,
THCTensor_(data)(state, gradOutput),
THCIndexTensor_(data)(state, indices),
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
THCTensor_(data)(state, gradInput));
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, gradOutput);
// clean
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
#endif
|
27f88072061fa4b96497fd3f186995e5aee38301.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
#include "Refine.hxx"
#define T_BLOCKS 1
#define T_THREADS 64
__device__ void printList(ListElt<int>* list, int size) {
if(threadIdx.x==0 && blockIdx.x==0) {
printf("size:%d\n", size);
for(int i=0; i<size; i++) {
printf("%d, %d, %d\n", list[i].id, list[i].dist, list[i].checkedFlag);
}
printf("\n");
}
__syncthreads();
}
__global__ void test_removeDuplicates(ListElt<int>* listMem) {
ListElt<int>* list = &listMem[blockIdx.x*LISTSIZE*2];
typedef cub::BlockRadixSort<int, T_THREADS, LISTCAP/T_THREADS, ListElt<int>> BlockRadixSortT;
__shared__ typename BlockRadixSortT::TempStorage temp_storage;
__shared__ int border_vals[T_THREADS];
for(int i=threadIdx.x; i<LISTSIZE; i+=blockDim.x) {
list[i].id = i;
list[i+LISTSIZE].id = i;
list[i].dist = -i;
list[i+LISTSIZE].dist = -i;
list[i].checkedFlag=true;
list[i+LISTSIZE].checkedFlag=false;
}
__syncthreads();
int listSize = LISTSIZE*2;
__syncthreads();
printList(list, listSize);
__syncthreads();
sortListById<int, int, 0, T_THREADS>(list, &listSize, &temp_storage);
removeDuplicatesAndCompact<int, int, 0, T_THREADS>(list, &listSize, &temp_storage, border_vals);
printList(list, listSize);
}
int main(int argc, char* argv[]) {
ListElt<int>* listMem;
hipMalloc(&listMem, T_BLOCKS*LISTSIZE*2*sizeof(ListElt<int>));
test_removeDuplicates<<<T_BLOCKS, T_THREADS>>>(listMem);
printf("Error: %d\n", hipDeviceSynchronize());
}
*/
| 27f88072061fa4b96497fd3f186995e5aee38301.cu | /*
#include "Refine.hxx"
#define T_BLOCKS 1
#define T_THREADS 64
__device__ void printList(ListElt<int>* list, int size) {
if(threadIdx.x==0 && blockIdx.x==0) {
printf("size:%d\n", size);
for(int i=0; i<size; i++) {
printf("%d, %d, %d\n", list[i].id, list[i].dist, list[i].checkedFlag);
}
printf("\n");
}
__syncthreads();
}
__global__ void test_removeDuplicates(ListElt<int>* listMem) {
ListElt<int>* list = &listMem[blockIdx.x*LISTSIZE*2];
typedef cub::BlockRadixSort<int, T_THREADS, LISTCAP/T_THREADS, ListElt<int>> BlockRadixSortT;
__shared__ typename BlockRadixSortT::TempStorage temp_storage;
__shared__ int border_vals[T_THREADS];
for(int i=threadIdx.x; i<LISTSIZE; i+=blockDim.x) {
list[i].id = i;
list[i+LISTSIZE].id = i;
list[i].dist = -i;
list[i+LISTSIZE].dist = -i;
list[i].checkedFlag=true;
list[i+LISTSIZE].checkedFlag=false;
}
__syncthreads();
int listSize = LISTSIZE*2;
__syncthreads();
printList(list, listSize);
__syncthreads();
sortListById<int, int, 0, T_THREADS>(list, &listSize, &temp_storage);
removeDuplicatesAndCompact<int, int, 0, T_THREADS>(list, &listSize, &temp_storage, border_vals);
printList(list, listSize);
}
int main(int argc, char* argv[]) {
ListElt<int>* listMem;
cudaMalloc(&listMem, T_BLOCKS*LISTSIZE*2*sizeof(ListElt<int>));
test_removeDuplicates<<<T_BLOCKS, T_THREADS>>>(listMem);
printf("Error: %d\n", cudaDeviceSynchronize());
}
*/
|
4a2e90399a95e461cfdb26146fb2dd84f7f75449.hip | // !!! This is a file automatically generated by hipify!!!
/* --------------------------------------------------------------------
OPTIMIZED CODE MAKING USE OF REGISTERS + SHARED MEMORY
----------------------------------------------------------------------*/
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
__global__ void heat (float * __restrict__ in, float * __restrict__ out1, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i-4);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j-4);
int j = max (j0, 0) + (int)(threadIdx.y);
//Declarations
float reg_in_m1=0, __shared__ sh_in_c0[32][32], reg_in_p1=0;
float reg_out_m2=0, __shared__ sh_out_m1[32][32], reg_out_c0=0;
//Value Initialization
if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
reg_in_m1 = in[0 + j*N + i];
sh_in_c0[j-j0][i-i0] = in[1*M*N + j*N + i];
}
//Rest of the computation
for (int k=1; k<=L-2; ++k) {
//Fetch new plane
if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
reg_in_p1 = in[(k+1)*M*N + j*N + i];
}
__syncthreads ();
if (j >= max (j0+1, 1) & j <= min (j0+blockdim_j-2, M-2) & i >= max (i0+1, 1) & i <= min (i0+blockdim_i-2, N-2)) {
reg_out_c0 = ((((0.125f * ((reg_in_p1 - (2.0f * sh_in_c0[j-j0][i-i0])) + reg_in_m1)) + (0.125f * ((sh_in_c0[j-j0+1][i-i0] - (2.0f * sh_in_c0[j-j0][i-i0])) + sh_in_c0[j-j0-1][i-i0]))) + (0.125f * ((sh_in_c0[j-j0][i-i0+1] - (2.0f * sh_in_c0[j-j0][i-i0])) + sh_in_c0[j-j0][i-i0-1]))) + sh_in_c0[j-j0][i-i0]);
}
if (j >= max (j0+2, 1) & j <= min (j0+blockdim_j-3, M-2) & i >= max (i0+2, 1) & i <= min (i0+blockdim_i-3, N-2)) {
out1[max(k-1,0)*M*N + j*N + i] = ((((0.125f * ((reg_out_c0 - (2.0f * sh_out_m1[j-j0][i-i0])) + reg_out_m2)) + (0.125f * ((sh_out_m1[j-j0+1][i-i0] - (2.0f * sh_out_m1[j-j0][i-i0])) + sh_out_m1[j-j0-1][i-i0]))) + (0.125f * ((sh_out_m1[j-j0][i-i0+1] - (2.0f * sh_out_m1[j-j0][i-i0])) + sh_out_m1[j-j0][i-i0-1]))) + sh_out_m1[j-j0][i-i0]);
}
__syncthreads ();
//Value rotation
if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
reg_in_m1 = sh_in_c0[j-j0][i-i0];
sh_in_c0[j-j0][i-i0] = reg_in_p1;
reg_out_m2 = sh_out_m1[j-j0][i-i0];
sh_out_m1[j-j0][i-i0] = reg_out_c0;
}
__syncthreads ();
}
}
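// Note on the scheme above: the kernel streams along k keeping three planes of
// `in` live at once -- the k-1 plane in a register (reg_in_m1), the k plane in
// shared memory (sh_in_c0, so j/i neighbours are visible to the block), and
// the k+1 plane in a register (reg_in_p1). The intermediate result is held the
// same way one plane behind (reg_out_m2 / sh_out_m1 / reg_out_c0), so two
// successive applications of the 7-point stencil are fused into a single pass.
// Each block writes only its (blockDim-4)-wide interior; the 2-cell overlap on
// every side is the halo the chained stencils need, which is why the grids
// below are built with blockconfig.x-4 and blockconfig.y-4.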
extern "C" void host_code (float *h_in, float *h_out2, int L, int M, int N) {
float *in;
hipMalloc (&in, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for in\n");
hipMemcpy (in, h_in, sizeof(float)*L*M*N, hipMemcpyHostToDevice);
float *out1;
hipMalloc (&out1, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for out1\n");
float *out2;
hipMalloc (&out2, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for out2\n");
dim3 blockconfig_1 (32, 32, 1);
dim3 gridconfig_1 (ceil(N, blockconfig_1.x-4), ceil(M, blockconfig_1.y-4), 1);
hipLaunchKernelGGL(( heat) , dim3(gridconfig_1), dim3(blockconfig_1), 0, 0, in, out1, L, M, N);
dim3 blockconfig_2 (32, 32, 1);
dim3 gridconfig_2 (ceil(N, blockconfig_2.x-4), ceil(M, blockconfig_2.y-4), 1);
hipLaunchKernelGGL(( heat) , dim3(gridconfig_2), dim3(blockconfig_2), 0, 0, out1, out2, L, M, N);
hipMemcpy (h_out2, out2, sizeof(float)*L*M*N, hipMemcpyDeviceToHost);
}
| 4a2e90399a95e461cfdb26146fb2dd84f7f75449.cu | /* --------------------------------------------------------------------
OPTIMIZED CODE MAKING USE OF REGISTERS + SHARED MEMORY
----------------------------------------------------------------------*/
#include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
__global__ void heat (float * __restrict__ in, float * __restrict__ out1, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i-4);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j-4);
int j = max (j0, 0) + (int)(threadIdx.y);
//Declarations
float reg_in_m1=0, __shared__ sh_in_c0[32][32], reg_in_p1=0;
float reg_out_m2=0, __shared__ sh_out_m1[32][32], reg_out_c0=0;
//Value Initialization
if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
reg_in_m1 = in[0 + j*N + i];
sh_in_c0[j-j0][i-i0] = in[1*M*N + j*N + i];
}
//Rest of the computation
for (int k=1; k<=L-2; ++k) {
//Fetch new plane
if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
reg_in_p1 = in[(k+1)*M*N + j*N + i];
}
__syncthreads ();
if (j >= max (j0+1, 1) & j <= min (j0+blockdim_j-2, M-2) & i >= max (i0+1, 1) & i <= min (i0+blockdim_i-2, N-2)) {
reg_out_c0 = ((((0.125f * ((reg_in_p1 - (2.0f * sh_in_c0[j-j0][i-i0])) + reg_in_m1)) + (0.125f * ((sh_in_c0[j-j0+1][i-i0] - (2.0f * sh_in_c0[j-j0][i-i0])) + sh_in_c0[j-j0-1][i-i0]))) + (0.125f * ((sh_in_c0[j-j0][i-i0+1] - (2.0f * sh_in_c0[j-j0][i-i0])) + sh_in_c0[j-j0][i-i0-1]))) + sh_in_c0[j-j0][i-i0]);
}
if (j >= max (j0+2, 1) & j <= min (j0+blockdim_j-3, M-2) & i >= max (i0+2, 1) & i <= min (i0+blockdim_i-3, N-2)) {
out1[max(k-1,0)*M*N + j*N + i] = ((((0.125f * ((reg_out_c0 - (2.0f * sh_out_m1[j-j0][i-i0])) + reg_out_m2)) + (0.125f * ((sh_out_m1[j-j0+1][i-i0] - (2.0f * sh_out_m1[j-j0][i-i0])) + sh_out_m1[j-j0-1][i-i0]))) + (0.125f * ((sh_out_m1[j-j0][i-i0+1] - (2.0f * sh_out_m1[j-j0][i-i0])) + sh_out_m1[j-j0][i-i0-1]))) + sh_out_m1[j-j0][i-i0]);
}
__syncthreads ();
//Value rotation
if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
reg_in_m1 = sh_in_c0[j-j0][i-i0];
sh_in_c0[j-j0][i-i0] = reg_in_p1;
reg_out_m2 = sh_out_m1[j-j0][i-i0];
sh_out_m1[j-j0][i-i0] = reg_out_c0;
}
__syncthreads ();
}
}
extern "C" void host_code (float *h_in, float *h_out2, int L, int M, int N) {
float *in;
cudaMalloc (&in, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for in\n");
cudaMemcpy (in, h_in, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
float *out1;
cudaMalloc (&out1, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for out1\n");
float *out2;
cudaMalloc (&out2, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for out2\n");
dim3 blockconfig_1 (32, 32, 1);
dim3 gridconfig_1 (ceil(N, blockconfig_1.x-4), ceil(M, blockconfig_1.y-4), 1);
heat <<<gridconfig_1, blockconfig_1>>> (in, out1, L, M, N);
dim3 blockconfig_2 (32, 32, 1);
dim3 gridconfig_2 (ceil(N, blockconfig_2.x-4), ceil(M, blockconfig_2.y-4), 1);
heat <<<gridconfig_2, blockconfig_2>>> (out1, out2, L, M, N);
cudaMemcpy (h_out2, out2, sizeof(float)*L*M*N, cudaMemcpyDeviceToHost);
}
|
e76a6916e93f0c688d53a7958e9b2474b8f19845.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <functional>
#include <map>
#include <vector>
#include "thrust/functional.h"
#include "thrust/sort.h"
#include "caffe/common.hpp"
#include "caffe/util/bbox_util.hpp"
namespace caffe {
template <typename Dtype>
__host__ __device__ Dtype BBoxSizeGPU(const Dtype* bbox,
const bool normalized) {
if (bbox[2] < bbox[0] || bbox[3] < bbox[1]) {
// If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0.
return Dtype(0.);
} else {
const Dtype width = bbox[2] - bbox[0];
const Dtype height = bbox[3] - bbox[1];
if (normalized) {
return width * height;
} else {
// If bbox is not within range [0, 1].
return (width + 1) * (height + 1);
}
}
}
template __host__ __device__ float BBoxSizeGPU(const float* bbox,
const bool normalized);
template __host__ __device__ double BBoxSizeGPU(const double* bbox,
const bool normalized);
template <typename Dtype>
__host__ __device__ Dtype JaccardOverlapGPU(const Dtype* bbox1,
const Dtype* bbox2) {
if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] ||
bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) {
return Dtype(0.);
} else {
const Dtype inter_xmin = max(bbox1[0], bbox2[0]);
const Dtype inter_ymin = max(bbox1[1], bbox2[1]);
const Dtype inter_xmax = min(bbox1[2], bbox2[2]);
const Dtype inter_ymax = min(bbox1[3], bbox2[3]);
const Dtype inter_width = inter_xmax - inter_xmin;
const Dtype inter_height = inter_ymax - inter_ymin;
const Dtype inter_size = inter_width * inter_height;
const Dtype bbox1_size = BBoxSizeGPU(bbox1);
const Dtype bbox2_size = BBoxSizeGPU(bbox2);
return inter_size / (bbox1_size + bbox2_size - inter_size);
}
}
template __host__ __device__ float JaccardOverlapGPU(const float* bbox1,
const float* bbox2);
template __host__ __device__ double JaccardOverlapGPU(const double* bbox1,
const double* bbox2);
template <typename Dtype>
__device__ Dtype Min(const Dtype x, const Dtype y) {
return x < y ? x : y;
}
template <typename Dtype>
__device__ Dtype Max(const Dtype x, const Dtype y) {
return x > y ? x : y;
}
template <typename Dtype>
__device__ void ClipBBoxGPU(const Dtype* bbox, Dtype* clip_bbox) {
for (int i = 0; i < 4; ++i) {
clip_bbox[i] = Max(Min(bbox[i], Dtype(1.)), Dtype(0.));
}
}
template __device__ void ClipBBoxGPU(const float* bbox, float* clip_bbox);
template __device__ void ClipBBoxGPU(const double* bbox, double* clip_bbox);
template <typename Dtype>
__global__ void DecodeBBoxesKernel(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, Dtype* bbox_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index % 4;
const int c = (index / 4) % num_loc_classes;
const int d = (index / 4 / num_loc_classes) % num_priors;
if (!share_location && c == background_label_id) {
// Ignore background class if not share_location.
return;
}
const int pi = d * 4;
const int vi = pi + num_priors * 4;
if (code_type == PriorBoxParameter_CodeType_CORNER) {
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = prior_data[pi + i] + loc_data[index];
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] =
prior_data[pi + i] + loc_data[index] * prior_data[vi + i];
}
} else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) {
const Dtype p_xmin = prior_data[pi];
const Dtype p_ymin = prior_data[pi + 1];
const Dtype p_xmax = prior_data[pi + 2];
const Dtype p_ymax = prior_data[pi + 3];
const Dtype prior_width = p_xmax - p_xmin;
const Dtype prior_height = p_ymax - p_ymin;
const Dtype prior_center_x = (p_xmin + p_xmax) / 2.;
const Dtype prior_center_y = (p_ymin + p_ymax) / 2.;
const Dtype xmin = loc_data[index - i];
const Dtype ymin = loc_data[index - i + 1];
const Dtype xmax = loc_data[index - i + 2];
const Dtype ymax = loc_data[index - i + 3];
Dtype decode_bbox_center_x, decode_bbox_center_y;
Dtype decode_bbox_width, decode_bbox_height;
if (variance_encoded_in_target) {
        // variance is encoded in target, we simply need to restore the offset
// predictions.
decode_bbox_center_x = xmin * prior_width + prior_center_x;
decode_bbox_center_y = ymin * prior_height + prior_center_y;
decode_bbox_width = exp(xmax) * prior_width;
decode_bbox_height = exp(ymax) * prior_height;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
decode_bbox_center_x =
prior_data[vi] * xmin * prior_width + prior_center_x;
decode_bbox_center_y =
prior_data[vi + 1] * ymin * prior_height + prior_center_y;
decode_bbox_width =
exp(prior_data[vi + 2] * xmax) * prior_width;
decode_bbox_height =
exp(prior_data[vi + 3] * ymax) * prior_height;
}
switch (i) {
case 0:
bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.;
break;
case 1:
bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.;
break;
case 2:
bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.;
break;
case 3:
bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.;
break;
}
} else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) {
const Dtype p_xmin = prior_data[pi];
const Dtype p_ymin = prior_data[pi + 1];
const Dtype p_xmax = prior_data[pi + 2];
const Dtype p_ymax = prior_data[pi + 3];
const Dtype prior_width = p_xmax - p_xmin;
const Dtype prior_height = p_ymax - p_ymin;
Dtype p_size;
if (i == 0 || i == 2) {
p_size = prior_width;
} else {
p_size = prior_height;
}
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = prior_data[pi + i] + loc_data[index] * p_size;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] =
prior_data[pi + i] + loc_data[index] * prior_data[vi + i] * p_size;
}
} else {
// Unknown code type.
}
if (clip_bbox) {
bbox_data[index] = max(min(bbox_data[index], Dtype(1.)), Dtype(0.));
}
}
}
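// Note on the CENTER_SIZE branch above: the four loc values act as
// (dx, dy, dw, dh) offsets relative to the prior box,
//   cx = dx * prior_w + prior_cx,   w = exp(dw) * prior_w
//   cy = dy * prior_h + prior_cy,   h = exp(dh) * prior_h
// (each offset additionally scaled by its prior variance when
// variance_encoded_in_target is false), and the switch then converts the
// decoded center/size box back to (xmin, ymin, xmax, ymax) corner form.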
template <typename Dtype>
void DecodeBBoxesGPU(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, Dtype* bbox_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DecodeBBoxesKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, loc_data, prior_data, code_type,
variance_encoded_in_target, num_priors, share_location, num_loc_classes,
background_label_id, clip_bbox, bbox_data);
CUDA_POST_KERNEL_CHECK;
}
template void DecodeBBoxesGPU(const int nthreads,
const float* loc_data, const float* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, float* bbox_data);
template void DecodeBBoxesGPU(const int nthreads,
const double* loc_data, const double* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, double* bbox_data);
template <typename Dtype>
__global__ void PermuteDataKernel(const int nthreads,
const Dtype* data, const int num_classes, const int num_data,
const int num_dim, Dtype* new_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index % num_dim;
const int c = (index / num_dim) % num_classes;
const int d = (index / num_dim / num_classes) % num_data;
const int n = index / num_dim / num_classes / num_data;
const int new_index = ((n * num_classes + c) * num_data + d) * num_dim + i;
new_data[new_index] = data[index];
}
}
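// Note: this kernel transposes the class and prior dimensions. Reading `index`
// as (n, d, c, i) over a [num, num_data, num_classes, num_dim] layout and
// writing ((n * num_classes + c) * num_data + d) * num_dim + i produces a
// [num, num_classes, num_data, num_dim] layout, so downstream per-class
// processing can read each class's predictions contiguously.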
template <typename Dtype>
void PermuteDataGPU(const int nthreads,
const Dtype* data, const int num_classes, const int num_data,
const int num_dim, Dtype* new_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PermuteDataKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, data, num_classes, num_data,
num_dim, new_data);
CUDA_POST_KERNEL_CHECK;
}
template void PermuteDataGPU(const int nthreads,
const float* data, const int num_classes, const int num_data,
const int num_dim, float* new_data);
template void PermuteDataGPU(const int nthreads,
const double* data, const int num_classes, const int num_data,
const int num_dim, double* new_data);
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_data, const Dtype* channel_max,
Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] = channel_data[index] - channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
template <typename Dtype>
void SoftMaxGPU(const Dtype* data, const int outer_num,
const int channels, const int inner_num, Dtype* prob) {
vector<int> shape(4, 1);
shape[0] = outer_num;
shape[1] = channels;
shape[2] = inner_num;
Blob<Dtype> scale(shape);
Dtype* scale_data = scale.mutable_gpu_data();
int count = outer_num * channels * inner_num;
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_max<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num * inner_num)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num, channels, inner_num, data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num, channels, inner_num,
data, scale_data, prob);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, prob, prob);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_sum<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num * inner_num)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num, channels, inner_num, prob,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_div<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num, channels, inner_num,
scale_data, prob);
}
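// Note: the max-subtraction above is the standard softmax stabilization,
//   softmax(x)_c = exp(x_c - m) / sum_j exp(x_j - m),  m = max_j x_j,
// which equals the unshifted form but keeps every exponent <= 0, so exp()
// cannot overflow for large logits.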
template void SoftMaxGPU(const float* data, const int outer_num,
const int channels, const int inner_num, float* prob);
template void SoftMaxGPU(const double* data, const int outer_num,
const int channels, const int inner_num, double* prob);
template <typename Dtype>
__global__ void ComputeOverlappedKernel(const int nthreads,
const Dtype* bbox_data, const int num_bboxes, const int num_classes,
const Dtype overlap_threshold, bool* overlapped_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int j = index % num_bboxes;
const int i = (index / num_bboxes) % num_bboxes;
if (i == j) {
// Ignore same bbox.
return;
}
const int c = (index / num_bboxes / num_bboxes) % num_classes;
const int n = index / num_bboxes / num_bboxes / num_classes;
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = ((n * num_bboxes + i) * num_classes + c) * 4;
const int start_loc_j = ((n * num_bboxes + j) * num_classes + c) * 4;
const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i,
bbox_data + start_loc_j);
if (overlap > overlap_threshold) {
overlapped_data[index] = true;
}
}
}
template <typename Dtype>
void ComputeOverlappedGPU(const int nthreads,
const Dtype* bbox_data, const int num_bboxes, const int num_classes,
const Dtype overlap_threshold, bool* overlapped_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ComputeOverlappedKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bbox_data, num_bboxes, num_classes,
overlap_threshold, overlapped_data);
CUDA_POST_KERNEL_CHECK;
}
template void ComputeOverlappedGPU(const int nthreads,
const float* bbox_data, const int num_bboxes, const int num_classes,
const float overlap_threshold, bool* overlapped_data);
template void ComputeOverlappedGPU(const int nthreads,
const double* bbox_data, const int num_bboxes, const int num_classes,
const double overlap_threshold, bool* overlapped_data);
template <typename Dtype>
__global__ void ComputeOverlappedByIdxKernel(const int nthreads,
const Dtype* bbox_data, const Dtype overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int j = index % num_idx;
const int i = (index / num_idx);
if (i == j) {
// Ignore same bbox.
return;
}
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = idx[i] * 4;
const int start_loc_j = idx[j] * 4;
const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i,
bbox_data + start_loc_j);
if (overlap > overlap_threshold) {
overlapped_data[index] = true;
}
}
}
template <typename Dtype>
void ComputeOverlappedByIdxGPU(const int nthreads,
const Dtype* bbox_data, const Dtype overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ComputeOverlappedByIdxKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bbox_data, overlap_threshold,
idx, num_idx, overlapped_data);
CUDA_POST_KERNEL_CHECK;
}
template void ComputeOverlappedByIdxGPU(const int nthreads,
const float* bbox_data, const float overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data);
template void ComputeOverlappedByIdxGPU(const int nthreads,
const double* bbox_data, const double overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data);
template <typename Dtype>
void ApplyNMSGPU(const Dtype* bbox_data, const Dtype* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices) {
// Keep part of detections whose scores are higher than confidence threshold.
vector<int> idx;
vector<Dtype> confidences;
for (int i = 0; i < num_bboxes; ++i) {
if (conf_data[i] > confidence_threshold) {
idx.push_back(i);
confidences.push_back(conf_data[i]);
}
}
int num_remain = confidences.size();
if (num_remain == 0) {
return;
}
// Sort detections based on score.
thrust::sort_by_key(&confidences[0], &confidences[0] + num_remain, &idx[0],
thrust::greater<Dtype>());
if (top_k > -1 && top_k < num_remain) {
num_remain = top_k;
}
// Compute overlap between remaining detections.
Blob<int> idx_blob(1, 1, 1, num_remain);
int* idx_data = idx_blob.mutable_cpu_data();
std::copy(idx.begin(), idx.begin() + num_remain, idx_data);
Blob<bool> overlapped(1, 1, num_remain, num_remain);
const int total_bboxes = overlapped.count();
bool* overlapped_data = overlapped.mutable_gpu_data();
ComputeOverlappedByIdxGPU<Dtype>(total_bboxes, bbox_data, nms_threshold,
idx_blob.gpu_data(), num_remain, overlapped_data);
// Do non-maximum suppression based on overlapped results.
const bool* overlapped_results = overlapped.cpu_data();
vector<int> selected_indices;
ApplyNMS(overlapped_results, num_remain, &selected_indices);
// Put back the selected information.
for (int i = 0; i < selected_indices.size(); ++i) {
indices->push_back(idx[selected_indices[i]]);
}
}
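// Note: ApplyNMSGPU above (1) keeps detections whose score exceeds
// confidence_threshold, (2) sorts the survivors by score with thrust,
// (3) caps them at top_k, (4) builds the pairwise IoU > nms_threshold matrix
// on the GPU via ComputeOverlappedByIdxGPU, and (5) runs the greedy
// suppression on the CPU with ApplyNMS over that boolean matrix.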
template
void ApplyNMSGPU(const float* bbox_data, const float* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices);
template
void ApplyNMSGPU(const double* bbox_data, const double* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices);
template <typename Dtype>
__global__ void GetDetectionsKernel(const int nthreads,
const Dtype* bbox_data, const Dtype* conf_data, const int image_id,
const int label, const int* indices, const bool clip_bbox,
Dtype* detection_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int det_idx = indices[index];
detection_data[index * DET_SHAPE + 0] = image_id;
detection_data[index * DET_SHAPE + 1] = label;
detection_data[index * DET_SHAPE + 2] = conf_data[det_idx];
if (clip_bbox) {
ClipBBoxGPU(&(bbox_data[det_idx * 4]), &(detection_data[index * DET_SHAPE + 3]));
} else {
for (int i = 0; i < 4; ++i) {
detection_data[index * DET_SHAPE + 3 + i] = bbox_data[det_idx * 4 + i];
}
}
}
}
template <typename Dtype>
void GetDetectionsGPU(const Dtype* bbox_data, const Dtype* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<Dtype>* detection_blob) {
// Store selected indices in array.
int num_det = indices.size();
if (num_det == 0) {
return;
}
Blob<int> idx_blob(1, 1, 1, num_det);
int* idx_data = idx_blob.mutable_cpu_data();
std::copy(indices.begin(), indices.end(), idx_data);
// Prepare detection_blob.
detection_blob->Reshape(1, 1, num_det, DET_SHAPE);
Dtype* detection_data = detection_blob->mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( GetDetectionsKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_det)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_det, bbox_data, conf_data, image_id, label,
idx_blob.gpu_data(), clip_bbox, detection_data);
CUDA_POST_KERNEL_CHECK;
}
template void GetDetectionsGPU(const float* bbox_data, const float* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<float>* detection_blob);
template void GetDetectionsGPU(const double* bbox_data, const double* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<double>* detection_blob);
template <typename Dtype>
__global__ void ComputeConfLossKernel(const int nthreads,
const Dtype* conf_data, const int num_preds_per_class,
const int num_classes, const ConfLossType loss_type,
const Dtype* match_data, Dtype* conf_loss_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int label = match_data[index];
int num = index / num_preds_per_class;
int p = index % num_preds_per_class;
int start_idx = (num * num_preds_per_class + p) * num_classes;
Dtype loss = 0;
if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) {
// Compute softmax probability.
Dtype prob = conf_data[start_idx + label];
loss = -log(Max(prob, Dtype(FLT_MIN)));
} else if (loss_type == MultiBoxLossParameter_ConfLossType_LOGISTIC) {
int target = 0;
for (int c = 0; c < num_classes; ++c) {
if (c == label) {
target = 1;
} else {
target = 0;
}
Dtype input = conf_data[start_idx + c];
loss -= input * (target - (input >= 0)) -
log(1 + exp(input - 2 * input * (input >= 0)));
}
}
conf_loss_data[index] = loss;
}
}
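// Note: the LOGISTIC branch above accumulates, per class, a numerically stable
// form of binary cross-entropy with logits,
//   loss += -( x*(t - (x>=0)) - log(1 + exp(x - 2*x*(x>=0))) )
// which equals -( t*log(sigmoid(x)) + (1-t)*log(1-sigmoid(x)) ) while avoiding
// overflow in exp() for large |x|.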
template <typename Dtype>
void ComputeConfLossGPU(const Blob<Dtype>& conf_blob, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss) {
CHECK_LT(background_label_id, num_classes);
Blob<Dtype> match_blob(num, num_preds_per_class, 1, 1);
Dtype* match_data = match_blob.mutable_cpu_data();
for (int i = 0; i < num; ++i) {
const map<int, vector<int> >& match_indices = all_match_indices[i];
for (int p = 0; p < num_preds_per_class; ++p) {
// Get the label index.
int label = background_label_id;
for (map<int, vector<int> >::const_iterator it =
match_indices.begin(); it != match_indices.end(); ++it) {
const vector<int>& match_index = it->second;
CHECK_EQ(match_index.size(), num_preds_per_class);
if (match_index[p] > -1) {
CHECK(all_gt_bboxes.find(i) != all_gt_bboxes.end());
const vector<NormalizedBBox>& gt_bboxes =
all_gt_bboxes.find(i)->second;
CHECK_LT(match_index[p], gt_bboxes.size());
label = gt_bboxes[match_index[p]].label();
CHECK_GE(label, 0);
CHECK_NE(label, background_label_id);
CHECK_LT(label, num_classes);
// A prior can only be matched to one gt bbox.
break;
}
}
match_data[i * num_preds_per_class + p] = label;
}
}
// Get probability data.
const Dtype* conf_gpu_data = conf_blob.gpu_data();
Blob<Dtype> prob_blob;
prob_blob.ReshapeLike(conf_blob);
if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) {
Dtype* prob_gpu_data = prob_blob.mutable_gpu_data();
SoftMaxGPU(conf_blob.gpu_data(), num * num_preds_per_class, num_classes, 1,
prob_gpu_data);
conf_gpu_data = prob_blob.gpu_data();
}
// Compute the loss.
Blob<Dtype> conf_loss_blob(num, num_preds_per_class, 1, 1);
Dtype* conf_loss_gpu_data = conf_loss_blob.mutable_gpu_data();
const int num_threads = num * num_preds_per_class;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ComputeConfLossKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_threads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_threads, conf_gpu_data, num_preds_per_class,
num_classes, loss_type, match_blob.gpu_data(), conf_loss_gpu_data);
// Save the loss.
all_conf_loss->clear();
const Dtype* loss_data = conf_loss_blob.cpu_data();
for (int i = 0; i < num; ++i) {
vector<float> conf_loss(loss_data, loss_data + num_preds_per_class);
all_conf_loss->push_back(conf_loss);
loss_data += num_preds_per_class;
}
}
// Explicit initialization.
template void ComputeConfLossGPU(const Blob<float>& conf_data, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss);
template void ComputeConfLossGPU(const Blob<double>& conf_data, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss);
} // namespace caffe
| e76a6916e93f0c688d53a7958e9b2474b8f19845.cu | #include <algorithm>
#include <functional>
#include <map>
#include <vector>
#include "thrust/functional.h"
#include "thrust/sort.h"
#include "caffe/common.hpp"
#include "caffe/util/bbox_util.hpp"
namespace caffe {
template <typename Dtype>
__host__ __device__ Dtype BBoxSizeGPU(const Dtype* bbox,
const bool normalized) {
if (bbox[2] < bbox[0] || bbox[3] < bbox[1]) {
// If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0.
return Dtype(0.);
} else {
const Dtype width = bbox[2] - bbox[0];
const Dtype height = bbox[3] - bbox[1];
if (normalized) {
return width * height;
} else {
// If bbox is not within range [0, 1].
return (width + 1) * (height + 1);
}
}
}
template __host__ __device__ float BBoxSizeGPU(const float* bbox,
const bool normalized);
template __host__ __device__ double BBoxSizeGPU(const double* bbox,
const bool normalized);
template <typename Dtype>
__host__ __device__ Dtype JaccardOverlapGPU(const Dtype* bbox1,
const Dtype* bbox2) {
if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] ||
bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) {
return Dtype(0.);
} else {
const Dtype inter_xmin = max(bbox1[0], bbox2[0]);
const Dtype inter_ymin = max(bbox1[1], bbox2[1]);
const Dtype inter_xmax = min(bbox1[2], bbox2[2]);
const Dtype inter_ymax = min(bbox1[3], bbox2[3]);
const Dtype inter_width = inter_xmax - inter_xmin;
const Dtype inter_height = inter_ymax - inter_ymin;
const Dtype inter_size = inter_width * inter_height;
const Dtype bbox1_size = BBoxSizeGPU(bbox1);
const Dtype bbox2_size = BBoxSizeGPU(bbox2);
return inter_size / (bbox1_size + bbox2_size - inter_size);
}
}
template __host__ __device__ float JaccardOverlapGPU(const float* bbox1,
const float* bbox2);
template __host__ __device__ double JaccardOverlapGPU(const double* bbox1,
const double* bbox2);
template <typename Dtype>
__device__ Dtype Min(const Dtype x, const Dtype y) {
return x < y ? x : y;
}
template <typename Dtype>
__device__ Dtype Max(const Dtype x, const Dtype y) {
return x > y ? x : y;
}
template <typename Dtype>
__device__ void ClipBBoxGPU(const Dtype* bbox, Dtype* clip_bbox) {
for (int i = 0; i < 4; ++i) {
clip_bbox[i] = Max(Min(bbox[i], Dtype(1.)), Dtype(0.));
}
}
template __device__ void ClipBBoxGPU(const float* bbox, float* clip_bbox);
template __device__ void ClipBBoxGPU(const double* bbox, double* clip_bbox);
template <typename Dtype>
__global__ void DecodeBBoxesKernel(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, Dtype* bbox_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index % 4;
const int c = (index / 4) % num_loc_classes;
const int d = (index / 4 / num_loc_classes) % num_priors;
if (!share_location && c == background_label_id) {
// Ignore background class if not share_location.
return;
}
const int pi = d * 4;
const int vi = pi + num_priors * 4;
if (code_type == PriorBoxParameter_CodeType_CORNER) {
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = prior_data[pi + i] + loc_data[index];
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] =
prior_data[pi + i] + loc_data[index] * prior_data[vi + i];
}
} else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) {
const Dtype p_xmin = prior_data[pi];
const Dtype p_ymin = prior_data[pi + 1];
const Dtype p_xmax = prior_data[pi + 2];
const Dtype p_ymax = prior_data[pi + 3];
const Dtype prior_width = p_xmax - p_xmin;
const Dtype prior_height = p_ymax - p_ymin;
const Dtype prior_center_x = (p_xmin + p_xmax) / 2.;
const Dtype prior_center_y = (p_ymin + p_ymax) / 2.;
const Dtype xmin = loc_data[index - i];
const Dtype ymin = loc_data[index - i + 1];
const Dtype xmax = loc_data[index - i + 2];
const Dtype ymax = loc_data[index - i + 3];
Dtype decode_bbox_center_x, decode_bbox_center_y;
Dtype decode_bbox_width, decode_bbox_height;
if (variance_encoded_in_target) {
        // variance is encoded in target, we simply need to restore the offset
// predictions.
decode_bbox_center_x = xmin * prior_width + prior_center_x;
decode_bbox_center_y = ymin * prior_height + prior_center_y;
decode_bbox_width = exp(xmax) * prior_width;
decode_bbox_height = exp(ymax) * prior_height;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
decode_bbox_center_x =
prior_data[vi] * xmin * prior_width + prior_center_x;
decode_bbox_center_y =
prior_data[vi + 1] * ymin * prior_height + prior_center_y;
decode_bbox_width =
exp(prior_data[vi + 2] * xmax) * prior_width;
decode_bbox_height =
exp(prior_data[vi + 3] * ymax) * prior_height;
}
switch (i) {
case 0:
bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.;
break;
case 1:
bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.;
break;
case 2:
bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.;
break;
case 3:
bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.;
break;
}
} else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) {
const Dtype p_xmin = prior_data[pi];
const Dtype p_ymin = prior_data[pi + 1];
const Dtype p_xmax = prior_data[pi + 2];
const Dtype p_ymax = prior_data[pi + 3];
const Dtype prior_width = p_xmax - p_xmin;
const Dtype prior_height = p_ymax - p_ymin;
Dtype p_size;
if (i == 0 || i == 2) {
p_size = prior_width;
} else {
p_size = prior_height;
}
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = prior_data[pi + i] + loc_data[index] * p_size;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] =
prior_data[pi + i] + loc_data[index] * prior_data[vi + i] * p_size;
}
} else {
// Unknown code type.
}
if (clip_bbox) {
bbox_data[index] = max(min(bbox_data[index], Dtype(1.)), Dtype(0.));
}
}
}
template <typename Dtype>
void DecodeBBoxesGPU(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, Dtype* bbox_data) {
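  // Assumption (inferred from the kernel's index decomposition): nthreads is the total
  // number of coordinates, i.e. num * num_priors * num_loc_classes * 4, so one thread
  // decodes one coordinate of one prior for one class.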
// NOLINT_NEXT_LINE(whitespace/operators)
DecodeBBoxesKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, loc_data, prior_data, code_type,
variance_encoded_in_target, num_priors, share_location, num_loc_classes,
background_label_id, clip_bbox, bbox_data);
CUDA_POST_KERNEL_CHECK;
}
template void DecodeBBoxesGPU(const int nthreads,
const float* loc_data, const float* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, float* bbox_data);
template void DecodeBBoxesGPU(const int nthreads,
const double* loc_data, const double* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, double* bbox_data);
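// Note (inferred from the index math below): PermuteDataKernel swaps the class and prior
// axes, reading data laid out as [num][num_data][num_classes][num_dim] and writing
// [num][num_classes][num_data][num_dim], presumably so that all predictions of one class
// end up contiguous.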
template <typename Dtype>
__global__ void PermuteDataKernel(const int nthreads,
const Dtype* data, const int num_classes, const int num_data,
const int num_dim, Dtype* new_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index % num_dim;
const int c = (index / num_dim) % num_classes;
const int d = (index / num_dim / num_classes) % num_data;
const int n = index / num_dim / num_classes / num_data;
const int new_index = ((n * num_classes + c) * num_data + d) * num_dim + i;
new_data[new_index] = data[index];
}
}
template <typename Dtype>
void PermuteDataGPU(const int nthreads,
const Dtype* data, const int num_classes, const int num_data,
const int num_dim, Dtype* new_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
PermuteDataKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, data, num_classes, num_data,
num_dim, new_data);
CUDA_POST_KERNEL_CHECK;
}
template void PermuteDataGPU(const int nthreads,
const float* data, const int num_classes, const int num_data,
const int num_dim, float* new_data);
template void PermuteDataGPU(const int nthreads,
const double* data, const int num_classes, const int num_data,
const int num_dim, double* new_data);
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_data, const Dtype* channel_max,
Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] = channel_data[index] - channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
template <typename Dtype>
void SoftMaxGPU(const Dtype* data, const int outer_num,
const int channels, const int inner_num, Dtype* prob) {
vector<int> shape(4, 1);
shape[0] = outer_num;
shape[1] = channels;
shape[2] = inner_num;
Blob<Dtype> scale(shape);
Dtype* scale_data = scale.mutable_gpu_data();
int count = outer_num * channels * inner_num;
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
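  // This relies on the identity softmax(x)_c = exp(x_c - m) / sum_k exp(x_k - m) with
  // m = max_k x_k, which leaves the result unchanged while keeping exp() from overflowing.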
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_max<Dtype><<<CAFFE_GET_BLOCKS(outer_num * inner_num),
CAFFE_CUDA_NUM_THREADS>>>(outer_num, channels, inner_num, data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num, channels, inner_num,
data, scale_data, prob);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, prob, prob);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype><<<CAFFE_GET_BLOCKS(outer_num * inner_num),
CAFFE_CUDA_NUM_THREADS>>>(outer_num, channels, inner_num, prob,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_div<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num, channels, inner_num,
scale_data, prob);
}
template void SoftMaxGPU(const float* data, const int outer_num,
const int channels, const int inner_num, float* prob);
template void SoftMaxGPU(const double* data, const int outer_num,
const int channels, const int inner_num, double* prob);
template <typename Dtype>
__global__ void ComputeOverlappedKernel(const int nthreads,
const Dtype* bbox_data, const int num_bboxes, const int num_classes,
const Dtype overlap_threshold, bool* overlapped_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int j = index % num_bboxes;
const int i = (index / num_bboxes) % num_bboxes;
if (i == j) {
// Ignore same bbox.
return;
}
const int c = (index / num_bboxes / num_bboxes) % num_classes;
const int n = index / num_bboxes / num_bboxes / num_classes;
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = ((n * num_bboxes + i) * num_classes + c) * 4;
const int start_loc_j = ((n * num_bboxes + j) * num_classes + c) * 4;
const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i,
bbox_data + start_loc_j);
if (overlap > overlap_threshold) {
overlapped_data[index] = true;
}
}
}
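// Note: the kernel above only ever writes `true`; pairs at or below the overlap threshold
// (and the diagonal i == j) are left untouched, so callers are assumed to have
// zero-initialized overlapped_data beforehand.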
template <typename Dtype>
void ComputeOverlappedGPU(const int nthreads,
const Dtype* bbox_data, const int num_bboxes, const int num_classes,
const Dtype overlap_threshold, bool* overlapped_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
ComputeOverlappedKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, bbox_data, num_bboxes, num_classes,
overlap_threshold, overlapped_data);
CUDA_POST_KERNEL_CHECK;
}
template void ComputeOverlappedGPU(const int nthreads,
const float* bbox_data, const int num_bboxes, const int num_classes,
const float overlap_threshold, bool* overlapped_data);
template void ComputeOverlappedGPU(const int nthreads,
const double* bbox_data, const int num_bboxes, const int num_classes,
const double overlap_threshold, bool* overlapped_data);
template <typename Dtype>
__global__ void ComputeOverlappedByIdxKernel(const int nthreads,
const Dtype* bbox_data, const Dtype overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int j = index % num_idx;
const int i = (index / num_idx);
if (i == j) {
// Ignore same bbox.
return;
}
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = idx[i] * 4;
const int start_loc_j = idx[j] * 4;
const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i,
bbox_data + start_loc_j);
if (overlap > overlap_threshold) {
overlapped_data[index] = true;
}
}
}
template <typename Dtype>
void ComputeOverlappedByIdxGPU(const int nthreads,
const Dtype* bbox_data, const Dtype overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
ComputeOverlappedByIdxKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, bbox_data, overlap_threshold,
idx, num_idx, overlapped_data);
CUDA_POST_KERNEL_CHECK;
}
template void ComputeOverlappedByIdxGPU(const int nthreads,
const float* bbox_data, const float overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data);
template void ComputeOverlappedByIdxGPU(const int nthreads,
const double* bbox_data, const double overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data);
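// Note: ApplyNMSGPU below mixes host and device work: it thresholds scores on the host,
// sorts the surviving indices by score with thrust, computes the pairwise overlap matrix
// on the GPU via ComputeOverlappedByIdxGPU, and then runs the greedy suppression
// (ApplyNMS) on the host over the copied-back boolean matrix.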
template <typename Dtype>
void ApplyNMSGPU(const Dtype* bbox_data, const Dtype* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices) {
// Keep part of detections whose scores are higher than confidence threshold.
vector<int> idx;
vector<Dtype> confidences;
for (int i = 0; i < num_bboxes; ++i) {
if (conf_data[i] > confidence_threshold) {
idx.push_back(i);
confidences.push_back(conf_data[i]);
}
}
int num_remain = confidences.size();
if (num_remain == 0) {
return;
}
// Sort detections based on score.
thrust::sort_by_key(&confidences[0], &confidences[0] + num_remain, &idx[0],
thrust::greater<Dtype>());
if (top_k > -1 && top_k < num_remain) {
num_remain = top_k;
}
// Compute overlap between remaining detections.
Blob<int> idx_blob(1, 1, 1, num_remain);
int* idx_data = idx_blob.mutable_cpu_data();
std::copy(idx.begin(), idx.begin() + num_remain, idx_data);
Blob<bool> overlapped(1, 1, num_remain, num_remain);
const int total_bboxes = overlapped.count();
bool* overlapped_data = overlapped.mutable_gpu_data();
ComputeOverlappedByIdxGPU<Dtype>(total_bboxes, bbox_data, nms_threshold,
idx_blob.gpu_data(), num_remain, overlapped_data);
// Do non-maximum suppression based on overlapped results.
const bool* overlapped_results = overlapped.cpu_data();
vector<int> selected_indices;
ApplyNMS(overlapped_results, num_remain, &selected_indices);
// Put back the selected information.
for (int i = 0; i < selected_indices.size(); ++i) {
indices->push_back(idx[selected_indices[i]]);
}
}
template
void ApplyNMSGPU(const float* bbox_data, const float* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices);
template
void ApplyNMSGPU(const double* bbox_data, const double* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices);
template <typename Dtype>
__global__ void GetDetectionsKernel(const int nthreads,
const Dtype* bbox_data, const Dtype* conf_data, const int image_id,
const int label, const int* indices, const bool clip_bbox,
Dtype* detection_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int det_idx = indices[index];
detection_data[index * DET_SHAPE + 0] = image_id;
detection_data[index * DET_SHAPE + 1] = label;
detection_data[index * DET_SHAPE + 2] = conf_data[det_idx];
if (clip_bbox) {
ClipBBoxGPU(&(bbox_data[det_idx * 4]), &(detection_data[index * DET_SHAPE + 3]));
} else {
for (int i = 0; i < 4; ++i) {
detection_data[index * DET_SHAPE + 3 + i] = bbox_data[det_idx * 4 + i];
}
}
}
}
template <typename Dtype>
void GetDetectionsGPU(const Dtype* bbox_data, const Dtype* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<Dtype>* detection_blob) {
// Store selected indices in array.
int num_det = indices.size();
if (num_det == 0) {
return;
}
Blob<int> idx_blob(1, 1, 1, num_det);
int* idx_data = idx_blob.mutable_cpu_data();
std::copy(indices.begin(), indices.end(), idx_data);
// Prepare detection_blob.
detection_blob->Reshape(1, 1, num_det, DET_SHAPE);
Dtype* detection_data = detection_blob->mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
GetDetectionsKernel<Dtype><<<CAFFE_GET_BLOCKS(num_det),
CAFFE_CUDA_NUM_THREADS>>>(num_det, bbox_data, conf_data, image_id, label,
idx_blob.gpu_data(), clip_bbox, detection_data);
CUDA_POST_KERNEL_CHECK;
}
template void GetDetectionsGPU(const float* bbox_data, const float* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<float>* detection_blob);
template void GetDetectionsGPU(const double* bbox_data, const double* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<double>* detection_blob);
template <typename Dtype>
__global__ void ComputeConfLossKernel(const int nthreads,
const Dtype* conf_data, const int num_preds_per_class,
const int num_classes, const ConfLossType loss_type,
const Dtype* match_data, Dtype* conf_loss_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int label = match_data[index];
int num = index / num_preds_per_class;
int p = index % num_preds_per_class;
int start_idx = (num * num_preds_per_class + p) * num_classes;
Dtype loss = 0;
if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) {
// Compute softmax probability.
Dtype prob = conf_data[start_idx + label];
loss = -log(Max(prob, Dtype(FLT_MIN)));
} else if (loss_type == MultiBoxLossParameter_ConfLossType_LOGISTIC) {
int target = 0;
for (int c = 0; c < num_classes; ++c) {
if (c == label) {
target = 1;
} else {
target = 0;
}
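        // The update below is the numerically stable form of binary cross-entropy with
        // logits, loss += max(x, 0) - x * target + log(1 + exp(-|x|)), arranged so that
        // exp() never receives a large positive argument.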
Dtype input = conf_data[start_idx + c];
loss -= input * (target - (input >= 0)) -
log(1 + exp(input - 2 * input * (input >= 0)));
}
}
conf_loss_data[index] = loss;
}
}
template <typename Dtype>
void ComputeConfLossGPU(const Blob<Dtype>& conf_blob, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss) {
CHECK_LT(background_label_id, num_classes);
Blob<Dtype> match_blob(num, num_preds_per_class, 1, 1);
Dtype* match_data = match_blob.mutable_cpu_data();
for (int i = 0; i < num; ++i) {
const map<int, vector<int> >& match_indices = all_match_indices[i];
for (int p = 0; p < num_preds_per_class; ++p) {
// Get the label index.
int label = background_label_id;
for (map<int, vector<int> >::const_iterator it =
match_indices.begin(); it != match_indices.end(); ++it) {
const vector<int>& match_index = it->second;
CHECK_EQ(match_index.size(), num_preds_per_class);
if (match_index[p] > -1) {
CHECK(all_gt_bboxes.find(i) != all_gt_bboxes.end());
const vector<NormalizedBBox>& gt_bboxes =
all_gt_bboxes.find(i)->second;
CHECK_LT(match_index[p], gt_bboxes.size());
label = gt_bboxes[match_index[p]].label();
CHECK_GE(label, 0);
CHECK_NE(label, background_label_id);
CHECK_LT(label, num_classes);
// A prior can only be matched to one gt bbox.
break;
}
}
match_data[i * num_preds_per_class + p] = label;
}
}
// Get probability data.
const Dtype* conf_gpu_data = conf_blob.gpu_data();
Blob<Dtype> prob_blob;
prob_blob.ReshapeLike(conf_blob);
if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) {
Dtype* prob_gpu_data = prob_blob.mutable_gpu_data();
SoftMaxGPU(conf_blob.gpu_data(), num * num_preds_per_class, num_classes, 1,
prob_gpu_data);
conf_gpu_data = prob_blob.gpu_data();
}
// Compute the loss.
Blob<Dtype> conf_loss_blob(num, num_preds_per_class, 1, 1);
Dtype* conf_loss_gpu_data = conf_loss_blob.mutable_gpu_data();
const int num_threads = num * num_preds_per_class;
// NOLINT_NEXT_LINE(whitespace/operators)
ComputeConfLossKernel<Dtype><<<CAFFE_GET_BLOCKS(num_threads),
CAFFE_CUDA_NUM_THREADS>>>(num_threads, conf_gpu_data, num_preds_per_class,
num_classes, loss_type, match_blob.gpu_data(), conf_loss_gpu_data);
// Save the loss.
all_conf_loss->clear();
const Dtype* loss_data = conf_loss_blob.cpu_data();
for (int i = 0; i < num; ++i) {
vector<float> conf_loss(loss_data, loss_data + num_preds_per_class);
all_conf_loss->push_back(conf_loss);
loss_data += num_preds_per_class;
}
}
// Explicit instantiation.
template void ComputeConfLossGPU(const Blob<float>& conf_data, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss);
template void ComputeConfLossGPU(const Blob<double>& conf_data, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss);
} // namespace caffe
|
f74e65aefdc98bf6838ad4317711cb17ce1d9a5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <math.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <c10/hip/HIPMathCompat.h>
namespace at { namespace native {
// -----------------------------------
// prelu forward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_kernel_share_weights(
const Tensor& input,
Tensor& result,
const scalar_t* weight_data) {
at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
input,
result,
[=] __device__ (
const scalar_t& input_val,
scalar_t& result_val) {
result_val = (input_val > 0) ? input_val : *weight_data * input_val;
});
}
template <typename scalar_t>
__global__ void prelu_cuda_kernel_multi_weights(
scalar_t* result_data,
const scalar_t* input_data,
const scalar_t* weight_data,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
  // multiply the value at each channel by weight[channel_index]
int64_t channel = (linearId % input_stride0) / input_stride1;
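  // Illustrative note: for a contiguous NCHW input, input_stride0 == C*H*W and
  // input_stride1 == H*W, so this recovers the channel index of element linearId.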
scalar_t input_data_val = input_data[linearId];
result_data[linearId] = (input_data_val > 0) ? input_data_val : weight_data[channel] * input_data_val;
}
Tensor prelu_cuda(const Tensor& self, const Tensor& weight_) {
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto weight = weight_.contiguous();
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
int64_t weight_num = weight.numel();
Tensor result = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto strides = input.strides();
// case1: shared weight for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
prelu_cuda_kernel_share_weights<scalar_t>(
input,
result,
weight.data_ptr<scalar_t>());
});
}
else { // case2: multiple weights, one for each channel
int64_t input_ndim = input.dim();
TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
    int64_t channel_size = 1; // channel_size defaults to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
TORCH_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
hipLaunchKernelGGL(( prelu_cuda_kernel_multi_weights<scalar_t>)
, dim3(grid), dim3(block), 0, stream,
result.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
weight.data_ptr<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
});
}
return result;
}
// -----------------------------------
// prelu backward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_backward_kernel_share_weights(
const Tensor& input,
const Tensor& grad_out,
Tensor& input_grad,
Tensor& weight_grad_collector,
const scalar_t* weight_data) {
at::cuda::CUDA_tensor_apply4<scalar_t, scalar_t, scalar_t, scalar_t>(
input,
grad_out,
input_grad,
weight_grad_collector,
[=] __device__ (
const scalar_t& input_val,
const scalar_t& grad_out_val,
scalar_t& input_grad_val,
scalar_t& weight_grad_collector_val) {
input_grad_val = (input_val > 0) ? grad_out_val : *weight_data * grad_out_val;
weight_grad_collector_val = (input_val > 0) ? scalar_t(0) : input_val * grad_out_val;
});
}
template <typename scalar_t>
__global__ void prelu_cuda_backward_kernel_multi_weights(
const scalar_t* input_data,
const scalar_t* weight_data,
const scalar_t* grad_out_data,
scalar_t* input_grad_data,
scalar_t* weight_grad_collector,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
scalar_t grad_out_data_val = grad_out_data[linearId];
input_grad_data[linearId] = (input_data_val > 0) ? grad_out_data_val : weight_data[channel] * grad_out_data_val;
weight_grad_collector[linearId] = (input_data_val > 0) ? scalar_t(0) : input_data_val * grad_out_data_val;
}
std::tuple<Tensor, Tensor> prelu_backward_cuda(const Tensor& grad_out_, const Tensor& self, const Tensor& weight_) {
TORCH_CHECK(grad_out_.is_cuda());
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto grad_out = grad_out_.contiguous();
auto weight = weight_.contiguous();
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
TORCH_CHECK(grad_out.is_contiguous());
int64_t weight_num = weight.numel();
auto strides = input.strides();
auto dims = input.dim();
Tensor input_grad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor weight_grad = at::empty_like(weight, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor weight_grad_collector = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
// case1: shared parameter for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
prelu_cuda_backward_kernel_share_weights<scalar_t>(
input,
grad_out,
input_grad,
weight_grad_collector,
weight.data_ptr<scalar_t>());
});
weight_grad.fill_(weight_grad_collector.sum());
}
else { // case2: multiple parameters, one for each channel
int64_t input_ndim = input.dim();
TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
    int64_t channel_size = 1; // channel_size defaults to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
TORCH_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu_backward_cuda: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
hipLaunchKernelGGL(( prelu_cuda_backward_kernel_multi_weights<scalar_t>)
, dim3(grid), dim3(block), 0, stream,
input.data_ptr<scalar_t>(),
weight.data_ptr<scalar_t>(),
grad_out.data_ptr<scalar_t>(),
input_grad.data_ptr<scalar_t>(),
weight_grad_collector.data_ptr<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
});
// update weight_grad
std::vector<int64_t> reduce_dims;
reduce_dims.push_back(0);
if (dims > 2) {
for(int64_t i = 2; i < dims; i++) reduce_dims.push_back(i);
}
weight_grad = weight_grad_collector.sum(reduce_dims);
}
return std::tuple<Tensor, Tensor>{input_grad, weight_grad};
}
// -----------------------------------
// hardshrink
// -----------------------------------
void hardshrink_kernel(TensorIterator& iter, Scalar value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardshrink_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "hardshrink_cuda", [&] {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a >= -lambd && a <= lambd) ? scalar_t(0) : a;
});
});
});
}
void softshrink_kernel(TensorIterator& iter, Scalar value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softshrink_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "softshrink_cuda", [&] {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0));
});
});
});
}
void shrink_backward_kernel(TensorIterator& iter, Scalar value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "shrink_backward_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "shrink_backward_cuda", [&] {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t grad_val, scalar_t self_val) -> scalar_t {
return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : grad_val;
});
});
});
}
void hardtanh_backward_kernel(TensorIterator& iter, Scalar min, Scalar max) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, iter.dtype(), "hardtanh_backward_cuda", [&]() {
auto min_val = min.to<scalar_t>();
auto max_val = max.to<scalar_t>();
gpu_kernel(iter, [min_val, max_val]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return (b <= min_val) || (b >= max_val) ? scalar_t(0) : a;
});
});
}
void softplus_kernel(TensorIterator& iter, Scalar beta_, Scalar threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "softplus_cuda", [&] {
auto beta = beta_.to<scalar_t>();
auto threshold = threshold_.to<scalar_t>();
gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a * beta) > threshold ? a : static_cast<scalar_t>(::log1p(::exp(a * beta))) / beta;
});
});
});
}
void softplus_backward_kernel(TensorIterator& iter, Scalar beta_, Scalar threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_backward_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "softplus_backward_cuda", [&] {
auto beta = beta_.to<scalar_t>();
auto threshold = threshold_.to<scalar_t>();
gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
scalar_t z = ::exp(b * beta);
return (b * beta) > threshold ? a : a * (z - scalar_t(1.)) / z;
});
});
});
}
template <typename scalar_t>
void threshold_kernel_impl(TensorIterator& iter, scalar_t threshold, scalar_t value) {
gpu_kernel_with_scalars(iter, [=]GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t {
return x <= threshold ? value : other;
});
}
static void threshold_kernel(TensorIterator& iter, Scalar threshold, Scalar value) {
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "threshold_cuda", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "threshold_cuda", [&] {
threshold_kernel_impl<scalar_t>(iter, threshold.to<scalar_t>(), value.to<scalar_t>());
});
});
}
void elu_kernel(TensorIterator& iter, Scalar alpha, Scalar scale, Scalar input_scale) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "elu_cuda", [&] {
auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
auto poscoef = scale.to<scalar_t>();
auto negiptcoef = input_scale.to<scalar_t>();
gpu_kernel(iter, [negcoef, poscoef, negiptcoef]GPU_LAMBDA(scalar_t a) -> scalar_t {
// WARNING (@zasdfgbnm, 2020-01-16): This is very fragile!
//
// The code below does not look like a great implementation because both positive
// and negative case are computed regardless of the condition, and you might want
// to optimize this. But this implementation is due to a compiler bug in handling
// branch. If we implement it in a correct way, the generated code will produce
// wrong result. This bug should be fixed in future CUDA, but we will need this
// workaround for maybe years until all the current CUDAs become obsolete.
//
// TODO: this workaround might become no longer necessary if the implementation of
// GPU loop in `Loops.cuh` is changed. We should make this great again once that
// change happens.
scalar_t positive_case = a * poscoef;
scalar_t negative_case = (static_cast<scalar_t>(::exp(a * negiptcoef)) - scalar_t(1.)) * negcoef;
return a > scalar_t(0) ? positive_case : negative_case;
});
});
});
}
void elu_backward_kernel(TensorIterator& iter, Scalar alpha, Scalar scale, Scalar input_scale) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_backward_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "elu_backward_cuda", [&] {
auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
auto poscoef = scale.to<scalar_t>();
auto negiptcoef = input_scale.to<scalar_t>();
gpu_kernel(iter, [negcoef, poscoef, negiptcoef]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return b <= scalar_t(0) ? a * negiptcoef * (b + negcoef) : a * poscoef;
});
});
});
}
namespace {
void GeluCUDAKernelImpl(TensorIterator& it) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluCUDAKernelImpl", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "GeluCUDAKernelImpl", [&] {
using T_ACC = acc_type<scalar_t, true>;
gpu_kernel(it, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
return static_cast<T_ACC>(x) *
c10::hip::compat::normcdf(static_cast<T_ACC>(x));
});
});
});
}
void GeluBackwardCUDAKernelImpl(TensorIterator& it) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
it.dtype(), "GeluBackwardCUDAKernelImpl", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "GeluBackwardCUDAKernelImpl", [&] {
using T_ACC = acc_type<scalar_t, true>;
gpu_kernel(it, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
constexpr T_ACC kBeta = M_2_SQRTPI * M_SQRT1_2 * T_ACC(0.5);
const T_ACC cdf = c10::hip::compat::normcdf(static_cast<T_ACC>(x));
const T_ACC pdf =
c10::hip::compat::exp(
T_ACC(-0.5) * static_cast<T_ACC>(x) * static_cast<T_ACC>(x)) *
kBeta;
return static_cast<T_ACC>(dy) * (cdf + static_cast<T_ACC>(x) * pdf);
});
});
});
}
void leaky_relu_kernel(TensorIterator& iter, Scalar negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "leaky_relu_cuda", [&] {
auto negval = negval_.to<scalar_t>();
gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > scalar_t(0) ? a : a * negval;
});
});
});
}
void leaky_relu_backward_kernel(TensorIterator& iter, Scalar negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_backward_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "leaky_relu_backward_cuda", [&] {
auto negval = negval_.to<scalar_t>();
gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a > scalar_t(0) ? b : b * negval;
});
});
});
}
} // namespace
Tensor gelu_cuda(const Tensor& self) {
Tensor Y = at::native::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto it = TensorIterator::unary_op(Y, self);
GeluCUDAKernelImpl(it);
return Y;
}
Tensor gelu_backward_cuda(const Tensor& grad, const Tensor& self) {
Tensor dX = at::native::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto it = TensorIterator::binary_op(dX, grad, self);
GeluBackwardCUDAKernelImpl(it);
return dX;
}
// computes `result = self <= threshold ? value : other`
// other is `self` in threshold() and `grad` in threshold_backward()
static Tensor threshold_out_cuda(
optional<Tensor> opt_result,
const Tensor& self,
Scalar threshold,
Scalar value,
const Tensor& other) {
Tensor result = opt_result.value_or(Tensor());
auto iter = TensorIterator::binary_op(result, self, other);
threshold_kernel(iter, threshold, value);
return iter.output();
}
Tensor threshold_cuda(const Tensor& self, Scalar threshold, Scalar value) {
return threshold_out_cuda(nullopt, self, threshold, value, self);
}
Tensor& threshold__cuda(Tensor& self, Scalar threshold, Scalar value) {
threshold_out_cuda(make_optional(self), self, threshold, value, self);
return self;
}
Tensor& threshold_out_cuda(Tensor& result, const Tensor& self, Scalar threshold, Scalar value) {
threshold_out_cuda(make_optional(result), self, threshold, value, self);
return result;
}
Tensor threshold_backward_cuda(const Tensor& grad, const Tensor& self, Scalar threshold) {
return threshold_out_cuda(nullopt, self, threshold, 0, grad);
}
REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel);
REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel);
REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel);
REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel);
REGISTER_DISPATCH(elu_stub, &elu_kernel);
REGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel);
REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel);
REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel);
REGISTER_DISPATCH(softplus_stub, &softplus_kernel);
REGISTER_DISPATCH(softplus_backward_stub, &softplus_backward_kernel);
}} // namespace at::native
| f74e65aefdc98bf6838ad4317711cb17ce1d9a5d.cu | #define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <math.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <c10/cuda/CUDAMathCompat.h>
namespace at { namespace native {
// -----------------------------------
// prelu forward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_kernel_share_weights(
const Tensor& input,
Tensor& result,
const scalar_t* weight_data) {
at::cuda::CUDA_tensor_apply2<scalar_t, scalar_t>(
input,
result,
[=] __device__ (
const scalar_t& input_val,
scalar_t& result_val) {
result_val = (input_val > 0) ? input_val : *weight_data * input_val;
});
}
template <typename scalar_t>
__global__ void prelu_cuda_kernel_multi_weights(
scalar_t* result_data,
const scalar_t* input_data,
const scalar_t* weight_data,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
  // multiply the value at each channel by weight[channel_index]
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
result_data[linearId] = (input_data_val > 0) ? input_data_val : weight_data[channel] * input_data_val;
}
Tensor prelu_cuda(const Tensor& self, const Tensor& weight_) {
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto weight = weight_.contiguous();
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
int64_t weight_num = weight.numel();
Tensor result = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto strides = input.strides();
// case1: shared weight for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
prelu_cuda_kernel_share_weights<scalar_t>(
input,
result,
weight.data_ptr<scalar_t>());
});
}
else { // case2: multiple weights, one for each channel
int64_t input_ndim = input.dim();
TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
    int64_t channel_size = 1; // channel_size defaults to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
TORCH_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
prelu_cuda_kernel_multi_weights<scalar_t>
<<<grid, block, 0, stream>>>(
result.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
weight.data_ptr<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
});
}
return result;
}
// -----------------------------------
// prelu backward
// -----------------------------------
template <typename scalar_t>
void prelu_cuda_backward_kernel_share_weights(
const Tensor& input,
const Tensor& grad_out,
Tensor& input_grad,
Tensor& weight_grad_collector,
const scalar_t* weight_data) {
at::cuda::CUDA_tensor_apply4<scalar_t, scalar_t, scalar_t, scalar_t>(
input,
grad_out,
input_grad,
weight_grad_collector,
[=] __device__ (
const scalar_t& input_val,
const scalar_t& grad_out_val,
scalar_t& input_grad_val,
scalar_t& weight_grad_collector_val) {
input_grad_val = (input_val > 0) ? grad_out_val : *weight_data * grad_out_val;
weight_grad_collector_val = (input_val > 0) ? scalar_t(0) : input_val * grad_out_val;
});
}
template <typename scalar_t>
__global__ void prelu_cuda_backward_kernel_multi_weights(
const scalar_t* input_data,
const scalar_t* weight_data,
const scalar_t* grad_out_data,
scalar_t* input_grad_data,
scalar_t* weight_grad_collector,
int64_t input_stride0,
int64_t input_stride1,
int64_t input_numel) {
int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
if (linearId >= input_numel) return;
int64_t channel = (linearId % input_stride0) / input_stride1;
scalar_t input_data_val = input_data[linearId];
scalar_t grad_out_data_val = grad_out_data[linearId];
input_grad_data[linearId] = (input_data_val > 0) ? grad_out_data_val : weight_data[channel] * grad_out_data_val;
weight_grad_collector[linearId] = (input_data_val > 0) ? scalar_t(0) : input_data_val * grad_out_data_val;
}
std::tuple<Tensor, Tensor> prelu_backward_cuda(const Tensor& grad_out_, const Tensor& self, const Tensor& weight_) {
TORCH_CHECK(grad_out_.is_cuda());
TORCH_CHECK(self.is_cuda());
TORCH_CHECK(weight_.is_cuda());
auto input = self.contiguous();
auto grad_out = grad_out_.contiguous();
auto weight = weight_.contiguous();
TORCH_CHECK(input.is_contiguous());
TORCH_CHECK(weight.is_contiguous());
TORCH_CHECK(grad_out.is_contiguous());
int64_t weight_num = weight.numel();
auto strides = input.strides();
auto dims = input.dim();
Tensor input_grad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor weight_grad = at::empty_like(weight, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor weight_grad_collector = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
// case1: shared parameter for all channels
if (weight_num == 1) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
prelu_cuda_backward_kernel_share_weights<scalar_t>(
input,
grad_out,
input_grad,
weight_grad_collector,
weight.data_ptr<scalar_t>());
});
weight_grad.fill_(weight_grad_collector.sum());
}
else { // case2: multiple parameters, one for each channel
int64_t input_ndim = input.dim();
TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
    int64_t channel_size = 1; // channel_size defaults to 1
int64_t input_stride0 = 1, input_stride1 = 1;
if (input_ndim > 1) {
channel_size = input.size(1); // channel is the 2nd dim of input
input_stride0 = strides[0];
input_stride1 = strides[1];
}
TORCH_CHECK(channel_size == weight_num,
"Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
" and channel size = ", channel_size, ".");
// config to run cuda kernel
int64_t input_numel = input.numel();
const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu_backward_cuda: input too large or too many dimensions");
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
prelu_cuda_backward_kernel_multi_weights<scalar_t>
<<<grid, block, 0, stream>>>(
input.data_ptr<scalar_t>(),
weight.data_ptr<scalar_t>(),
grad_out.data_ptr<scalar_t>(),
input_grad.data_ptr<scalar_t>(),
weight_grad_collector.data_ptr<scalar_t>(),
input_stride0,
input_stride1,
input_numel);
});
// update weight_grad
std::vector<int64_t> reduce_dims;
reduce_dims.push_back(0);
if (dims > 2) {
for(int64_t i = 2; i < dims; i++) reduce_dims.push_back(i);
}
weight_grad = weight_grad_collector.sum(reduce_dims);
}
return std::tuple<Tensor, Tensor>{input_grad, weight_grad};
}
// -----------------------------------
// hardshrink
// -----------------------------------
void hardshrink_kernel(TensorIterator& iter, Scalar value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardshrink_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "hardshrink_cuda", [&] {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a >= -lambd && a <= lambd) ? scalar_t(0) : a;
});
});
});
}
void softshrink_kernel(TensorIterator& iter, Scalar value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softshrink_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "softshrink_cuda", [&] {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0));
});
});
});
}
void shrink_backward_kernel(TensorIterator& iter, Scalar value) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "shrink_backward_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "shrink_backward_cuda", [&] {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t grad_val, scalar_t self_val) -> scalar_t {
return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : grad_val;
});
});
});
}
void hardtanh_backward_kernel(TensorIterator& iter, Scalar min, Scalar max) {
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, iter.dtype(), "hardtanh_backward_cuda", [&]() {
auto min_val = min.to<scalar_t>();
auto max_val = max.to<scalar_t>();
gpu_kernel(iter, [min_val, max_val]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return (b <= min_val) || (b >= max_val) ? scalar_t(0) : a;
});
});
}
void softplus_kernel(TensorIterator& iter, Scalar beta_, Scalar threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "softplus_cuda", [&] {
auto beta = beta_.to<scalar_t>();
auto threshold = threshold_.to<scalar_t>();
gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a * beta) > threshold ? a : static_cast<scalar_t>(::log1p(std::exp(a * beta))) / beta;
});
});
});
}
void softplus_backward_kernel(TensorIterator& iter, Scalar beta_, Scalar threshold_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_backward_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "softplus_backward_cuda", [&] {
auto beta = beta_.to<scalar_t>();
auto threshold = threshold_.to<scalar_t>();
gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
scalar_t z = std::exp(b * beta);
return (b * beta) > threshold ? a : a * (z - scalar_t(1.)) / z;
});
});
});
}
template <typename scalar_t>
void threshold_kernel_impl(TensorIterator& iter, scalar_t threshold, scalar_t value) {
gpu_kernel_with_scalars(iter, [=]GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t {
return x <= threshold ? value : other;
});
}
static void threshold_kernel(TensorIterator& iter, Scalar threshold, Scalar value) {
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "threshold_cuda", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "threshold_cuda", [&] {
threshold_kernel_impl<scalar_t>(iter, threshold.to<scalar_t>(), value.to<scalar_t>());
});
});
}
void elu_kernel(TensorIterator& iter, Scalar alpha, Scalar scale, Scalar input_scale) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "elu_cuda", [&] {
auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
auto poscoef = scale.to<scalar_t>();
auto negiptcoef = input_scale.to<scalar_t>();
gpu_kernel(iter, [negcoef, poscoef, negiptcoef]GPU_LAMBDA(scalar_t a) -> scalar_t {
// WARNING (@zasdfgbnm, 2020-01-16): This is very fragile!
//
// The code below does not look like a great implementation because both positive
// and negative case are computed regardless of the condition, and you might want
// to optimize this. But this implementation is due to a compiler bug in handling
// branch. If we implement it in a correct way, the generated code will produce
// wrong result. This bug should be fixed in future CUDA, but we will need this
// workaround for maybe years until all the current CUDAs become obsolete.
//
// TODO: this workaround might become no longer necessary if the implementation of
// GPU loop in `Loops.cuh` is changed. We should make this great again once that
// change happens.
scalar_t positive_case = a * poscoef;
scalar_t negative_case = (static_cast<scalar_t>(std::exp(a * negiptcoef)) - scalar_t(1.)) * negcoef;
return a > scalar_t(0) ? positive_case : negative_case;
});
});
});
}
void elu_backward_kernel(TensorIterator& iter, Scalar alpha, Scalar scale, Scalar input_scale) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_backward_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "elu_backward_cuda", [&] {
auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
auto poscoef = scale.to<scalar_t>();
auto negiptcoef = input_scale.to<scalar_t>();
gpu_kernel(iter, [negcoef, poscoef, negiptcoef]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return b <= scalar_t(0) ? a * negiptcoef * (b + negcoef) : a * poscoef;
});
});
});
}
namespace {
void GeluCUDAKernelImpl(TensorIterator& it) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluCUDAKernelImpl", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "GeluCUDAKernelImpl", [&] {
using T_ACC = acc_type<scalar_t, true>;
gpu_kernel(it, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
return static_cast<T_ACC>(x) *
c10::cuda::compat::normcdf(static_cast<T_ACC>(x));
});
});
});
}
void GeluBackwardCUDAKernelImpl(TensorIterator& it) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
it.dtype(), "GeluBackwardCUDAKernelImpl", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "GeluBackwardCUDAKernelImpl", [&] {
using T_ACC = acc_type<scalar_t, true>;
gpu_kernel(it, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
constexpr T_ACC kBeta = M_2_SQRTPI * M_SQRT1_2 * T_ACC(0.5);
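        // kBeta == 1/sqrt(2*pi), so `pdf` below is the standard normal density phi(x);
        // the returned value is dy * d/dx[x * Phi(x)] = dy * (Phi(x) + x * phi(x)).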
const T_ACC cdf = c10::cuda::compat::normcdf(static_cast<T_ACC>(x));
const T_ACC pdf =
c10::cuda::compat::exp(
T_ACC(-0.5) * static_cast<T_ACC>(x) * static_cast<T_ACC>(x)) *
kBeta;
return static_cast<T_ACC>(dy) * (cdf + static_cast<T_ACC>(x) * pdf);
});
});
});
}
void leaky_relu_kernel(TensorIterator& iter, Scalar negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "leaky_relu_cuda", [&] {
auto negval = negval_.to<scalar_t>();
gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a) -> scalar_t {
return a > scalar_t(0) ? a : a * negval;
});
});
});
}
void leaky_relu_backward_kernel(TensorIterator& iter, Scalar negval_) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_backward_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "leaky_relu_backward_cuda", [&] {
auto negval = negval_.to<scalar_t>();
gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a > scalar_t(0) ? b : b * negval;
});
});
});
}
} // namespace
Tensor gelu_cuda(const Tensor& self) {
Tensor Y = at::native::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto it = TensorIterator::unary_op(Y, self);
GeluCUDAKernelImpl(it);
return Y;
}
Tensor gelu_backward_cuda(const Tensor& grad, const Tensor& self) {
Tensor dX = at::native::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto it = TensorIterator::binary_op(dX, grad, self);
GeluBackwardCUDAKernelImpl(it);
return dX;
}
// computes `result = self <= threshold ? value : other`
// other is `self` in threshold() and `grad` in threshold_backward()
static Tensor threshold_out_cuda(
optional<Tensor> opt_result,
const Tensor& self,
Scalar threshold,
Scalar value,
const Tensor& other) {
Tensor result = opt_result.value_or(Tensor());
auto iter = TensorIterator::binary_op(result, self, other);
threshold_kernel(iter, threshold, value);
return iter.output();
}
Tensor threshold_cuda(const Tensor& self, Scalar threshold, Scalar value) {
return threshold_out_cuda(nullopt, self, threshold, value, self);
}
Tensor& threshold__cuda(Tensor& self, Scalar threshold, Scalar value) {
threshold_out_cuda(make_optional(self), self, threshold, value, self);
return self;
}
Tensor& threshold_out_cuda(Tensor& result, const Tensor& self, Scalar threshold, Scalar value) {
threshold_out_cuda(make_optional(result), self, threshold, value, self);
return result;
}
Tensor threshold_backward_cuda(const Tensor& grad, const Tensor& self, Scalar threshold) {
return threshold_out_cuda(nullopt, self, threshold, 0, grad);
}
REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel);
REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel);
REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel);
REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel);
REGISTER_DISPATCH(elu_stub, &elu_kernel);
REGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel);
REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel);
REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel);
REGISTER_DISPATCH(softplus_stub, &softplus_kernel);
REGISTER_DISPATCH(softplus_backward_stub, &softplus_backward_kernel);
}} // namespace at::native
|
9ad7a5d01be0b0fd634a754d2cae7cbc6317e2b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %clang_cc1 -triple amdgcn -fcuda-is-device -std=c++11 -fvisibility hidden -fapply-global-visibility-to-externs \
// RUN: -emit-llvm -o - -x hip %s -fsyntax-only -verify
// RUN: %clang_cc1 -triple x86_64 -std=c++11 \
// RUN: -emit-llvm -o - -x hip %s -fsyntax-only -verify
#define __device__ __attribute__((device))
#define __constant__ __attribute__((constant))
#define __hip_pinned_shadow__ __attribute((hip_pinned_shadow))
struct textureReference {
int a;
};
template <class T, int texType, int hipTextureReadMode>
struct texture : public textureReference {
texture() { a = 1; }
};
__hip_pinned_shadow__ texture<float, 2, 1> tex;
__device__ __hip_pinned_shadow__ texture<float, 2, 1> tex2; // expected-error{{'hip_pinned_shadow' and 'device' attributes are not compatible}}
// expected-error@-1{{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables}}
// expected-note@-2{{conflicting attribute is here}}
__constant__ __hip_pinned_shadow__ texture<float, 2, 1> tex3; // expected-error{{'hip_pinned_shadow' and 'constant' attributes are not compatible}}
// expected-error@-1{{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables}}
// expected-note@-2{{conflicting attribute is here}}
| 9ad7a5d01be0b0fd634a754d2cae7cbc6317e2b0.cu | // RUN: %clang_cc1 -triple amdgcn -fcuda-is-device -std=c++11 -fvisibility hidden -fapply-global-visibility-to-externs \
// RUN: -emit-llvm -o - -x hip %s -fsyntax-only -verify
// RUN: %clang_cc1 -triple x86_64 -std=c++11 \
// RUN: -emit-llvm -o - -x hip %s -fsyntax-only -verify
#define __device__ __attribute__((device))
#define __constant__ __attribute__((constant))
#define __hip_pinned_shadow__ __attribute((hip_pinned_shadow))
struct textureReference {
int a;
};
template <class T, int texType, int hipTextureReadMode>
struct texture : public textureReference {
texture() { a = 1; }
};
__hip_pinned_shadow__ texture<float, 2, 1> tex;
__device__ __hip_pinned_shadow__ texture<float, 2, 1> tex2; // expected-error{{'hip_pinned_shadow' and 'device' attributes are not compatible}}
// expected-error@-1{{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables}}
// expected-note@-2{{conflicting attribute is here}}
__constant__ __hip_pinned_shadow__ texture<float, 2, 1> tex3; // expected-error{{'hip_pinned_shadow' and 'constant' attributes are not compatible}}
// expected-error@-1{{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables}}
// expected-note@-2{{conflicting attribute is here}}
|
158d4a0482d69f29184fa839d1a3534748d2a717.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_ss.cu
*
* @brief Simple test driver program for Gunrock template.
*/
#include <gunrock/app/ss/ss_app.cu>
#include <gunrock/app/test_base.cuh>
using namespace gunrock;
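// Illustrative usage (assumed, not taken from this file): the driver is normally invoked
// through gunrock's parameter framework, e.g.
//   ./test_ss market <graph>.mtx --num-runs=1 [--quick]
// where the market loader and the --num-runs / --quick flags are consumed below.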
/******************************************************************************
* Main
******************************************************************************/
/**
* @brief Enclosure to the main function
*/
struct main_struct {
/**
* @brief the actual main function, after type switching
* @tparam VertexT Type of vertex identifier
* @tparam SizeT Type of graph size, i.e. type of edge identifier
* @tparam ValueT Type of edge values
* @param parameters Command line parameters
* @param v,s,val Place holders for type deduction
* \return hipError_t error message(s), if any
*/
template <typename VertexT, // Use int as the vertex identifier
typename SizeT, // Use int as the graph size type
typename ValueT> // Use float as the value type
hipError_t
operator()(util::Parameters ¶meters, VertexT v, SizeT s, ValueT val) {
typedef typename app::TestGraph<VertexT, SizeT, ValueT,
graph::HAS_EDGE_VALUES | graph::HAS_CSR>
GraphT;
hipError_t retval = hipSuccess;
util::CpuTimer cpu_timer;
GraphT graph;
cpu_timer.Start();
GUARD_CU(graphio::LoadGraph(parameters, graph));
cpu_timer.Stop();
parameters.Set("load-time", cpu_timer.ElapsedMillis());
bool quick = parameters.Get<bool>("quick");
bool quiet = parameters.Get<bool>("quiet");
std::string validation = parameters.Get<std::string>("validation");
if (quick && (parameters.UseDefault("validation") == false && validation != "none")) {
util::PrintMsg("Invalid options --quick and --validation=" + validation +
", no CPU reference result to validate");
return retval;
}
int num_runs = parameters.Get<int>("num-runs");
SizeT nodes = graph.nodes;
VertexT *ref_scan_stats = new VertexT[nodes];
if (!quick) {
util::PrintMsg("__________________________", !quiet);
float elapsed =
app::ss::CPU_Reference(parameters, graph.csr(), ref_scan_stats);
util::PrintMsg("__________________________\nRun CPU Reference Avg. in " +
std::to_string(num_runs) + " iterations elapsed: " +
std::to_string(elapsed) + " ms",
!quiet);
}
std::vector<std::string> switches{"advance-mode"};
GUARD_CU(app::Switch_Parameters(
parameters, graph, switches,
[ref_scan_stats](util::Parameters ¶meters, GraphT &graph) {
return app::ss::RunTests(parameters, graph, ref_scan_stats);
}));
if (ref_scan_stats != NULL) {
delete[] ref_scan_stats;
ref_scan_stats = NULL;
}
return retval;
}
};
int main(int argc, char **argv) {
hipError_t retval = hipSuccess;
util::Parameters parameters("test Scan Statistics");
GUARD_CU(graphio::UseParameters(parameters));
GUARD_CU(app::ss::UseParameters(parameters));
GUARD_CU(app::UseParameters_test(parameters));
GUARD_CU(parameters.Parse_CommandLine(argc, argv));
if (parameters.Get<bool>("help")) {
parameters.Print_Help();
return hipSuccess;
}
GUARD_CU(parameters.Check_Required());
return app::Switch_Types<app::VERTEXT_U32B | // app::VERTEXT_U64B |
app::SIZET_U32B | // app::SIZET_U64B |
app::VALUET_F64B | app::UNDIRECTED | app::DIRECTED>(
parameters, main_struct());
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| 158d4a0482d69f29184fa839d1a3534748d2a717.cu | // ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_ss.cu
*
* @brief Simple test driver program for Gunrock template.
*/
#include <gunrock/app/ss/ss_app.cu>
#include <gunrock/app/test_base.cuh>
using namespace gunrock;
/******************************************************************************
* Main
******************************************************************************/
/**
* @brief Enclosure to the main function
*/
struct main_struct {
/**
* @brief the actual main function, after type switching
* @tparam VertexT Type of vertex identifier
* @tparam SizeT Type of graph size, i.e. type of edge identifier
* @tparam ValueT Type of edge values
* @param parameters Command line parameters
* @param v,s,val Place holders for type deduction
* \return cudaError_t error message(s), if any
*/
template <typename VertexT, // Use int as the vertex identifier
typename SizeT, // Use int as the graph size type
typename ValueT> // Use float as the value type
cudaError_t
operator()(util::Parameters ¶meters, VertexT v, SizeT s, ValueT val) {
typedef typename app::TestGraph<VertexT, SizeT, ValueT,
graph::HAS_EDGE_VALUES | graph::HAS_CSR>
GraphT;
cudaError_t retval = cudaSuccess;
util::CpuTimer cpu_timer;
GraphT graph;
cpu_timer.Start();
GUARD_CU(graphio::LoadGraph(parameters, graph));
cpu_timer.Stop();
parameters.Set("load-time", cpu_timer.ElapsedMillis());
bool quick = parameters.Get<bool>("quick");
bool quiet = parameters.Get<bool>("quiet");
std::string validation = parameters.Get<std::string>("validation");
if (quick && (parameters.UseDefault("validation") == false && validation != "none")) {
util::PrintMsg("Invalid options --quick and --validation=" + validation +
", no CPU reference result to validate");
return retval;
}
int num_runs = parameters.Get<int>("num-runs");
SizeT nodes = graph.nodes;
VertexT *ref_scan_stats = new VertexT[nodes];
if (!quick) {
util::PrintMsg("__________________________", !quiet);
float elapsed =
app::ss::CPU_Reference(parameters, graph.csr(), ref_scan_stats);
util::PrintMsg("__________________________\nRun CPU Reference Avg. in " +
std::to_string(num_runs) + " iterations elapsed: " +
std::to_string(elapsed) + " ms",
!quiet);
}
std::vector<std::string> switches{"advance-mode"};
GUARD_CU(app::Switch_Parameters(
parameters, graph, switches,
[ref_scan_stats](util::Parameters ¶meters, GraphT &graph) {
return app::ss::RunTests(parameters, graph, ref_scan_stats);
}));
if (ref_scan_stats != NULL) {
delete[] ref_scan_stats;
ref_scan_stats = NULL;
}
return retval;
}
};
int main(int argc, char **argv) {
cudaError_t retval = cudaSuccess;
util::Parameters parameters("test Scan Statistics");
GUARD_CU(graphio::UseParameters(parameters));
GUARD_CU(app::ss::UseParameters(parameters));
GUARD_CU(app::UseParameters_test(parameters));
GUARD_CU(parameters.Parse_CommandLine(argc, argv));
if (parameters.Get<bool>("help")) {
parameters.Print_Help();
return cudaSuccess;
}
GUARD_CU(parameters.Check_Required());
return app::Switch_Types<app::VERTEXT_U32B | // app::VERTEXT_U64B |
app::SIZET_U32B | // app::SIZET_U64B |
app::VALUET_F64B | app::UNDIRECTED | app::DIRECTED>(
parameters, main_struct());
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
b9f6ecf0007e2daebcac1c3c81128d7ef6219147.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <time.h>
#include "cuStopwatch.cu"
#define SHIFT 13
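// Note: transpose_1 reads the source with coalesced accesses and writes with a stride of w,
// while transpose_2 does the opposite (strided reads, coalesced writes); timing both shows the
// cost of uncoalesced global-memory traffic on either side of the copy.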
__global__ void transpose_1(const uint32_t* mat, uint32_t* mat_trans, uint32_t w, uint32_t h) {
uint32_t xid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t yid = threadIdx.y + blockIdx.y * blockDim.y;
if((xid < h)&&(yid < w)){
mat_trans[yid + w * xid] = mat[xid + w * yid];
}
return;
}
__global__ void transpose_2(const uint32_t* mat, uint32_t* mat_trans, uint32_t w, uint32_t h) {
uint32_t xid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t yid = threadIdx.y + blockIdx.y * blockDim.y;
if((xid < h)&&(yid < w)){
mat_trans[xid + h * yid] = mat[yid + h * xid];
}
return;
}
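// Fill arr with pseudo-random values using a 32-bit xorshift generator seeded from the current time.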
void randgen(uint32_t* arr, size_t count){
uint32_t state = time(NULL);
for(uint32_t i = 0; i < count; i++){
state ^= state << 13;
state ^= state >> 17;
state ^= state << 5;
arr[i] = state;
}
return;
}
int main() {
// Allocate memory, filling in random data and transfer to device
uint32_t *mat_host, *mat_dev, *mat_res_dev;
const uint32_t mat_size = 1 << (SHIFT * 2);
const uint32_t mat_side = 1 << SHIFT;
hipHostMalloc((void**)&mat_host, mat_size*sizeof(uint32_t), hipHostMallocDefault);
hipMalloc((void**)&mat_dev, mat_size*sizeof(uint32_t));
hipMalloc((void**)&mat_res_dev, mat_size*sizeof(uint32_t));
printf("Copying data to device\n");
randgen(mat_host, mat_size);
hipMemcpy(mat_dev, mat_host, mat_size*sizeof(uint32_t), hipMemcpyHostToDevice);
hipHostFree(mat_host);
// Performing matrix transposition on a 2^13 * 2^13 matrix
dim3 blocksize(32, 32);
dim3 gridsize(mat_side / 32, mat_side / 32);
printf("First method\n");
cuStopwatch sw1;
sw1.start();
hipLaunchKernelGGL(( transpose_1), dim3(gridsize), dim3(blocksize), 0, 0, mat_dev, mat_res_dev, mat_side, mat_side);
printf("%.4fms\n", sw1.stop());
printf("\nSecond method\n");
cuStopwatch sw2;
sw2.start();
hipLaunchKernelGGL(( transpose_2), dim3(gridsize), dim3(blocksize), 0, 0, mat_dev, mat_res_dev, mat_side, mat_side);
printf("%.4fms\n", sw2.stop());
// Free memory
hipFree(mat_dev);
hipFree(mat_res_dev);
return 0;
} | b9f6ecf0007e2daebcac1c3c81128d7ef6219147.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <stdint.h>
#include <time.h>
#include "cuStopwatch.cu"
#define SHIFT 13
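// Note: transpose_1 reads the source with coalesced accesses and writes with a stride of w,
// while transpose_2 does the opposite (strided reads, coalesced writes); timing both shows the
// cost of uncoalesced global-memory traffic on either side of the copy.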
__global__ void transpose_1(const uint32_t* mat, uint32_t* mat_trans, uint32_t w, uint32_t h) {
uint32_t xid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t yid = threadIdx.y + blockIdx.y * blockDim.y;
if((xid < h)&&(yid < w)){
mat_trans[yid + w * xid] = mat[xid + w * yid];
}
return;
}
__global__ void transpose_2(const uint32_t* mat, uint32_t* mat_trans, uint32_t w, uint32_t h) {
uint32_t xid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t yid = threadIdx.y + blockIdx.y * blockDim.y;
if((xid < h)&&(yid < w)){
mat_trans[xid + h * yid] = mat[yid + h * xid];
}
return;
}
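// Fill arr with pseudo-random values using a 32-bit xorshift generator seeded from the current time.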
void randgen(uint32_t* arr, size_t count){
uint32_t state = time(NULL);
for(uint32_t i = 0; i < count; i++){
state ^= state << 13;
state ^= state >> 17;
state ^= state << 5;
arr[i] = state;
}
return;
}
int main() {
// Allocate memory, filling in random data and transfer to device
uint32_t *mat_host, *mat_dev, *mat_res_dev;
const uint32_t mat_size = 1 << (SHIFT * 2);
const uint32_t mat_side = 1 << SHIFT;
cudaHostAlloc((void**)&mat_host, mat_size*sizeof(uint32_t), cudaHostAllocDefault);
cudaMalloc((void**)&mat_dev, mat_size*sizeof(uint32_t));
cudaMalloc((void**)&mat_res_dev, mat_size*sizeof(uint32_t));
printf("Copying data to device\n");
randgen(mat_host, mat_size);
cudaMemcpy(mat_dev, mat_host, mat_size*sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaFreeHost(mat_host);
// Performing matrix transposition on a 2^13 * 2^13 matrix
dim3 blocksize(32, 32);
dim3 gridsize(mat_side / 32, mat_side / 32);
printf("First method\n");
cuStopwatch sw1;
sw1.start();
transpose_1<<<gridsize, blocksize>>>(mat_dev, mat_res_dev, mat_side, mat_side);
printf("%.4fms\n", sw1.stop());
printf("\nSecond method\n");
cuStopwatch sw2;
sw2.start();
transpose_2<<<gridsize, blocksize>>>(mat_dev, mat_res_dev, mat_side, mat_side);
printf("%.4fms\n", sw2.stop());
// Free memory
cudaFree(mat_dev);
cudaFree(mat_res_dev);
return 0;
} |
56f3b130ba8a903f0134297534631956f2372cfd.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <utilities/high_res_clock.h>
#include <utilities/base_fixture.hpp>
#include <utilities/device_comm_wrapper.hpp>
#include <utilities/test_graphs.hpp>
#include <utilities/test_utilities.hpp>
#include <utilities/thrust_wrapper.hpp>
#include <cugraph/algorithms.hpp>
#include <cugraph/partition_manager.hpp>
#include <cuco/detail/hash_functions.cuh>
#include <cugraph/graph_view.hpp>
#include <cugraph/prims/reduce_v.cuh>
#include <thrust/reduce.h>
#include <raft/comms/comms.hpp>
#include <raft/comms/mpi_comms.hpp>
#include <raft/handle.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <gtest/gtest.h>
#include <random>
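// Hashes each vertex id into one of `mod` buckets and returns the bucket value replicated
// across the requested property types as a thrust::tuple.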
template <typename vertex_t, typename... Args>
struct property_transform : public thrust::unary_function<vertex_t, thrust::tuple<Args...>> {
int mod{};
property_transform(int mod_count) : mod(mod_count) {}
constexpr __device__ auto operator()(const vertex_t& val)
{
cuco::detail::MurmurHash3_32<vertex_t> hash_func{};
auto value = hash_func(val) % mod;
return thrust::make_tuple(static_cast<Args>(value)...);
}
};
template <typename vertex_t, template <typename...> typename Tuple, typename... Args>
struct property_transform<vertex_t, Tuple<Args...>> : public property_transform<vertex_t, Args...> {
};
template <typename Tuple, std::size_t... I>
auto make_iterator_tuple(Tuple& data, std::index_sequence<I...>)
{
return thrust::make_tuple((std::get<I>(data).begin())...);
}
template <typename... Args>
auto get_zip_iterator(std::tuple<Args...>& data)
{
return thrust::make_zip_iterator(make_iterator_tuple(
data, std::make_index_sequence<std::tuple_size<std::tuple<Args...>>::value>()));
}
template <typename T>
auto get_property_iterator(std::tuple<T>& data)
{
return (std::get<0>(data)).begin();
}
template <typename T0, typename... Args>
auto get_property_iterator(std::tuple<T0, Args...>& data)
{
return get_zip_iterator(data);
}
template <typename... Args>
struct generate_impl {
static thrust::tuple<Args...> initial_value(int init)
{
return thrust::make_tuple(static_cast<Args>(init)...);
}
template <typename label_t>
static std::tuple<rmm::device_uvector<Args>...> property(rmm::device_uvector<label_t>& labels,
int hash_bin_count,
raft::handle_t const& handle)
{
auto data = std::make_tuple(rmm::device_uvector<Args>(labels.size(), handle.get_stream())...);
auto zip = get_zip_iterator(data);
thrust::transform(handle.get_thrust_policy(),
labels.begin(),
labels.end(),
zip,
property_transform<label_t, Args...>(hash_bin_count));
return data;
}
template <typename label_t>
static std::tuple<rmm::device_uvector<Args>...> property(thrust::counting_iterator<label_t> begin,
thrust::counting_iterator<label_t> end,
int hash_bin_count,
raft::handle_t const& handle)
{
auto length = thrust::distance(begin, end);
auto data = std::make_tuple(rmm::device_uvector<Args>(length, handle.get_stream())...);
auto zip = get_zip_iterator(data);
thrust::transform(handle.get_thrust_policy(),
begin,
end,
zip,
property_transform<label_t, Args...>(hash_bin_count));
return data;
}
};
template <typename T>
struct result_compare {
static constexpr double threshold_ratio{1e-3};
constexpr auto operator()(const T& t1, const T& t2)
{
if constexpr (std::is_floating_point_v<T>) {
return std::abs(t1 - t2) < (::max(t1, t2) * threshold_ratio);
}
return t1 == t2;
}
};
template <typename... Args>
struct result_compare<thrust::tuple<Args...>> {
static constexpr double threshold_ratio{1e-3};
using Type = thrust::tuple<Args...>;
constexpr auto operator()(const Type& t1, const Type& t2)
{
return equality_impl(t1, t2, std::make_index_sequence<thrust::tuple_size<Type>::value>());
}
private:
template <typename T>
constexpr bool equal(T t1, T t2)
{
if constexpr (std::is_floating_point_v<T>) {
return std::abs(t1 - t2) < (::max(t1, t2) * threshold_ratio);
}
return t1 == t2;
}
template <typename T, std::size_t... I>
constexpr auto equality_impl(T& t1, T& t2, std::index_sequence<I...>)
{
return (... && (equal(thrust::get<I>(t1), thrust::get<I>(t2))));
}
};
template <typename T>
struct generate : public generate_impl<T> {
static T initial_value(int init) { return static_cast<T>(init); }
};
template <typename... Args>
struct generate<std::tuple<Args...>> : public generate_impl<Args...> {
};
struct Prims_Usecase {
bool check_correctness{true};
};
template <typename input_usecase_t>
class Tests_MG_ReduceV
: public ::testing::TestWithParam<std::tuple<Prims_Usecase, input_usecase_t>> {
public:
Tests_MG_ReduceV() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
// Compare the results of the reduce_v primitive against thrust::reduce on a single GPU
template <typename vertex_t,
typename edge_t,
typename weight_t,
typename result_t,
bool store_transposed>
void run_current_test(Prims_Usecase const& prims_usecase, input_usecase_t const& input_usecase)
{
// 1. initialize handle
raft::handle_t handle{};
HighResClock hr_clock{};
raft::comms::initialize_mpi_comms(&handle, MPI_COMM_WORLD);
auto& comm = handle.get_comms();
auto const comm_size = comm.get_size();
auto const comm_rank = comm.get_rank();
auto row_comm_size = static_cast<int>(sqrt(static_cast<double>(comm_size)));
while (comm_size % row_comm_size != 0) {
--row_comm_size;
}
cugraph::partition_2d::subcomm_factory_t<cugraph::partition_2d::key_naming_t, vertex_t>
subcomm_factory(handle, row_comm_size);
// 2. create MG graph
if (cugraph::test::g_perf) {
CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
hr_clock.start();
}
auto [mg_graph, d_mg_renumber_map_labels] =
cugraph::test::construct_graph<vertex_t, edge_t, weight_t, store_transposed, true>(
handle, input_usecase, true, true);
if (cugraph::test::g_perf) {
CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
double elapsed_time{0.0};
hr_clock.stop(&elapsed_time);
std::cout << "MG construct_graph took " << elapsed_time * 1e-6 << " s.\n";
}
auto mg_graph_view = mg_graph.view();
// 3. run MG reduce_v
const int hash_bin_count = 5;
const int initial_value = 10;
auto property_initial_value = generate<result_t>::initial_value(initial_value);
auto property_data =
generate<result_t>::property((*d_mg_renumber_map_labels), hash_bin_count, handle);
auto property_iter = get_property_iterator(property_data);
if (cugraph::test::g_perf) {
CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
hr_clock.start();
}
auto result = reduce_v(handle,
mg_graph_view,
property_iter,
property_iter + (*d_mg_renumber_map_labels).size(),
property_initial_value);
if (cugraph::test::g_perf) {
CUDA_TRY(hipDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
double elapsed_time{0.0};
hr_clock.stop(&elapsed_time);
std::cout << "MG reduce_v took " << elapsed_time * 1e-6 << " s.\n";
}
//// 4. compare SG & MG results
if (prims_usecase.check_correctness) {
cugraph::graph_t<vertex_t, edge_t, weight_t, store_transposed, false> sg_graph(handle);
std::tie(sg_graph, std::ignore) =
cugraph::test::construct_graph<vertex_t, edge_t, weight_t, store_transposed, false>(
handle, input_usecase, true, false);
auto sg_graph_view = sg_graph.view();
auto sg_property_data = generate<result_t>::property(
thrust::make_counting_iterator(sg_graph_view.get_local_vertex_first()),
thrust::make_counting_iterator(sg_graph_view.get_local_vertex_last()),
hash_bin_count,
handle);
auto sg_property_iter = get_property_iterator(sg_property_data);
using property_t = decltype(property_initial_value);
auto expected_result =
thrust::reduce(handle.get_thrust_policy(),
sg_property_iter,
sg_property_iter + sg_graph_view.get_number_of_local_vertices(),
property_initial_value,
cugraph::property_add<property_t>());
result_compare<property_t> compare{};
ASSERT_TRUE(compare(expected_result, result));
}
}
};
using Tests_MG_ReduceV_File = Tests_MG_ReduceV<cugraph::test::File_Usecase>;
using Tests_MG_ReduceV_Rmat = Tests_MG_ReduceV<cugraph::test::Rmat_Usecase>;
TEST_P(Tests_MG_ReduceV_File, CheckInt32Int32FloatTupleIntFloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, std::tuple<int, float>, false>(std::get<0>(param),
std::get<1>(param));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, std::tuple<int, float>, false>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_File, CheckInt32Int32FloatTupleIntFloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, std::tuple<int, float>, true>(std::get<0>(param),
std::get<1>(param));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, std::tuple<int, float>, true>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_File, CheckInt32Int32FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, false>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt32Int32FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, false>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt32Int64FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int64_t, float, int, false>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt64Int64FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int64_t, int64_t, float, int, false>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_File, CheckInt32Int32FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, true>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt32Int32FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, true>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt32Int64FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int64_t, float, int, true>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt64Int64FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int64_t, int64_t, float, int, true>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
INSTANTIATE_TEST_SUITE_P(
file_test,
Tests_MG_ReduceV_File,
::testing::Combine(
::testing::Values(Prims_Usecase{true}),
::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"),
cugraph::test::File_Usecase("test/datasets/web-Google.mtx"),
cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"),
cugraph::test::File_Usecase("test/datasets/webbase-1M.mtx"))));
INSTANTIATE_TEST_SUITE_P(
rmat_small_test,
Tests_MG_ReduceV_Rmat,
::testing::Combine(::testing::Values(Prims_Usecase{true}),
::testing::Values(cugraph::test::Rmat_Usecase(
10, 16, 0.57, 0.19, 0.19, 0, false, false, 0, true))));
INSTANTIATE_TEST_SUITE_P(
rmat_benchmark_test, /* note that scale & edge factor can be overridden in benchmarking (with
--gtest_filter to select only the rmat_benchmark_test with a specific
vertex & edge type combination) by command line arguments and do not
include more than one Rmat_Usecase that differ only in scale or edge
factor (to avoid running same benchmarks more than once) */
Tests_MG_ReduceV_Rmat,
::testing::Combine(::testing::Values(Prims_Usecase{false}),
::testing::Values(cugraph::test::Rmat_Usecase(
20, 32, 0.57, 0.19, 0.19, 0, false, false, 0, true))));
CUGRAPH_MG_TEST_PROGRAM_MAIN()
| 56f3b130ba8a903f0134297534631956f2372cfd.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <utilities/high_res_clock.h>
#include <utilities/base_fixture.hpp>
#include <utilities/device_comm_wrapper.hpp>
#include <utilities/test_graphs.hpp>
#include <utilities/test_utilities.hpp>
#include <utilities/thrust_wrapper.hpp>
#include <cugraph/algorithms.hpp>
#include <cugraph/partition_manager.hpp>
#include <cuco/detail/hash_functions.cuh>
#include <cugraph/graph_view.hpp>
#include <cugraph/prims/reduce_v.cuh>
#include <thrust/reduce.h>
#include <raft/comms/comms.hpp>
#include <raft/comms/mpi_comms.hpp>
#include <raft/handle.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <gtest/gtest.h>
#include <random>
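// Hashes each vertex id into one of `mod` buckets and returns the bucket value replicated
// across the requested property types as a thrust::tuple.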
template <typename vertex_t, typename... Args>
struct property_transform : public thrust::unary_function<vertex_t, thrust::tuple<Args...>> {
int mod{};
property_transform(int mod_count) : mod(mod_count) {}
constexpr __device__ auto operator()(const vertex_t& val)
{
cuco::detail::MurmurHash3_32<vertex_t> hash_func{};
auto value = hash_func(val) % mod;
return thrust::make_tuple(static_cast<Args>(value)...);
}
};
template <typename vertex_t, template <typename...> typename Tuple, typename... Args>
struct property_transform<vertex_t, Tuple<Args...>> : public property_transform<vertex_t, Args...> {
};
template <typename Tuple, std::size_t... I>
auto make_iterator_tuple(Tuple& data, std::index_sequence<I...>)
{
return thrust::make_tuple((std::get<I>(data).begin())...);
}
template <typename... Args>
auto get_zip_iterator(std::tuple<Args...>& data)
{
return thrust::make_zip_iterator(make_iterator_tuple(
data, std::make_index_sequence<std::tuple_size<std::tuple<Args...>>::value>()));
}
template <typename T>
auto get_property_iterator(std::tuple<T>& data)
{
return (std::get<0>(data)).begin();
}
template <typename T0, typename... Args>
auto get_property_iterator(std::tuple<T0, Args...>& data)
{
return get_zip_iterator(data);
}
template <typename... Args>
struct generate_impl {
static thrust::tuple<Args...> initial_value(int init)
{
return thrust::make_tuple(static_cast<Args>(init)...);
}
template <typename label_t>
static std::tuple<rmm::device_uvector<Args>...> property(rmm::device_uvector<label_t>& labels,
int hash_bin_count,
raft::handle_t const& handle)
{
auto data = std::make_tuple(rmm::device_uvector<Args>(labels.size(), handle.get_stream())...);
auto zip = get_zip_iterator(data);
thrust::transform(handle.get_thrust_policy(),
labels.begin(),
labels.end(),
zip,
property_transform<label_t, Args...>(hash_bin_count));
return data;
}
template <typename label_t>
static std::tuple<rmm::device_uvector<Args>...> property(thrust::counting_iterator<label_t> begin,
thrust::counting_iterator<label_t> end,
int hash_bin_count,
raft::handle_t const& handle)
{
auto length = thrust::distance(begin, end);
auto data = std::make_tuple(rmm::device_uvector<Args>(length, handle.get_stream())...);
auto zip = get_zip_iterator(data);
thrust::transform(handle.get_thrust_policy(),
begin,
end,
zip,
property_transform<label_t, Args...>(hash_bin_count));
return data;
}
};
template <typename T>
struct result_compare {
static constexpr double threshold_ratio{1e-3};
constexpr auto operator()(const T& t1, const T& t2)
{
if constexpr (std::is_floating_point_v<T>) {
return std::abs(t1 - t2) < (std::max(t1, t2) * threshold_ratio);
}
return t1 == t2;
}
};
template <typename... Args>
struct result_compare<thrust::tuple<Args...>> {
static constexpr double threshold_ratio{1e-3};
using Type = thrust::tuple<Args...>;
constexpr auto operator()(const Type& t1, const Type& t2)
{
return equality_impl(t1, t2, std::make_index_sequence<thrust::tuple_size<Type>::value>());
}
private:
template <typename T>
constexpr bool equal(T t1, T t2)
{
if constexpr (std::is_floating_point_v<T>) {
return std::abs(t1 - t2) < (std::max(t1, t2) * threshold_ratio);
}
return t1 == t2;
}
template <typename T, std::size_t... I>
constexpr auto equality_impl(T& t1, T& t2, std::index_sequence<I...>)
{
return (... && (equal(thrust::get<I>(t1), thrust::get<I>(t2))));
}
};
template <typename T>
struct generate : public generate_impl<T> {
static T initial_value(int init) { return static_cast<T>(init); }
};
template <typename... Args>
struct generate<std::tuple<Args...>> : public generate_impl<Args...> {
};
struct Prims_Usecase {
bool check_correctness{true};
};
template <typename input_usecase_t>
class Tests_MG_ReduceV
: public ::testing::TestWithParam<std::tuple<Prims_Usecase, input_usecase_t>> {
public:
Tests_MG_ReduceV() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
// Compare the results of the reduce_v primitive against thrust::reduce on a single GPU
template <typename vertex_t,
typename edge_t,
typename weight_t,
typename result_t,
bool store_transposed>
void run_current_test(Prims_Usecase const& prims_usecase, input_usecase_t const& input_usecase)
{
// 1. initialize handle
raft::handle_t handle{};
HighResClock hr_clock{};
raft::comms::initialize_mpi_comms(&handle, MPI_COMM_WORLD);
auto& comm = handle.get_comms();
auto const comm_size = comm.get_size();
auto const comm_rank = comm.get_rank();
auto row_comm_size = static_cast<int>(sqrt(static_cast<double>(comm_size)));
while (comm_size % row_comm_size != 0) {
--row_comm_size;
}
cugraph::partition_2d::subcomm_factory_t<cugraph::partition_2d::key_naming_t, vertex_t>
subcomm_factory(handle, row_comm_size);
// 2. create MG graph
if (cugraph::test::g_perf) {
CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
hr_clock.start();
}
auto [mg_graph, d_mg_renumber_map_labels] =
cugraph::test::construct_graph<vertex_t, edge_t, weight_t, store_transposed, true>(
handle, input_usecase, true, true);
if (cugraph::test::g_perf) {
CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
double elapsed_time{0.0};
hr_clock.stop(&elapsed_time);
std::cout << "MG construct_graph took " << elapsed_time * 1e-6 << " s.\n";
}
auto mg_graph_view = mg_graph.view();
// 3. run MG reduce_v
const int hash_bin_count = 5;
const int initial_value = 10;
auto property_initial_value = generate<result_t>::initial_value(initial_value);
auto property_data =
generate<result_t>::property((*d_mg_renumber_map_labels), hash_bin_count, handle);
auto property_iter = get_property_iterator(property_data);
if (cugraph::test::g_perf) {
CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
hr_clock.start();
}
auto result = reduce_v(handle,
mg_graph_view,
property_iter,
property_iter + (*d_mg_renumber_map_labels).size(),
property_initial_value);
if (cugraph::test::g_perf) {
CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
double elapsed_time{0.0};
hr_clock.stop(&elapsed_time);
std::cout << "MG reduce_v took " << elapsed_time * 1e-6 << " s.\n";
}
//// 4. compare SG & MG results
if (prims_usecase.check_correctness) {
cugraph::graph_t<vertex_t, edge_t, weight_t, store_transposed, false> sg_graph(handle);
std::tie(sg_graph, std::ignore) =
cugraph::test::construct_graph<vertex_t, edge_t, weight_t, store_transposed, false>(
handle, input_usecase, true, false);
auto sg_graph_view = sg_graph.view();
auto sg_property_data = generate<result_t>::property(
thrust::make_counting_iterator(sg_graph_view.get_local_vertex_first()),
thrust::make_counting_iterator(sg_graph_view.get_local_vertex_last()),
hash_bin_count,
handle);
auto sg_property_iter = get_property_iterator(sg_property_data);
using property_t = decltype(property_initial_value);
auto expected_result =
thrust::reduce(handle.get_thrust_policy(),
sg_property_iter,
sg_property_iter + sg_graph_view.get_number_of_local_vertices(),
property_initial_value,
cugraph::property_add<property_t>());
result_compare<property_t> compare{};
ASSERT_TRUE(compare(expected_result, result));
}
}
};
using Tests_MG_ReduceV_File = Tests_MG_ReduceV<cugraph::test::File_Usecase>;
using Tests_MG_ReduceV_Rmat = Tests_MG_ReduceV<cugraph::test::Rmat_Usecase>;
TEST_P(Tests_MG_ReduceV_File, CheckInt32Int32FloatTupleIntFloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, std::tuple<int, float>, false>(std::get<0>(param),
std::get<1>(param));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, std::tuple<int, float>, false>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_File, CheckInt32Int32FloatTupleIntFloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, std::tuple<int, float>, true>(std::get<0>(param),
std::get<1>(param));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, std::tuple<int, float>, true>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_File, CheckInt32Int32FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, false>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt32Int32FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, false>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt32Int64FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int64_t, float, int, false>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt64Int64FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int64_t, int64_t, float, int, false>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_File, CheckInt32Int32FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, true>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt32Int32FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, true>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt32Int64FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int64_t, float, int, true>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt64Int64FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int64_t, int64_t, float, int, true>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
INSTANTIATE_TEST_SUITE_P(
file_test,
Tests_MG_ReduceV_File,
::testing::Combine(
::testing::Values(Prims_Usecase{true}),
::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"),
cugraph::test::File_Usecase("test/datasets/web-Google.mtx"),
cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"),
cugraph::test::File_Usecase("test/datasets/webbase-1M.mtx"))));
INSTANTIATE_TEST_SUITE_P(
rmat_small_test,
Tests_MG_ReduceV_Rmat,
::testing::Combine(::testing::Values(Prims_Usecase{true}),
::testing::Values(cugraph::test::Rmat_Usecase(
10, 16, 0.57, 0.19, 0.19, 0, false, false, 0, true))));
INSTANTIATE_TEST_SUITE_P(
rmat_benchmark_test, /* note that scale & edge factor can be overridden in benchmarking (with
--gtest_filter to select only the rmat_benchmark_test with a specific
vertex & edge type combination) by command line arguments and do not
include more than one Rmat_Usecase that differ only in scale or edge
factor (to avoid running same benchmarks more than once) */
Tests_MG_ReduceV_Rmat,
::testing::Combine(::testing::Values(Prims_Usecase{false}),
::testing::Values(cugraph::test::Rmat_Usecase(
20, 32, 0.57, 0.19, 0.19, 0, false, false, 0, true))));
CUGRAPH_MG_TEST_PROGRAM_MAIN()
|
52be80d920b2634f4157cef59c264de1fa88ff13.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 10
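// Element-wise vector addition: the kernel is launched with N blocks of one thread each,
// so each block computes a single output element c[i] = a[i] + b[i].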
__global__ void sum(int *a,
int *b, int *c)
{
int i = blockIdx.x;
c[i] = a[i] + b[i];
}
int main( void ) {
int host_a[N];
int host_b[N];
int host_c[N];
for (int i=0; i<N; i++) {
host_a[i] = i;
host_b[i] = i;
}
int *dev_a, *dev_b, *dev_c;
hipMalloc(&dev_a, sizeof(int) * N);
hipMalloc(&dev_b, sizeof(int) * N);
hipMalloc(&dev_c, sizeof(int) * N);
hipMemcpy(dev_a, host_a, sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_b, host_b, sizeof(int) * N,
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( sum), dim3(N), dim3(1), 0, 0, dev_a, dev_b, dev_c);
hipMemcpy(host_c, dev_c, sizeof(int) * N, hipMemcpyDeviceToHost);
for (int i=0; i<N; i++) {
printf("%d ", host_c[i]);
}
printf("\n");
} | 52be80d920b2634f4157cef59c264de1fa88ff13.cu | #include <stdio.h>
#define N 10
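// Element-wise vector addition: the kernel is launched with N blocks of one thread each,
// so each block computes a single output element c[i] = a[i] + b[i].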
__global__ void sum(int *a,
int *b, int *c)
{
int i = blockIdx.x;
c[i] = a[i] + b[i];
}
int main( void ) {
int host_a[N];
int host_b[N];
int host_c[N];
for (int i=0; i<N; i++) {
host_a[i] = i;
host_b[i] = i;
}
int *dev_a, *dev_b, *dev_c;
cudaMalloc(&dev_a, sizeof(int) * N);
cudaMalloc(&dev_b, sizeof(int) * N);
cudaMalloc(&dev_c, sizeof(int) * N);
cudaMemcpy(dev_a, host_a, sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, host_b, sizeof(int) * N,
cudaMemcpyHostToDevice);
sum<<<N, 1>>>(dev_a, dev_b, dev_c);
cudaMemcpy(host_c, dev_c, sizeof(int) * N, cudaMemcpyDeviceToHost);
for (int i=0; i<N; i++) {
printf("%d ", host_c[i]);
}
printf("\n");
} |
449d4f68aef389c4b53cb303448fdd73c7b1481c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
__global__ void print_details( int* input)
{
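// Flatten the 3D thread/block indices into a unique global index (gid): local thread id within
// the block, plus offsets for the block's x, y and z position in the grid.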
int tid = (threadIdx.z*blockDim.x*blockDim.y)+(threadIdx.y*blockDim.x)+threadIdx.x;
int num_of_thread_in_a_block = blockDim.x * blockDim.y * blockDim.z;
int block_offset = num_of_thread_in_a_block * blockIdx.x;
int num_of_threads_in_a_row = num_of_thread_in_a_block * gridDim.x;
int row_offset = num_of_threads_in_a_row * blockIdx.y;
int num_of_thread_in_xy = num_of_thread_in_a_block * gridDim.x * gridDim.y;
int z_offset = num_of_thread_in_xy * blockIdx.z;
int gid = tid + block_offset + row_offset + z_offset;
printf("tid : %d , gid : %d , value : %d \n", tid, gid, input[gid]);
}
int main()
{
int size = 64;
int byte_size = size * sizeof(int);
int* h_input;
h_input = (int*)malloc(byte_size);
time_t t;
srand((unsigned)time(&t));
for (int i=0; i <size; i++)
{
h_input[i] = (int)(rand() & 0xff);
}
int* d_input;
hipMalloc((void**)&d_input,byte_size);
hipMemcpy(d_input, h_input, byte_size, hipMemcpyHostToDevice);
dim3 block(2,2,2);
dim3 grid(2,2,2);
print_details << < grid, block >> > (d_input);
hipDeviceSynchronize();
hipFree(d_input);
free(h_input);
hipDeviceReset();
}
| 449d4f68aef389c4b53cb303448fdd73c7b1481c.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
__global__ void print_details( int* input)
{
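// Flatten the 3D thread/block indices into a unique global index (gid): local thread id within
// the block, plus offsets for the block's x, y and z position in the grid.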
int tid = (threadIdx.z*blockDim.x*blockDim.y)+(threadIdx.y*blockDim.x)+threadIdx.x;
int num_of_thread_in_a_block = blockDim.x * blockDim.y * blockDim.z;
int block_offset = num_of_thread_in_a_block * blockIdx.x;
int num_of_threads_in_a_row = num_of_thread_in_a_block * gridDim.x;
int row_offset = num_of_threads_in_a_row * blockIdx.y;
int num_of_thread_in_xy = num_of_thread_in_a_block * gridDim.x * gridDim.y;
int z_offset = num_of_thread_in_xy * blockIdx.z;
int gid = tid + block_offset + row_offset + z_offset;
printf("tid : %d , gid : %d , value : %d \n", tid, gid, input[gid]);
}
int main()
{
int size = 64;
int byte_size = size * sizeof(int);
int* h_input;
h_input = (int*)malloc(byte_size);
time_t t;
srand((unsigned)time(&t));
for (int i=0; i <size; i++)
{
h_input[i] = (int)(rand() & 0xff);
}
int* d_input;
cudaMalloc((void**)&d_input,byte_size);
cudaMemcpy(d_input, h_input, byte_size, cudaMemcpyHostToDevice);
dim3 block(2,2,2);
dim3 grid(2,2,2);
print_details << < grid, block >> > (d_input);
cudaDeviceSynchronize();
cudaFree(d_input);
free(h_input);
cudaDeviceReset();
}
|
ResidualMaker.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "residualMaker.cuh"
#include "stdio.h"
#define KERNELS_COUNT 30
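// Directional residual filters: the kernels below compute pixel-prediction residuals
// (1st-, 2nd- and 3rd-order differences plus 3x3 and 5x5 predictors). kernel_index selects
// the filter direction and each thread produces one tile_width x tile_height tile of the output.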
__global__ void make_res_1st(float * src, float * dst, int src_width, int src_height, int kernel_index, int tile_width, int tile_height)
{
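// Linear index (into src/dst) of the top-left pixel of this thread's tile, derived from the 2D block/thread coordinates.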
int tile_top_left =
(blockIdx.y * tile_height * blockDim.y + threadIdx.y * tile_height) * (src_width/*gridDim.x * blockDim.x * tile_width*/)+
blockIdx.x * (blockDim.x * tile_width) +
threadIdx.x * tile_width;
int row, col;
row = tile_top_left / src_width + tile_height + 1;
col = tile_top_left % src_width + tile_width + 1;
/*if (blockIdx.x > blockDim.x - 2 || blockIdx.y > blockDim.y - 2 || !blockIdx.x || !blockIdx.y)
{
if ((!row) || (!col) || (col >= src_width - 1) || row >= (src_height - 1))
{
return;
}
}*/
float ptr_data[11][11];
float ptr_results[11][11];
//return;
int offset = 0;
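// Limit the load loops for tiles at the right/bottom image border.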
int end0 = row >= (src_height - 1) ? src_height - tile_top_left / src_width + 1 : tile_height + 1;
int end1 = (col >= src_width - 1) ? src_width - tile_top_left % src_width + 1 : tile_width + 1;
int tile_top_left0 = tile_top_left;
//tile_top_left0 += + 1;
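// Shift the source pointer so the loaded window covers the neighbour required by this filter direction.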
switch (kernel_index)
{
case 1://left
case 8://leftdown
src += src_width;
break;
case 2://right
case 7://rightdown
case 4://down
src += src_width + 1;
break;
case 3://up
case 6://RU
src += 1;
break;
}
for (int i = 0; i < end0; i++)
{
//offset = i * tile_width;
for (int j = 0; j < end1; j++)
{
ptr_data[i][j] = src[tile_top_left0 + j];
}
tile_top_left0 += src_width;
}
//return;
tile_top_left0 = tile_top_left;
switch (kernel_index)
{
case 1://left
/*for (int i = 0; i < tile_height ; i++)
{
for (int j = 0; j < tile_width; j++)
{
ptr_results[i][j] = ptr_data[i][j];
}
}*/
for (int i = 0; i < tile_height; i++)
{
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i][j - 1] = ptr_data[i][j - 1] - ptr_data[i][j];
}
}
break;
case 2://right
for (int i = 0; i < tile_height ; i++)
{
//offset = i * tile_width;
for (int j = 0; j < tile_width; j++)
{
ptr_results[i][j] = ptr_data[i][j + 1] - ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 3://up
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 0; j < tile_width ; j++)
{
ptr_results[i - 1][j ] = ptr_data[i - 1][j] - ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 4://down
for (int i = 0; i < tile_height; i++)
{
//offset = i * tile_width;
for (int j = 0; j < tile_width ; j++)
{
ptr_results[i][j] = ptr_data[i + 1][j] - ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 5://leftup
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i - 1][j - 1] = ptr_data[i - 1][j - 1] - ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 6://rightup
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 0; j < tile_width ; j++)
{
ptr_results[i - 1][j] = ptr_data[i - 1][j + 1] - ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 7://rightdown
for (int i = 0; i < tile_height ; i++)
{
//offset = i * tile_width;
for (int j = 0; j < tile_width ; j++)
{
ptr_results[i][j] = ptr_data[i + 1][j + 1] - ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 8://leftdown
for (int i = 0; i < tile_height; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i][j - 1] = ptr_data[i + 1][j - 1] - ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
}
tile_top_left0 = tile_top_left;
for (int i = 0; i < tile_height; i++)
{
for (int j = 0; j < tile_width; j++)
{
dst[tile_top_left0 + j] = ptr_results[i][j];
}
tile_top_left0 += src_width;
}
}
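// Second-order residuals: a [1 -2 1] stencil applied horizontally, vertically, or along either diagonal (kernel_index 1-4).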
__global__ void make_res_2st(float * src, float * dst, int src_width, int src_height, int kernel_index, int tile_width, int tile_height)
{
int src_size = src_height * src_width;
int tile_size = tile_width * tile_height;
int tile_top_left =
(blockIdx.y * tile_height * blockDim.y + threadIdx.y * tile_height) * (src_width/*gridDim.x * blockDim.x * tile_width*/)+
blockIdx.x * (blockDim.x * tile_width) +
threadIdx.x * tile_width;
int row, col;
row = tile_top_left / src_width + tile_height + 2;
col = tile_top_left % src_width + tile_width + 2;
/*if (blockIdx.x > blockDim.x - 2 || blockIdx.y > blockDim.y - 2 || !blockIdx.x || !blockIdx.y)
{
if (!(tile_top_left % src_width) || (tile_top_left % src_width == src_width - 1) || tile_top_left < src_width || tile_top_left >(src_size - src_width))
return;
}*/
float ptr_data[11][11];
float ptr_results[11][11];
//return;
int offset = 0;
int end0 = row >= (src_height - 1) ? src_height - tile_top_left / src_width : tile_height + 2;
int end1 = (col >= src_width - 1) ? src_width - tile_top_left % src_width : tile_width + 2;
int tile_top_left0 = tile_top_left;
switch (kernel_index)
{
case 1://h
src += src_width;
break;
case 2://v
src += 1;
break;
}
for (int i = 0; i < end0; i++)
{
//offset = i * tile_width;
for (int j = 0; j < end1; j++)
{
ptr_data[i][j] = src[tile_top_left0 + j];
}
tile_top_left0 += src_width;
}
//return;
tile_top_left0 = tile_top_left;
switch (kernel_index)
{
case 1://h
for (int i = 0; i < tile_height; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i ][j - 1] = (ptr_data[i][j - 1] + ptr_data[i][j + 1]) - 2 * ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 2://v
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 0; j < tile_width; j++)
{
ptr_results[i - 1][j] = (ptr_data[i - 1][j] + ptr_data[i + 1][j]) - 2 * ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 3://diag
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i - 1][j - 1] = (ptr_data[i - 1][j - 1] + ptr_data[i + 1][j + 1]) - 2 * ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 4://mdiag
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i - 1][j - 1] = (ptr_data[i - 1][j + 1] + ptr_data[i + 1][j - 1]) - 2 * ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
}
tile_top_left0 = tile_top_left;
for (int i = 0; i < tile_height; i++)
{
for (int j = 0; j < tile_width; j++)
{
dst[tile_top_left0 + j] = ptr_results[i][j];
}
tile_top_left0 += src_width;
}
}
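// 3x3 predictor residuals: the centre pixel is predicted from part of its 3x3 neighbourhood
// (left/up/right/down half-kernels, or the full surround for kernel_index 5) and the prediction error is stored.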
__global__ void make_res_3x3(float * src, float * dst, int src_width, int src_height, int kernel_index, int tile_width, int tile_height)
{
int src_size = src_height * src_width;
int tile_size = tile_width * tile_height;
int tile_top_left =
(blockIdx.y * tile_height * blockDim.y + threadIdx.y * tile_height) * (src_width/*gridDim.x * blockDim.x * tile_width*/)+
blockIdx.x * (blockDim.x * tile_width) +
threadIdx.x * tile_width;
float ptr_data[11][11];
float ptr_results[11][11];
int offset = 0;
int row, col;
row = tile_top_left / src_width + tile_height + 2;
col = tile_top_left % src_width + tile_width + 2;
/*if (blockIdx.x > blockDim.x - 2 || blockIdx.y > blockDim.y - 2 || !blockIdx.x || !blockIdx.y)
{
if (!(tile_top_left % src_width) || (tile_top_left % src_width == src_width - 1) || tile_top_left < src_width || tile_top_left >(src_size - src_width))
return;
}*/
int end0 = row >= (src_height - 1) ? src_height - tile_top_left / src_width : tile_height + 2;
int end1 = (col >= src_width - 1) ? src_width - tile_top_left % src_width : tile_width + 2;
//int end0 = row >= (src_height - 1) ? src_height % 8 : tile_height + 2;
//int end1 = (col >= src_width - 1) ? src_width % 8 : tile_width + 2;
int tile_top_left0 = tile_top_left;
//tile_top_left0 += src_width + 1;
switch (kernel_index)
{
case 4://down
src += src_width;
break;
case 3://right
src += 1;
break;
}
for (int i = 0; i < end0; i++)
{
//offset = i * tile_width;
for (int j = 0; j < end1; j++)
{
ptr_data[i][j] = src[tile_top_left0 + j];
}
tile_top_left0 += src_width;
}
//return;
tile_top_left0 = tile_top_left;
switch (kernel_index)
{
case 1://left
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i - 1][j - 1] = 2 * (ptr_data[i][j - 1] + ptr_data[i - 1][j] + ptr_data[i + 1][j]) - (ptr_data[i - 1][j - 1] + ptr_data[i + 1][j - 1]) - 4 * ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 2://up
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i - 1][j - 1] = 2 * (ptr_data[i][j - 1] + ptr_data[i - 1][j] + ptr_data[i][j + 1]) - (ptr_data[i - 1][j - 1] + ptr_data[i - 1][j + 1]) - 4 * ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 3://right
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 0; j < tile_width ; j++)
{
ptr_results[i - 1][j] = 2 * (ptr_data[i + 1][j] + ptr_data[i][j + 1] + ptr_data[i - 1][j]) - (ptr_data[i - 1][j + 1] + ptr_data[i + 1][j + 1]) - 4 * ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 4://down
for (int i = 0; i < tile_height; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i][j - 1] = 2 * (ptr_data[i + 1][j] + ptr_data[i][j + 1] + ptr_data[i][j - 1]) - (ptr_data[i + 1][j - 1] + ptr_data[i + 1][j + 1]) - 4 * ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 5://a
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i - 1][j - 1] = 2 * (ptr_data[i][j - 1] + ptr_data[i - 1][j] + ptr_data[i + 1][j] + ptr_data[i][j + 1])
- (ptr_data[i - 1][j - 1] + ptr_data[i + 1][j - 1] + ptr_data[i - 1][j + 1] + ptr_data[i + 1][j + 1]) - 4 * ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
}
tile_top_left0 = tile_top_left;
for (int i = 0; i < tile_height; i++)
{
for (int j = 0; j < tile_width; j++)
{
dst[tile_top_left0 + j] = ptr_results[i][j];
}
tile_top_left0 += src_width;
}
}
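// Third-order residuals: a [-1 3 -3 1] stencil applied along one of 8 directions selected by kernel_index.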
__global__ void make_res_3st(float * src, float * dst, int src_width, int src_height, int kernel_index, int tile_width, int tile_height)
{
int src_size = src_height * src_width;
int tile_size = tile_width * tile_height;
int tile_top_left =
(blockIdx.y * tile_height * blockDim.y + threadIdx.y * tile_height) * (src_width/*gridDim.x * blockDim.x * tile_width*/)+
blockIdx.x * (blockDim.x * tile_width) +
threadIdx.x * tile_width;
float ptr_data [15][15];
float ptr_results [15][15];
//return;
int offset = 0;
/*if (blockIdx.x > blockDim.x - 2 || blockIdx.y > blockDim.y - 2 || !blockIdx.x || !blockIdx.y)
{
if ((tile_top_left % src_width < 2) || (tile_top_left % src_width == src_width - 2) || tile_top_left < src_width * 2 || tile_top_left >(src_size - 2*src_width))
return;
}*/
int row, col;
row = tile_top_left / src_width + tile_height + 3;
col = tile_top_left % src_width + tile_width + 3;
int end0 = row >= (src_height - 1) ? src_height - tile_top_left / src_width : tile_height + 3;
int end1 = (col >= src_width - 1) ? src_width - tile_top_left % src_width : tile_width + 3;
//int end0 = row >= (src_height - 1) ? src_height % 8 : tile_height + 3;
//int end1 = (col >= src_width - 1) ? src_width % 8 : tile_width + 3;
int tile_top_left0 = tile_top_left;
//tile_top_left0 += src_width + 1;
//tile_top_left0 += src_width + 1;
//return;
switch (kernel_index)
{
case 1:
src += src_width * 2;
break;
case 2:
src += src_width * 2 + 1;
break;
case 7://rightdown
src += src_width + 1;
break;
case 3://up
src += 2;
break;
case 8://leftdown
src += src_width;
break;
case 4://down
src += src_width + 2;
break;
//case 2://right
case 6://RU
src += 1;
break;
}
for (int i = 0; i < end0; i++)
{
//offset = i * tile_width;
for (int j = 0; j < end1; j++)
{
ptr_data[i][j] = src[tile_top_left0 + j];
}
tile_top_left0 += src_width;
}
//return;
tile_top_left0 = tile_top_left;
float f1_3 = 1 / 3.0f;
switch (kernel_index)
{
case 1://left
for (int i = 0; i < tile_height; i++)
{
//offset = i * tile_width;
for (int j = 2; j < tile_width + 2; j++)
{
ptr_results[i][j - 2] = - ptr_data[i][j - 2] + 3 * ptr_data[i][j - 1] - 3 * ptr_data[i][j] + ptr_data[i][j + 1];
}
tile_top_left0 += src_width;
}
break;
case 2://right
for (int i = 0; i < tile_height ; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i][j - 1] = - ptr_data[i][j + 2] + 3 * ptr_data[i][j + 1] - 3 * ptr_data[i][j] + ptr_data[i][j - 1];
}
tile_top_left0 += src_width;
}
break;
case 3://up
for (int i = 2; i < tile_height + 2; i++)
{
//offset = i * tile_width;
for (int j = 0; j < tile_width; j++)
{
ptr_results[i - 2][j] = - ptr_data[i - 2][j] + 3 * ptr_data[i - 1][j] - 3 * ptr_data[i][j] + ptr_data[i + 1][j];
}
tile_top_left0 += src_width;
}
break;
case 4://down
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 0; j < tile_width ; j++)
{
ptr_results[i - 1][j] = - ptr_data[i + 2][j] + 3 * ptr_data[i + 1][j] - 3 * ptr_data[i][j] + ptr_data[i - 1][j];
}
tile_top_left0 += src_width;
}
break;
case 5://leftup
for (int i = 2; i < tile_height + 2; i++)
{
//offset = i * tile_width;
for (int j = 2; j < tile_width + 2; j++)
{
ptr_results[i - 2][j - 2] = - ptr_data[i - 2][j - 2] + 3 * ptr_data[i - 1][j - 1] - 3 * ptr_data[i][j] + ptr_data[i + 1][j + 1];
}
tile_top_left0 += src_width;
}
break;
case 6://rightup
for (int i = 2; i < tile_height + 2; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i - 2][j - 1] = - ptr_data[i - 2][j + 2] + 3 * ptr_data[i - 1][j + 1] - 3 * ptr_data[i][j] + ptr_data[i + 1][j - 1];
}
tile_top_left0 += src_width;
}
break;
case 7://rightdown
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i - 1][j - 1] = - ptr_data[i + 2][j + 2] + 3 * ptr_data[i + 1][j + 1] - 3 * ptr_data[i][j] + ptr_data[i - 1][j - 1];
}
tile_top_left0 += src_width;
}
break;
case 8://leftdown
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 2; j < tile_width + 2; j++)
{
ptr_results[i - 1][j - 2] = - ptr_data[i + 2][j - 2] + 3 * ptr_data[i + 1][j - 1] - 3 * ptr_data[i][j] + ptr_data[i - 1][j + 1];
}
tile_top_left0 += src_width;
}
break;
}
tile_top_left0 = tile_top_left;
for (int i = 0; i < tile_height; i++)
{
for (int j = 0; j < tile_width; j++)
{
dst[tile_top_left0 + j] = ptr_results[i][j];
}
tile_top_left0 += src_width;
}
}
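// 5x5 residuals: a 5x5 predictor with integer coefficients (the variable names reflect the
// fractional weights 1/12, 1/6, 1/2, 2/3 scaled by 12), applied as a half-kernel
// (left/up/right/down) or as the full 5x5 surround for kernel_index 5.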
__global__ void make_res_5x5(float * src, float * dst, int src_width, int src_height, int kernel_index, int tile_width, int tile_height)
{
int src_size = src_height * src_width;
int tile_size = tile_width * tile_height;
int tile_top_left =
(blockIdx.y * tile_height * blockDim.y + threadIdx.y * tile_height) * (src_width/*gridDim.x * blockDim.x * tile_width*/)+
blockIdx.x * (blockDim.x * tile_width) +
threadIdx.x * tile_width;
float ptr_data[15][15];
float ptr_results[15][15];
//return;
int offset = 0;
/*if (blockIdx.x > blockDim.x - 2 || blockIdx.y > blockDim.y - 2 || !blockIdx.x || !blockIdx.y)
{
if ((tile_top_left % src_width < 2) || (tile_top_left % src_width == src_width - 2) || tile_top_left < src_width * 2 || tile_top_left >(src_size - 2 * src_width))
return;
}*/
int row, col;
row = tile_top_left / src_width + tile_height + 4;
col = tile_top_left % src_width + tile_width + 4;
int end0 = row >= (src_height - 1) ? src_height - tile_top_left / src_width : tile_height + 4;
int end1 = (col >= src_width - 1) ? src_width - tile_top_left % src_width : tile_width + 4;
//int end0 = row >= (src_height - 1) ? src_height % 8 : tile_height + 4;
//int end1 = (col >= src_width - 1) ? src_width % 8 : tile_width + 4;
int tile_top_left0 = tile_top_left;
for (int i = 0; i < end0; i++)
{
//offset = i * tile_width;
for (int j = 0; j < end1; j++)
{
ptr_data[i][j] = src[tile_top_left0 + j];
}
tile_top_left0 += src_width;
}
//return;
tile_top_left0 = tile_top_left;
float f1_12 = 1;
float f1_6 = 2;
float f1_2 = 6;
float f2_3 = 8;
switch (kernel_index)
{
case 1://left
for (int i = 2; i < tile_height + 2; i++)
{
//offset = i * tile_width;
for (int j = 2; j < tile_width + 2; j++)
{
ptr_results[i - 2][j - 2] =
-f1_12 * ptr_data[i - 2][j - 2] + f1_6 * ptr_data[i - 2][j - 1] - f1_6 * ptr_data[i - 2][j] +
+f1_6 * ptr_data[i - 1][j - 2] - f1_2 * ptr_data[i - 1][j - 1] + f2_3 * ptr_data[i - 1][j] +
-f1_6 * ptr_data[i][j - 2] + f2_3 * ptr_data[i][j - 1] - 12 * ptr_data[i][j] +
+f1_6 * ptr_data[i + 1][j - 2] - f1_2 * ptr_data[i + 1][j - 1] + f2_3 * ptr_data[i + 1][j] +
-f1_12 * ptr_data[i + 2][j - 2] + f1_6 * ptr_data[i + 2][j - 1] - f1_6 * ptr_data[i + 2][j];
}
tile_top_left0 += src_width;
}
break;
case 2://up
for (int i = 2; i < tile_height + 2; i++)
{
//offset = i * tile_width;
for (int j = 2; j < tile_width + 2; j++)
{
ptr_results[i - 2][j - 2] =
-f1_12 * ptr_data[i - 2][j - 2] + f1_6 * ptr_data[i - 1][j - 2] - f1_6 * ptr_data[i][j - 2] +
+f1_6 * ptr_data[i - 2][j - 1] - f1_2 * ptr_data[i - 1][j - 1] + f2_3 * ptr_data[i][j - 1] +
-f1_6 * ptr_data[i - 2][j] + f2_3 * ptr_data[i - 1][j] - 12 * ptr_data[i][j] +
+f1_6 * ptr_data[i - 2][j + 1] - f1_2 * ptr_data[i - 1][j + 1] + f2_3 * ptr_data[i][j + 1] +
-f1_12 * ptr_data[i - 2][j + 2] + f1_6 * ptr_data[i - 1][j + 2] - f1_6 * ptr_data[i][j + 2];
}
tile_top_left0 += src_width;
}
break;
case 3://right
for (int i = 2; i < tile_height + 2; i++)
{
//offset = i * tile_width;
for (int j = 2; j < tile_width + 2; j++)
{
ptr_results[i - 2][j - 2] =
-f1_12 * ptr_data[i - 2][j + 2] + f1_6 * ptr_data[i - 2][j + 1] - f1_6 * ptr_data[i - 2][j] +
+f1_6 * ptr_data[i - 1][j + 2] - f1_2 * ptr_data[i - 1][j + 1] + f2_3 * ptr_data[i - 1][j] +
-f1_6 * ptr_data[i][j + 2] + f2_3 * ptr_data[i][j + 1] - 12 * ptr_data[i][j] +
+f1_6 * ptr_data[i + 1][j + 2] - f1_2 * ptr_data[i + 1][j + 1] + f2_3 * ptr_data[i + 1][j] +
-f1_12 * ptr_data[i + 2][j + 2] + f1_6 * ptr_data[i + 2][j + 1] - f1_6 * ptr_data[i + 2][j];
}
tile_top_left0 += src_width;
}
break;
case 4://down
for (int i = 2; i < tile_height + 2; i++)
{
//offset = i * tile_width;
for (int j = 2; j < tile_width + 2; j++)
{
ptr_results[i - 2][j - 2] =
-f1_12 * ptr_data[i + 2][j - 2] + f1_6 * ptr_data[i + 1][j - 2] - f1_6 * ptr_data[i][j - 2] +
+f1_6 * ptr_data[i + 2][j - 1] - f1_2 * ptr_data[i + 1][j - 1] + f2_3 * ptr_data[i][j - 1] +
-f1_6 * ptr_data[i + 2][j] + f2_3 * ptr_data[i + 1][j] - 12 * ptr_data[i][j] +
+f1_6 * ptr_data[i + 2][j + 1] - f1_2 * ptr_data[i + 1][j + 1] + f2_3 * ptr_data[i][j + 1] +
-f1_12 * ptr_data[i + 2][j + 2] + f1_6 * ptr_data[i + 1][j + 2] - f1_6 * ptr_data[i][j + 2];
}
tile_top_left0 += src_width;
}
break;
case 5://a
for (int i = 2; i < tile_height + 2; i++)
{
//offset = i * tile_width;
for (int j = 2; j < tile_width + 2; j++)
{
ptr_results[i - 2][j - 2] =
-f1_12 * ptr_data[i - 2][j - 2] + f1_6 * ptr_data[i - 2][j - 1] - f1_6 * ptr_data[i - 2][j] + f1_6 * ptr_data[i - 2][j + 1] - f1_12 * ptr_data[i - 2][j + 2] +
+f1_6 * ptr_data[i - 1][j - 2] - f1_2 * ptr_data[i - 1][j - 1] + f2_3 * ptr_data[i - 1][j] - f1_2 * ptr_data[i - 1][j + 1] + f1_6 * ptr_data[i - 1][j + 2] +
-f1_6 * ptr_data[i][j - 2] + f2_3 * ptr_data[i][j - 1] - 12 * ptr_data[i][j] + f2_3 * ptr_data[i][j + 1] - f1_6 * ptr_data[i][j + 2]+
+f1_6 * ptr_data[i + 1][j - 2] - f1_2 * ptr_data[i + 1][j - 1] + f2_3 * ptr_data[i + 1][j] - f1_2 * ptr_data[i + 1][j + 1] + f1_6 * ptr_data[i + 1][j + 2] +
-f1_12 * ptr_data[i + 2][j - 2] + f1_6 * ptr_data[i + 2][j - 1] - f1_6 * ptr_data[i + 2][j] + f1_6 * ptr_data[i + 2][j + 1]- f1_12 * ptr_data[i + 2][j + 2] ;
}
tile_top_left0 += src_width;
}
break;
}
tile_top_left0 = tile_top_left;
for (int i = 0; i < tile_height; i++)
{
for (int j = 0; j < tile_width; j++)
{
dst[tile_top_left0 + j] = ptr_results[i][j];
}
tile_top_left0 += src_width;
}
} | ResidualMaker.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "residualMaker.cuh"
#include "stdio.h"
#define KERNELS_COUNT 30
__global__ void make_res_1st(float * src, float * dst, int src_width, int src_height, int kernel_index, int tile_width, int tile_height)
{
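	// First-order residuals: each thread loads a (tile_height + 1) x (tile_width + 1)
	// window of src into registers and, for the direction selected by kernel_index
	// (1..8 = left, right, up, down, left-up, right-up, right-down, left-down),
	// writes the difference between a neighbouring pixel and the current pixel to dst.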
int tile_top_left =
(blockIdx.y * tile_height * blockDim.y + threadIdx.y * tile_height) * (src_width/*gridDim.x * blockDim.x * tile_width*/)+
blockIdx.x * (blockDim.x * tile_width) +
threadIdx.x * tile_width;
int row, col;
row = tile_top_left / src_width + tile_height + 1;
col = tile_top_left % src_width + tile_width + 1;
/*if (blockIdx.x > blockDim.x - 2 || blockIdx.y > blockDim.y - 2 || !blockIdx.x || !blockIdx.y)
{
if ((!row) || (!col) || (col >= src_width - 1) || row >= (src_height - 1))
{
return;
}
}*/
float ptr_data[11][11];
float ptr_results[11][11];
//return;
int offset = 0;
int end0 = row >= (src_height - 1) ? src_height - tile_top_left / src_width + 1 : tile_height + 1;
int end1 = (col >= src_width - 1) ? src_width - tile_top_left % src_width + 1 : tile_width + 1;
int tile_top_left0 = tile_top_left;
//tile_top_left0 += + 1;
switch (kernel_index)
{
case 1://left
case 8://leftdown
src += src_width;
break;
case 2://right
case 7://rightdown
case 4://down
src += src_width + 1;
break;
case 3://up
case 6://RU
src += 1;
break;
}
for (int i = 0; i < end0; i++)
{
//offset = i * tile_width;
for (int j = 0; j < end1; j++)
{
ptr_data[i][j] = src[tile_top_left0 + j];
}
tile_top_left0 += src_width;
}
//return;
tile_top_left0 = tile_top_left;
switch (kernel_index)
{
case 1://left
/*for (int i = 0; i < tile_height ; i++)
{
for (int j = 0; j < tile_width; j++)
{
ptr_results[i][j] = ptr_data[i][j];
}
}*/
for (int i = 0; i < tile_height; i++)
{
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i][j - 1] = ptr_data[i][j - 1] - ptr_data[i][j];
}
}
break;
case 2://right
for (int i = 0; i < tile_height ; i++)
{
//offset = i * tile_width;
for (int j = 0; j < tile_width; j++)
{
ptr_results[i][j] = ptr_data[i][j + 1] - ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 3://up
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 0; j < tile_width ; j++)
{
ptr_results[i - 1][j ] = ptr_data[i - 1][j] - ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 4://down
for (int i = 0; i < tile_height; i++)
{
//offset = i * tile_width;
for (int j = 0; j < tile_width ; j++)
{
ptr_results[i][j] = ptr_data[i + 1][j] - ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 5://leftup
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i - 1][j - 1] = ptr_data[i - 1][j - 1] - ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 6://rightup
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 0; j < tile_width ; j++)
{
ptr_results[i - 1][j] = ptr_data[i - 1][j + 1] - ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 7://rightdown
for (int i = 0; i < tile_height ; i++)
{
//offset = i * tile_width;
for (int j = 0; j < tile_width ; j++)
{
ptr_results[i][j] = ptr_data[i + 1][j + 1] - ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 8://leftdown
for (int i = 0; i < tile_height; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i][j - 1] = ptr_data[i + 1][j - 1] - ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
}
tile_top_left0 = tile_top_left;
for (int i = 0; i < tile_height; i++)
{
for (int j = 0; j < tile_width; j++)
{
dst[tile_top_left0 + j] = ptr_results[i][j];
}
tile_top_left0 += src_width;
}
}
__global__ void make_res_2st(float * src, float * dst, int src_width, int src_height, int kernel_index, int tile_width, int tile_height)
{
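	// Second-order residuals: for the direction selected by kernel_index
	// (1 = horizontal, 2 = vertical, 3 = diagonal, 4 = anti-diagonal) each output
	// pixel is computed as (previous neighbour + next neighbour - 2 * centre).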
int src_size = src_height * src_width;
int tile_size = tile_width * tile_height;
int tile_top_left =
(blockIdx.y * tile_height * blockDim.y + threadIdx.y * tile_height) * (src_width/*gridDim.x * blockDim.x * tile_width*/)+
blockIdx.x * (blockDim.x * tile_width) +
threadIdx.x * tile_width;
int row, col;
row = tile_top_left / src_width + tile_height + 2;
col = tile_top_left % src_width + tile_width + 2;
/*if (blockIdx.x > blockDim.x - 2 || blockIdx.y > blockDim.y - 2 || !blockIdx.x || !blockIdx.y)
{
if (!(tile_top_left % src_width) || (tile_top_left % src_width == src_width - 1) || tile_top_left < src_width || tile_top_left >(src_size - src_width))
return;
}*/
float ptr_data[11][11];
float ptr_results[11][11];
//return;
int offset = 0;
int end0 = row >= (src_height - 1) ? src_height - tile_top_left / src_width : tile_height + 2;
int end1 = (col >= src_width - 1) ? src_width - tile_top_left % src_width : tile_width + 2;
int tile_top_left0 = tile_top_left;
switch (kernel_index)
{
case 1://h
src += src_width;
break;
case 2://v
src += 1;
break;
}
for (int i = 0; i < end0; i++)
{
//offset = i * tile_width;
for (int j = 0; j < end1; j++)
{
ptr_data[i][j] = src[tile_top_left0 + j];
}
tile_top_left0 += src_width;
}
//return;
tile_top_left0 = tile_top_left;
switch (kernel_index)
{
case 1://h
for (int i = 0; i < tile_height; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i ][j - 1] = (ptr_data[i][j - 1] + ptr_data[i][j + 1]) - 2 * ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 2://v
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 0; j < tile_width; j++)
{
ptr_results[i - 1][j] = (ptr_data[i - 1][j] + ptr_data[i + 1][j]) - 2 * ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 3://diag
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i - 1][j - 1] = (ptr_data[i - 1][j - 1] + ptr_data[i + 1][j + 1]) - 2 * ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 4://mdiag
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i - 1][j - 1] = (ptr_data[i - 1][j + 1] + ptr_data[i + 1][j - 1]) - 2 * ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
}
tile_top_left0 = tile_top_left;
for (int i = 0; i < tile_height; i++)
{
for (int j = 0; j < tile_width; j++)
{
dst[tile_top_left0 + j] = ptr_results[i][j];
}
tile_top_left0 += src_width;
}
}
__global__ void make_res_3x3(float * src, float * dst, int src_width, int src_height, int kernel_index, int tile_width, int tile_height)
{
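	// 3x3 residuals: a Laplacian-like kernel with weight +2 on edge neighbours,
	// -1 on corner neighbours and -4 on the centre, evaluated on the half
	// neighbourhood (1..4 = left, up, right, down) or the full 3x3 neighbourhood
	// (kernel_index == 5, "a").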
int src_size = src_height * src_width;
int tile_size = tile_width * tile_height;
int tile_top_left =
(blockIdx.y * tile_height * blockDim.y + threadIdx.y * tile_height) * (src_width/*gridDim.x * blockDim.x * tile_width*/)+
blockIdx.x * (blockDim.x * tile_width) +
threadIdx.x * tile_width;
float ptr_data[11][11];
float ptr_results[11][11];
int offset = 0;
int row, col;
row = tile_top_left / src_width + tile_height + 2;
col = tile_top_left % src_width + tile_width + 2;
/*if (blockIdx.x > blockDim.x - 2 || blockIdx.y > blockDim.y - 2 || !blockIdx.x || !blockIdx.y)
{
if (!(tile_top_left % src_width) || (tile_top_left % src_width == src_width - 1) || tile_top_left < src_width || tile_top_left >(src_size - src_width))
return;
}*/
int end0 = row >= (src_height - 1) ? src_height - tile_top_left / src_width : tile_height + 2;
int end1 = (col >= src_width - 1) ? src_width - tile_top_left % src_width : tile_width + 2;
//int end0 = row >= (src_height - 1) ? src_height % 8 : tile_height + 2;
//int end1 = (col >= src_width - 1) ? src_width % 8 : tile_width + 2;
int tile_top_left0 = tile_top_left;
//tile_top_left0 += src_width + 1;
switch (kernel_index)
{
case 4://h
src += src_width;
break;
case 3://v
src += 1;
break;
}
for (int i = 0; i < end0; i++)
{
//offset = i * tile_width;
for (int j = 0; j < end1; j++)
{
ptr_data[i][j] = src[tile_top_left0 + j];
}
tile_top_left0 += src_width;
}
//return;
tile_top_left0 = tile_top_left;
switch (kernel_index)
{
case 1://left
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i - 1][j - 1] = 2 * (ptr_data[i][j - 1] + ptr_data[i - 1][j] + ptr_data[i + 1][j]) - (ptr_data[i - 1][j - 1] + ptr_data[i + 1][j - 1]) - 4 * ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 2://up
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i - 1][j - 1] = 2 * (ptr_data[i][j - 1] + ptr_data[i - 1][j] + ptr_data[i][j + 1]) - (ptr_data[i - 1][j - 1] + ptr_data[i - 1][j + 1]) - 4 * ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 3://right
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 0; j < tile_width ; j++)
{
ptr_results[i - 1][j] = 2 * (ptr_data[i + 1][j] + ptr_data[i][j + 1] + ptr_data[i - 1][j]) - (ptr_data[i - 1][j + 1] + ptr_data[i + 1][j + 1]) - 4 * ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 4://down
for (int i = 0; i < tile_height; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i][j - 1] = 2 * (ptr_data[i + 1][j] + ptr_data[i][j + 1] + ptr_data[i][j - 1]) - (ptr_data[i + 1][j - 1] + ptr_data[i + 1][j + 1]) - 4 * ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
case 5://a
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i - 1][j - 1] = 2 * (ptr_data[i][j - 1] + ptr_data[i - 1][j] + ptr_data[i + 1][j] + ptr_data[i][j + 1])
- (ptr_data[i - 1][j - 1] + ptr_data[i + 1][j - 1] + ptr_data[i - 1][j + 1] + ptr_data[i + 1][j + 1]) - 4 * ptr_data[i][j];
}
tile_top_left0 += src_width;
}
break;
}
tile_top_left0 = tile_top_left;
for (int i = 0; i < tile_height; i++)
{
for (int j = 0; j < tile_width; j++)
{
dst[tile_top_left0 + j] = ptr_results[i][j];
}
tile_top_left0 += src_width;
}
}
__global__ void make_res_3st(float * src, float * dst, int src_width, int src_height, int kernel_index, int tile_width, int tile_height)
{
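	// Third-order residuals: a (-1, 3, -3, 1) finite-difference stencil applied along
	// one of eight directions (horizontal, vertical and diagonal, both orientations)
	// selected by kernel_index.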
int src_size = src_height * src_width;
int tile_size = tile_width * tile_height;
int tile_top_left =
(blockIdx.y * tile_height * blockDim.y + threadIdx.y * tile_height) * (src_width/*gridDim.x * blockDim.x * tile_width*/)+
blockIdx.x * (blockDim.x * tile_width) +
threadIdx.x * tile_width;
float ptr_data [15][15];
float ptr_results [15][15];
//return;
int offset = 0;
/*if (blockIdx.x > blockDim.x - 2 || blockIdx.y > blockDim.y - 2 || !blockIdx.x || !blockIdx.y)
{
if ((tile_top_left % src_width < 2) || (tile_top_left % src_width == src_width - 2) || tile_top_left < src_width * 2 || tile_top_left >(src_size - 2*src_width))
return;
}*/
int row, col;
row = tile_top_left / src_width + tile_height + 3;
col = tile_top_left % src_width + tile_width + 3;
int end0 = row >= (src_height - 1) ? src_height - tile_top_left / src_width : tile_height + 3;
int end1 = (col >= src_width - 1) ? src_width - tile_top_left % src_width : tile_width + 3;
//int end0 = row >= (src_height - 1) ? src_height % 8 : tile_height + 3;
//int end1 = (col >= src_width - 1) ? src_width % 8 : tile_width + 3;
int tile_top_left0 = tile_top_left;
//tile_top_left0 += src_width + 1;
//tile_top_left0 += src_width + 1;
//return;
switch (kernel_index)
{
case 1:
src += src_width * 2;
break;
case 2:
src += src_width * 2 + 1;
break;
case 7://rightdown
src += src_width + 1;
break;
case 3://up
src += 2;
break;
case 8://leftdown
src += src_width;
break;
case 4://down
src += src_width + 2;
break;
//case 2://right
case 6://RU
src += 1;
break;
}
for (int i = 0; i < end0; i++)
{
//offset = i * tile_width;
for (int j = 0; j < end1; j++)
{
ptr_data[i][j] = src[tile_top_left0 + j];
}
tile_top_left0 += src_width;
}
//return;
tile_top_left0 = tile_top_left;
float f1_3 = 1 / 3.0f;
switch (kernel_index)
{
case 1://left
for (int i = 0; i < tile_height; i++)
{
//offset = i * tile_width;
for (int j = 2; j < tile_width + 2; j++)
{
ptr_results[i][j - 2] = - ptr_data[i][j - 2] + 3 * ptr_data[i][j - 1] - 3 * ptr_data[i][j] + ptr_data[i][j + 1];
}
tile_top_left0 += src_width;
}
break;
case 2://right
for (int i = 0; i < tile_height ; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i][j - 1] = - ptr_data[i][j + 2] + 3 * ptr_data[i][j + 1] - 3 * ptr_data[i][j] + ptr_data[i][j - 1];
}
tile_top_left0 += src_width;
}
break;
case 3://up
for (int i = 2; i < tile_height + 2; i++)
{
//offset = i * tile_width;
for (int j = 0; j < tile_width; j++)
{
ptr_results[i - 2][j] = - ptr_data[i - 2][j] + 3 * ptr_data[i - 1][j] - 3 * ptr_data[i][j] + ptr_data[i + 1][j];
}
tile_top_left0 += src_width;
}
break;
case 4://down
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 0; j < tile_width ; j++)
{
ptr_results[i - 1][j] = - ptr_data[i + 2][j] + 3 * ptr_data[i + 1][j] - 3 * ptr_data[i][j] + ptr_data[i - 1][j];
}
tile_top_left0 += src_width;
}
break;
case 5://leftup
for (int i = 2; i < tile_height + 2; i++)
{
//offset = i * tile_width;
for (int j = 2; j < tile_width + 2; j++)
{
ptr_results[i - 2][j - 2] = - ptr_data[i - 2][j - 2] + 3 * ptr_data[i - 1][j - 1] - 3 * ptr_data[i][j] + ptr_data[i + 1][j + 1];
}
tile_top_left0 += src_width;
}
break;
case 6://rightup
for (int i = 2; i < tile_height + 2; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i - 2][j - 1] = - ptr_data[i - 2][j + 2] + 3 * ptr_data[i - 1][j + 1] - 3 * ptr_data[i][j] + ptr_data[i + 1][j - 1];
}
tile_top_left0 += src_width;
}
break;
case 7://rightdown
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 1; j < tile_width + 1; j++)
{
ptr_results[i - 1][j - 1] = - ptr_data[i + 2][j + 2] + 3 * ptr_data[i + 1][j + 1] - 3 * ptr_data[i][j] + ptr_data[i - 1][j - 1];
}
tile_top_left0 += src_width;
}
break;
case 8://leftdown
for (int i = 1; i < tile_height + 1; i++)
{
//offset = i * tile_width;
for (int j = 2; j < tile_width + 2; j++)
{
ptr_results[i - 1][j - 2] = - ptr_data[i + 2][j - 2] + 3 * ptr_data[i + 1][j - 1] - 3 * ptr_data[i][j] + ptr_data[i - 1][j + 1];
}
tile_top_left0 += src_width;
}
break;
}
tile_top_left0 = tile_top_left;
for (int i = 0; i < tile_height; i++)
{
for (int j = 0; j < tile_width; j++)
{
dst[tile_top_left0 + j] = ptr_results[i][j];
}
tile_top_left0 += src_width;
}
}
__global__ void make_res_5x5(float * src, float * dst, int src_width, int src_height, int kernel_index, int tile_width, int tile_height)
{
int src_size = src_height * src_width;
int tile_size = tile_width * tile_height;
int tile_top_left =
(blockIdx.y * tile_height * blockDim.y + threadIdx.y * tile_height) * (src_width/*gridDim.x * blockDim.x * tile_width*/)+
blockIdx.x * (blockDim.x * tile_width) +
threadIdx.x * tile_width;
float ptr_data[15][15];
float ptr_results[15][15];
//return;
int offset = 0;
/*if (blockIdx.x > blockDim.x - 2 || blockIdx.y > blockDim.y - 2 || !blockIdx.x || !blockIdx.y)
{
if ((tile_top_left % src_width < 2) || (tile_top_left % src_width == src_width - 2) || tile_top_left < src_width * 2 || tile_top_left >(src_size - 2 * src_width))
return;
}*/
int row, col;
row = tile_top_left / src_width + tile_height + 4;
col = tile_top_left % src_width + tile_width + 4;
int end0 = row >= (src_height - 1) ? src_height - tile_top_left / src_width : tile_height + 4;
int end1 = (col >= src_width - 1) ? src_width - tile_top_left % src_width : tile_width + 4;
//int end0 = row >= (src_height - 1) ? src_height % 8 : tile_height + 4;
//int end1 = (col >= src_width - 1) ? src_width % 8 : tile_width + 4;
int tile_top_left0 = tile_top_left;
for (int i = 0; i < end0; i++)
{
//offset = i * tile_width;
for (int j = 0; j < end1; j++)
{
ptr_data[i][j] = src[tile_top_left0 + j];
}
tile_top_left0 += src_width;
}
//return;
tile_top_left0 = tile_top_left;
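	// 5x5 residual kernel weights. The variable names suggest the original fractional
	// weights 1/12, 1/6, 1/2 and 2/3; here they appear scaled by 12 (1, 2, 6, 8, with
	// 12 as the centre weight), i.e. the kernel is applied unnormalised.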
float f1_12 = 1;
float f1_6 = 2;
float f1_2 = 6;
float f2_3 = 8;
switch (kernel_index)
{
case 1://left
for (int i = 2; i < tile_height + 2; i++)
{
//offset = i * tile_width;
for (int j = 2; j < tile_width + 2; j++)
{
ptr_results[i - 2][j - 2] =
-f1_12 * ptr_data[i - 2][j - 2] + f1_6 * ptr_data[i - 2][j - 1] - f1_6 * ptr_data[i - 2][j] +
+f1_6 * ptr_data[i - 1][j - 2] - f1_2 * ptr_data[i - 1][j - 1] + f2_3 * ptr_data[i - 1][j] +
-f1_6 * ptr_data[i][j - 2] + f2_3 * ptr_data[i][j - 1] - 12 * ptr_data[i][j] +
+f1_6 * ptr_data[i + 1][j - 2] - f1_2 * ptr_data[i + 1][j - 1] + f2_3 * ptr_data[i + 1][j] +
-f1_12 * ptr_data[i + 2][j - 2] + f1_6 * ptr_data[i + 2][j - 1] - f1_6 * ptr_data[i + 2][j];
}
tile_top_left0 += src_width;
}
break;
case 2://up
for (int i = 2; i < tile_height + 2; i++)
{
//offset = i * tile_width;
for (int j = 2; j < tile_width + 2; j++)
{
ptr_results[i - 2][j - 2] =
-f1_12 * ptr_data[i - 2][j - 2] + f1_6 * ptr_data[i - 1][j - 2] - f1_6 * ptr_data[i][j - 2] +
+f1_6 * ptr_data[i - 2][j - 1] - f1_2 * ptr_data[i - 1][j - 1] + f2_3 * ptr_data[i][j - 1] +
-f1_6 * ptr_data[i - 2][j] + f2_3 * ptr_data[i - 1][j] - 12 * ptr_data[i][j] +
+f1_6 * ptr_data[i - 2][j + 1] - f1_2 * ptr_data[i - 1][j + 1] + f2_3 * ptr_data[i][j + 1] +
-f1_12 * ptr_data[i - 2][j + 2] + f1_6 * ptr_data[i - 1][j + 2] - f1_6 * ptr_data[i][j + 2];
}
tile_top_left0 += src_width;
}
break;
case 3://right
for (int i = 2; i < tile_height + 2; i++)
{
//offset = i * tile_width;
for (int j = 2; j < tile_width + 2; j++)
{
ptr_results[i - 2][j - 2] =
-f1_12 * ptr_data[i - 2][j + 2] + f1_6 * ptr_data[i - 2][j + 1] - f1_6 * ptr_data[i - 2][j] +
+f1_6 * ptr_data[i - 1][j + 2] - f1_2 * ptr_data[i - 1][j + 1] + f2_3 * ptr_data[i - 1][j] +
-f1_6 * ptr_data[i][j + 2] + f2_3 * ptr_data[i][j + 1] - 12 * ptr_data[i][j] +
+f1_6 * ptr_data[i + 1][j + 2] - f1_2 * ptr_data[i + 1][j + 1] + f2_3 * ptr_data[i + 1][j] +
-f1_12 * ptr_data[i + 2][j + 2] + f1_6 * ptr_data[i + 2][j + 1] - f1_6 * ptr_data[i + 2][j];
}
tile_top_left0 += src_width;
}
break;
case 4://down
for (int i = 2; i < tile_height + 2; i++)
{
//offset = i * tile_width;
for (int j = 2; j < tile_width + 2; j++)
{
ptr_results[i - 2][j - 2] =
-f1_12 * ptr_data[i + 2][j - 2] + f1_6 * ptr_data[i + 1][j - 2] - f1_6 * ptr_data[i][j - 2] +
+f1_6 * ptr_data[i + 2][j - 1] - f1_2 * ptr_data[i + 1][j - 1] + f2_3 * ptr_data[i][j - 1] +
-f1_6 * ptr_data[i + 2][j] + f2_3 * ptr_data[i + 1][j] - 12 * ptr_data[i][j] +
+f1_6 * ptr_data[i + 2][j + 1] - f1_2 * ptr_data[i + 1][j + 1] + f2_3 * ptr_data[i][j + 1] +
-f1_12 * ptr_data[i + 2][j + 2] + f1_6 * ptr_data[i + 1][j + 2] - f1_6 * ptr_data[i][j + 2];
}
tile_top_left0 += src_width;
}
break;
case 5://a
for (int i = 2; i < tile_height + 2; i++)
{
//offset = i * tile_width;
for (int j = 2; j < tile_width + 2; j++)
{
ptr_results[i - 2][j - 2] =
-f1_12 * ptr_data[i - 2][j - 2] + f1_6 * ptr_data[i - 2][j - 1] - f1_6 * ptr_data[i - 2][j] + f1_6 * ptr_data[i - 2][j + 1] - f1_12 * ptr_data[i - 2][j + 2] +
+f1_6 * ptr_data[i - 1][j - 2] - f1_2 * ptr_data[i - 1][j - 1] + f2_3 * ptr_data[i - 1][j] - f1_2 * ptr_data[i - 1][j + 1] + f1_6 * ptr_data[i - 1][j + 2] +
-f1_6 * ptr_data[i][j - 2] + f2_3 * ptr_data[i][j - 1] - 12 * ptr_data[i][j] + f2_3 * ptr_data[i][j + 1] - f1_6 * ptr_data[i][j + 2]+
+f1_6 * ptr_data[i + 1][j - 2] - f1_2 * ptr_data[i + 1][j - 1] + f2_3 * ptr_data[i + 1][j] - f1_2 * ptr_data[i + 1][j + 1] + f1_6 * ptr_data[i + 1][j + 2] +
-f1_12 * ptr_data[i + 2][j - 2] + f1_6 * ptr_data[i + 2][j - 1] - f1_6 * ptr_data[i + 2][j] + f1_6 * ptr_data[i + 2][j + 1]- f1_12 * ptr_data[i + 2][j + 2] ;
}
tile_top_left0 += src_width;
}
break;
}
tile_top_left0 = tile_top_left;
for (int i = 0; i < tile_height; i++)
{
for (int j = 0; j < tile_width; j++)
{
dst[tile_top_left0 + j] = ptr_results[i][j];
}
tile_top_left0 += src_width;
}
} |
280d9a8791bb7490113e20aa77a5944abc7b1906.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <lmnslsqr/lmnslsqr.h>
#include <lmnslsqr/qmatrix.h>
#include <lmnslsqr/nslsqr.h>
#include <lmnslsqr/kernel.h>
#include <lmnslsqr/aux.h>
#include <hiprand/hiprand.h>
#include <stdio.h>
#include <lmnslsqr/error.h>
#define MAX(X, Y) (((X) < (Y)) ? (Y) : (X))
////////////////////////////////////////////////////////////////
//////////////// UTILITY FUNCTIONS /////////////////////////////
////////////////////////////////////////////////////////////////
void _print_lmnslsqr_parameters(
int M, int N,
double ini_factor, double min_factor, double red_factor, double inc_factor,
double min_mu, double lower_mu, double upper_mu,
int maxit,
double tol, double dtol, int dcount,
int maxnslsqrit, int maxnslsqrrestart,
double rtol, double stol, int siter, double ptol, int piter,
int n_layers, int *bit_list, double qtol )
{
fprintf( stderr, "\n---------------------------------------\n" );
fprintf( stderr, "Report for initial parameters of lm-nsLSQR\n" );
fprintf( stderr, "Problem dimension: %d x %d\n", M, N );
fprintf( stderr, "Factors:\n" );
fprintf( stderr, "\tInitial damping : %.20f\n", ini_factor );
fprintf( stderr, "\tMinimal damping : %.20f\n", min_factor );
fprintf( stderr, "\tReduction damping: %.20f\n", red_factor );
fprintf( stderr, "\tIncrease damping : %.20f\n", inc_factor );
fprintf( stderr, "\tMinimal mu : %.20f\n", min_mu );
fprintf( stderr, "\tLower bound mu : %.20f\n", lower_mu );
fprintf( stderr, "\tUpper bound mu : %.20f\n", upper_mu );
fprintf( stderr, "Iterations:\n" );
fprintf( stderr, "\tlm-nsLSQR max. iterations: %d\n", maxit );
fprintf( stderr, "\tnsLSQR max. iterations : %d\n", maxnslsqrit );
fprintf( stderr, "\tnsLSQR max. restarts : %d\n", maxnslsqrrestart );
fprintf( stderr, "Tolerances:\n" );
fprintf( stderr, "\tlm-nsLSQR rel. residual tolerance : %.20f\n", tol );
fprintf( stderr, "\tlm-nsLSQR solution diff. tolerance: %.20f\n", dtol );
fprintf( stderr, "\tlm-nsLSQR solution diff. count : %.20f\n", dcount );
fprintf( stderr, "\tnsLSQR rel. residual tolerance : %.20f\n", rtol );
fprintf( stderr, "\tnsLSQR solution diff. tolerance : %.20f\n", stol );
fprintf( stderr, "\tnsLSQR solution diff. count : %.20f\n", siter );
fprintf( stderr, "\tnsLSQR saturation tolerance : %.20f\n", ptol );
fprintf( stderr, "\tnsLSQR saturation count : %.20f\n", piter );
fprintf( stderr, "Quantization:\n" );
fprintf( stderr, "\tNumber of layers : %d\n", n_layers );
fprintf( stderr, "\tBits:" );
for( int i = 0; i < n_layers; ++i )
fprintf( stderr, " %d", bit_list[i] );
fprintf( stderr, "\n" );
fprintf( stderr, "\tQuantization tolerance: %.20f\n", qtol );
fprintf( stderr, "\n---------------------------------------\n" );
}
void _print_memory_information(
int M, int N, int nslsqrit, int *bit_list, int nlevels )
{
int sum_bit = 0;
for (int i = 0; i < nlevels; ++i)
sum_bit = sum_bit + bit_list[i];
double quant_memory =
1e-9*( (M*N*sum_bit)/8.0 + N*nlevels*8.0 + M*(32.0 + 64.0) + N*8.0 );
double nslsqr_memory =
1e-9*8*(7*N + 4*M + nslsqrit*(N + M + 4*nslsqrit + 5) + 1);
double solver_memory = 1e-9*8*(4*M + 5*N) + nslsqr_memory + quant_memory;
fprintf( stderr, "\n---------------------------------------\n" );
fprintf( stderr, "Report of memory used by lmnslsqr is:\n" );
fprintf( stderr, "Quantization: %f\n", quant_memory );
fprintf( stderr, "nsLSQR: %f\n", nslsqr_memory );
fprintf( stderr, "lmnslsqr: %f\n", solver_memory );
fprintf( stderr, "If the used memory is greater than the available memory, the program will be terminated.\n" );
fprintf( stderr, "\n---------------------------------------\n" );
}
////////////////////////////////////////////////////////////////
//////////////////// LMILSQR implementation ////////////////////
////////////////////////////////////////////////////////////////
void lmnslsqr(
void (*func)(int, int, const double *, double *),
int M,
int N,
const double *dev_x0,
double ini_factor,
double min_factor,
double red_factor,
double inc_factor,
double min_mu,
double lower_mu,
double upper_mu,
int maxit,
double tol,
double dtol,
int dcount,
double eps,
int maxnslsqrit,
int maxnslsqrrestart,
double rtol,
double stol,
int siter,
double ptol,
int piter,
int n_layers,
int *bit_list,
double qtol,
hipblasHandle_t *handle,
double *dev_out,
double *residual_out,
int seed,
bool debug
)
{
if( debug ) {
// Print parameter information
_print_lmnslsqr_parameters(
M, N, ini_factor, min_factor, red_factor, inc_factor, min_mu,
lower_mu, upper_mu, maxit, tol, dtol, dcount, maxnslsqrit,
maxnslsqrrestart, rtol, stol, siter, ptol, piter, n_layers,
bit_list, qtol );
// Memory information
_print_memory_information(M, N, maxnslsqrit, bit_list, n_layers);
}
// Setting seed
hiprandGenerator_t prng;
hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(prng, seed);
// Evaluate initial guess
double *dev_Fx0;
cuda_check_error(
hipMalloc(&dev_Fx0, sizeof(double)*M),
"lmnslsqr, allocation for Fxo\n"
);
func(M, N, dev_x0, dev_Fx0);
// Compute initial residual
double initial_residual;
cublas_check_error(
hipblasDnrm2(*handle, M, dev_Fx0, 1, &initial_residual),
"lmnslsqr, Computing the initial residual\n"
);
// LM factor or damping parameter
double factor = ini_factor;
// Iteration solution
double *dev_xi;
cuda_check_error(
hipMalloc(&dev_xi, sizeof(double)*N),
"lmnslsqr, Allocation for xi\n"
);
cuda_check_error(
hipMemcpy(dev_xi, dev_x0, sizeof(double)*N, hipMemcpyDeviceToDevice),
"lmnslsqr, copy from x0 to xi\n"
);
// Vector to evaluate current solution
double *dev_Fxi;
double norm_Fxi;
cuda_check_error(
hipMalloc(&dev_Fxi, sizeof(double)*M),
"lmnslsqr, Allocation for Fxi\n"
);
// Initial evaluation
func(M, N, dev_xi, dev_Fxi);
cublas_check_error(
hipblasDnrm2(*handle, M, dev_Fxi, 1, &norm_Fxi),
"lmnslsqr, Computation of residual of current solution\n"
);
// Vector to use as initial guess, which will be equal to zero
double *dev_ig;
cuda_check_error(
hipMalloc(&dev_ig, sizeof(double)*N),
"lmnslsqr, Allocation for the initial guess\n"
);
// Vector for LM step
double *dev_dx;
cuda_check_error(
hipMalloc(&dev_dx, sizeof(double)*N),
"lmnslsqr, Allocation of the step\n"
);
// Vector for update candidate
double *dev_y, *dev_Fy;
cuda_check_error(
hipMalloc(&dev_y, sizeof(double)*N),
"lmnslsqr, Allocation for candidate y\n"
);
cuda_check_error(
hipMalloc(&dev_Fy, sizeof(double)*M),
"lmnslsqr, Allocation for F(y)\n"
);
	// Auxiliary vectors
double *dev_auxM, *dev_auxN;
cuda_check_error(
hipMalloc(&dev_auxM, sizeof(double)*M),
"lmnslsqr, Allocation for auxiliar vector 1\n"
);
cuda_check_error(
hipMalloc(&dev_auxN, sizeof(double)*N),
"lmnslsqr, Allocation for auxiliar vector 2\n"
);
// Vector for prediction updates
double ared, pred, norm1, gamma, norm2;
// Quantization matrix structure
struct qmatrix *qm = NULL;
init_jac_approx(M, N);
// Flag for quantization jacobian update
int update_quantjac = 1;
// double for alpha values
double alpha;
	// Variables for computation of difference tolerance
double relstep = 1;
int count = 0;
int val_change = 1;
// Index count for residual list
int residual_index = 0;
// Residual out for nslsqr
double *res_nslsqr = (double *) malloc(
sizeof(double)*( maxnslsqrrestart*( maxnslsqrit + 1) ) );
// Iteration of LM
for (int k = 0; k < maxit; ++k) {
if (debug)
fprintf(stderr, "DEBUG: LM Iteration %d\n", k);
if( debug )
fprintf(stderr, "%.20f\n", norm_Fxi/initial_residual);
// Tolerance test
if (val_change) {
if (relstep <= dtol)
++count;
else
count = 0;
}
if (norm_Fxi/initial_residual < tol || count >= dcount) {
if( debug ) {
fprintf( stderr, "DEBUG: lm-nsLSQR finalized by: ");
if( norm_Fxi/initial_residual < tol ) {
fprintf( stderr, "rel. residual achieved\n" );
}
else {
fprintf( stderr, "solution diff. tolerance achieved.\n" );
}
}
k = maxit;
}
// Tolerance not achieved
// Continue to the next iteration of LM
else {
			// Compute the quantization of the Jacobian matrix
if (update_quantjac) {
// Evaluate current solution since it has changed
func(M, N, dev_xi, dev_Fxi);
cublas_check_error(
hipblasDnrm2(*handle, M, dev_Fxi, 1, &norm_Fxi),
"lmnslsqr, Computation of residual of current solution\n"
);
if( debug ) {
fprintf( stderr, "DEBUG: New residua: %.20f\n",
norm_Fxi/initial_residual );
}
// Store residual
if (residual_out != NULL)
residual_out[residual_index++] = norm_Fxi/initial_residual;
qm = (struct qmatrix *) malloc(sizeof(struct qmatrix));
init_qmatrix(qm, func, M, N, n_layers, bit_list, qtol, dev_xi, handle);
}
// Solve by nslsqr
hipLaunchKernelGGL(( kernel_set_value), dim3((N+255)/256), dim3(256), 0, 0, dev_ig, N, 0);
if (debug)
fprintf(stderr, "DEBUG: Calling nslsqr\n");
nslsqr(
qm, func, dev_xi, dev_ig, dev_Fxi, rtol, stol, siter,
ptol, piter, maxnslsqrit, maxnslsqrrestart, factor,
dev_dx, res_nslsqr, &prng, false );
// Define prediction rates
cuda_check_error(
hipMemcpy(dev_y, dev_xi, sizeof(double)*N,
hipMemcpyDeviceToDevice),
"lmnslsqr, Copy from xi to y\n"
);
alpha = -1;
cublas_check_error(
hipblasDaxpy(*handle, N, &alpha, dev_dx, 1, dev_y, 1), "lmnslsqr, first part of prediction rate\n"
);
func(M, N, dev_y, dev_Fy);
// Actual reduction
cublas_check_error(
hipblasDnrm2(*handle, M, dev_Fy, 1, &norm1),
"lmnslsqr, norm compuation\n"
);
ared = norm_Fxi*norm_Fxi - norm1*norm1;
// Predicted reduction
jac_approx(M, N, func, dev_xi, dev_dx, dev_auxM, eps, handle);
cublas_check_error(
hipblasDnrm2(*handle, M, dev_auxM, 1, &norm1),
"lmnslsqr, norm computation\n"
);
cublas_check_error(
hipblasDdot(*handle, M, dev_Fxi, 1, dev_auxM, 1, &norm2),
"lmnslsqr, dot Fxi and auxM\n"
);
pred = 2*norm2 - norm1*norm1;
gamma = ared/pred;
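			// gamma is the gain ratio: the actual reduction (ared) of the squared residual
			// divided by the reduction (pred) predicted by the linearised model. The
			// damping-factor update below accepts or rejects the step depending on where
			// gamma falls relative to min_mu, lower_mu and upper_mu.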
// Compute the following LM damping factor
if (debug)
fprintf(stderr, "DEBUG: Starting damping factor update\n");
if (gamma < min_mu) {
update_quantjac = 0;
factor = MAX(inc_factor*factor, min_factor);
val_change = 0;
if( debug )
fprintf( stderr, "DEBUG: No update of damping factor\n" );
}
else if (min_mu <= gamma && gamma < lower_mu) {
update_quantjac = 1;
cuda_check_error(
hipMemcpy(dev_auxN, dev_y, sizeof(double)*N,
hipMemcpyDeviceToDevice),
"lmnslsqr, copy from y to auxN\n"
);
alpha = -1;
cublas_check_error(
hipblasDnrm2(*handle, N, dev_auxN, 1, &norm1),
"lmnslsqr, norm in auxN\n"
);
cublas_check_error(
hipblasDaxpy(*handle, N, &alpha, dev_xi, 1, dev_auxN, 1),
"lmnslsqr, axpy xi and auxN\n"
);
cublas_check_error(
hipblasDnrm2(*handle, N, dev_auxN, 1, &norm2),
"lmnslsqr, norm computation in second case of gamma test\n"
);
relstep = norm2/norm1;
cuda_check_error(
hipMemcpy(dev_xi, dev_y, sizeof(double)*N,
hipMemcpyDeviceToDevice),
"lmnslsqr, copy from y to xi\n"
);
factor = MAX(inc_factor*factor, min_factor);
val_change = 1;
if( debug ) {
fprintf( stderr, "DEBUG: Damping factor increased\n" );
}
}
else if (lower_mu <= gamma) {
update_quantjac = 1;
cuda_check_error(
hipMemcpy(dev_auxN, dev_y, sizeof(double)*N,
hipMemcpyDeviceToDevice),
"lmnslsqr, copy from y to auxN in lower_mu <= gamma\n"
);
alpha = -1;
cublas_check_error(
hipblasDnrm2(*handle, N, dev_auxN, 1, &norm1),
"lmnslsqr, norm computation\n"
);
cublas_check_error(
hipblasDaxpy(*handle, N, &alpha, dev_xi, 1, dev_auxN, 1),
"lmnslsqr, axpy in xi\n"
);
cublas_check_error(
hipblasDnrm2(*handle, N, dev_auxN, 1, &norm2),
"lmnslsqr, norm of auxN\n"
);
relstep = norm2/norm1;
cuda_check_error(
hipMemcpy(dev_xi, dev_y, sizeof(double)*N,
hipMemcpyDeviceToDevice),
"lmnslsqr, copy form y to xi in lower_mu <= gamma\n"
);
val_change = 1;
if (upper_mu < gamma) {
factor = red_factor*factor;
if( debug ) {
fprintf( stderr, "DEBUG: Damping factor decreased\n" );
}
}
else if( debug ) {
fprintf( stderr, "DEBUG: Damping factor maintained.\n" );
}
}
if (factor < min_factor) {
factor = min_factor;
if( debug ) {
fprintf( stderr, "DEBUG: Damping factor set to minimum\n" );
}
}
			// Free quantization matrix
if (update_quantjac) {
free_qmatrix(qm);
free(qm);
qm = NULL;
if( debug ) {
fprintf( stderr, "DEBUG: Need to update Jacobian\n" );
}
}
}
}
// Add last element for marking
if (residual_index <= maxit && residual_out != NULL)
residual_out[residual_index] = -1;
if( debug ) {
fprintf( stderr, "DEBUG: Finalizing lm-nsLSQR\n" );
}
// Copy the solution to the output variable
hipMemcpy(dev_out, dev_xi, sizeof(double)*N, hipMemcpyDeviceToDevice);
hiprandDestroyGenerator(prng);
free_jac_approx();
// Free used memory
free( res_nslsqr );
hipFree(dev_Fx0);
hipFree(dev_xi);
hipFree(dev_Fxi);
hipFree(dev_ig);
hipFree(dev_dx);
hipFree(dev_y);
hipFree(dev_Fy);
hipFree(dev_auxM);
hipFree(dev_auxN);
if (qm != NULL) {
free_qmatrix(qm);
free(qm);
}
} | 280d9a8791bb7490113e20aa77a5944abc7b1906.cu | #include <lmnslsqr/lmnslsqr.h>
#include <lmnslsqr/qmatrix.h>
#include <lmnslsqr/nslsqr.h>
#include <lmnslsqr/kernel.h>
#include <lmnslsqr/aux.h>
#include <curand.h>
#include <stdio.h>
#include <lmnslsqr/error.h>
#define MAX(X, Y) (((X) < (Y)) ? (Y) : (X))
////////////////////////////////////////////////////////////////
//////////////// UTILITY FUNCTIONS /////////////////////////////
////////////////////////////////////////////////////////////////
void _print_lmnslsqr_parameters(
int M, int N,
double ini_factor, double min_factor, double red_factor, double inc_factor,
double min_mu, double lower_mu, double upper_mu,
int maxit,
double tol, double dtol, int dcount,
int maxnslsqrit, int maxnslsqrrestart,
double rtol, double stol, int siter, double ptol, int piter,
int n_layers, int *bit_list, double qtol )
{
fprintf( stderr, "\n---------------------------------------\n" );
fprintf( stderr, "Report for initial parameters of lm-nsLSQR\n" );
fprintf( stderr, "Problem dimension: %d x %d\n", M, N );
fprintf( stderr, "Factors:\n" );
fprintf( stderr, "\tInitial damping : %.20f\n", ini_factor );
fprintf( stderr, "\tMinimal damping : %.20f\n", min_factor );
fprintf( stderr, "\tReduction damping: %.20f\n", red_factor );
fprintf( stderr, "\tIncrease damping : %.20f\n", inc_factor );
fprintf( stderr, "\tMinimal mu : %.20f\n", min_mu );
fprintf( stderr, "\tLower bound mu : %.20f\n", lower_mu );
fprintf( stderr, "\tUpper bound mu : %.20f\n", upper_mu );
fprintf( stderr, "Iterations:\n" );
fprintf( stderr, "\tlm-nsLSQR max. iterations: %d\n", maxit );
fprintf( stderr, "\tnsLSQR max. iterations : %d\n", maxnslsqrit );
fprintf( stderr, "\tnsLSQR max. restarts : %d\n", maxnslsqrrestart );
fprintf( stderr, "Tolerances:\n" );
fprintf( stderr, "\tlm-nsLSQR rel. residual tolerance : %.20f\n", tol );
fprintf( stderr, "\tlm-nsLSQR solution diff. tolerance: %.20f\n", dtol );
fprintf( stderr, "\tlm-nsLSQR solution diff. count : %.20f\n", dcount );
fprintf( stderr, "\tnsLSQR rel. residual tolerance : %.20f\n", rtol );
fprintf( stderr, "\tnsLSQR solution diff. tolerance : %.20f\n", stol );
fprintf( stderr, "\tnsLSQR solution diff. count : %.20f\n", siter );
fprintf( stderr, "\tnsLSQR saturation tolerance : %.20f\n", ptol );
fprintf( stderr, "\tnsLSQR saturation count : %.20f\n", piter );
fprintf( stderr, "Quantization:\n" );
fprintf( stderr, "\tNumber of layers : %d\n", n_layers );
fprintf( stderr, "\tBits:" );
for( int i = 0; i < n_layers; ++i )
fprintf( stderr, " %d", bit_list[i] );
fprintf( stderr, "\n" );
fprintf( stderr, "\tQuantization tolerance: %.20f\n", qtol );
fprintf( stderr, "\n---------------------------------------\n" );
}
void _print_memory_information(
int M, int N, int nslsqrit, int *bit_list, int nlevels )
{
int sum_bit = 0;
for (int i = 0; i < nlevels; ++i)
sum_bit = sum_bit + bit_list[i];
double quant_memory =
1e-9*( (M*N*sum_bit)/8.0 + N*nlevels*8.0 + M*(32.0 + 64.0) + N*8.0 );
double nslsqr_memory =
1e-9*8*(7*N + 4*M + nslsqrit*(N + M + 4*nslsqrit + 5) + 1);
double solver_memory = 1e-9*8*(4*M + 5*N) + nslsqr_memory + quant_memory;
fprintf( stderr, "\n---------------------------------------\n" );
fprintf( stderr, "Report of memory used by lmnslsqr is:\n" );
fprintf( stderr, "Quantization: %f\n", quant_memory );
fprintf( stderr, "nsLSQR: %f\n", nslsqr_memory );
fprintf( stderr, "lmnslsqr: %f\n", solver_memory );
fprintf( stderr, "If the used memory is greater than the available memory, the program will be terminated.\n" );
fprintf( stderr, "\n---------------------------------------\n" );
}
////////////////////////////////////////////////////////////////
//////////////////// LMILSQR implementation ////////////////////
////////////////////////////////////////////////////////////////
void lmnslsqr(
void (*func)(int, int, const double *, double *),
int M,
int N,
const double *dev_x0,
double ini_factor,
double min_factor,
double red_factor,
double inc_factor,
double min_mu,
double lower_mu,
double upper_mu,
int maxit,
double tol,
double dtol,
int dcount,
double eps,
int maxnslsqrit,
int maxnslsqrrestart,
double rtol,
double stol,
int siter,
double ptol,
int piter,
int n_layers,
int *bit_list,
double qtol,
cublasHandle_t *handle,
double *dev_out,
double *residual_out,
int seed,
bool debug
)
{
if( debug ) {
// Print parameter information
_print_lmnslsqr_parameters(
M, N, ini_factor, min_factor, red_factor, inc_factor, min_mu,
lower_mu, upper_mu, maxit, tol, dtol, dcount, maxnslsqrit,
maxnslsqrrestart, rtol, stol, siter, ptol, piter, n_layers,
bit_list, qtol );
// Memory information
_print_memory_information(M, N, maxnslsqrit, bit_list, n_layers);
}
// Setting seed
curandGenerator_t prng;
curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(prng, seed);
// Evaluate initial guess
double *dev_Fx0;
cuda_check_error(
cudaMalloc(&dev_Fx0, sizeof(double)*M),
"lmnslsqr, allocation for Fxo\n"
);
func(M, N, dev_x0, dev_Fx0);
// Compute initial residual
double initial_residual;
cublas_check_error(
cublasDnrm2(*handle, M, dev_Fx0, 1, &initial_residual),
"lmnslsqr, Computing the initial residual\n"
);
// LM factor or damping parameter
double factor = ini_factor;
// Iteration solution
double *dev_xi;
cuda_check_error(
cudaMalloc(&dev_xi, sizeof(double)*N),
"lmnslsqr, Allocation for xi\n"
);
cuda_check_error(
cudaMemcpy(dev_xi, dev_x0, sizeof(double)*N, cudaMemcpyDeviceToDevice),
"lmnslsqr, copy from x0 to xi\n"
);
// Vector to evaluate current solution
double *dev_Fxi;
double norm_Fxi;
cuda_check_error(
cudaMalloc(&dev_Fxi, sizeof(double)*M),
"lmnslsqr, Allocation for Fxi\n"
);
// Initial evaluation
func(M, N, dev_xi, dev_Fxi);
cublas_check_error(
cublasDnrm2(*handle, M, dev_Fxi, 1, &norm_Fxi),
"lmnslsqr, Computation of residual of current solution\n"
);
// Vector to use as initial guess, which will be equal to zero
double *dev_ig;
cuda_check_error(
cudaMalloc(&dev_ig, sizeof(double)*N),
"lmnslsqr, Allocation for the initial guess\n"
);
// Vector for LM step
double *dev_dx;
cuda_check_error(
cudaMalloc(&dev_dx, sizeof(double)*N),
"lmnslsqr, Allocation of the step\n"
);
// Vector for update candidate
double *dev_y, *dev_Fy;
cuda_check_error(
cudaMalloc(&dev_y, sizeof(double)*N),
"lmnslsqr, Allocation for candidate y\n"
);
cuda_check_error(
cudaMalloc(&dev_Fy, sizeof(double)*M),
"lmnslsqr, Allocation for F(y)\n"
);
	// Auxiliary vectors
double *dev_auxM, *dev_auxN;
cuda_check_error(
cudaMalloc(&dev_auxM, sizeof(double)*M),
"lmnslsqr, Allocation for auxiliar vector 1\n"
);
cuda_check_error(
cudaMalloc(&dev_auxN, sizeof(double)*N),
"lmnslsqr, Allocation for auxiliar vector 2\n"
);
// Vector for prediction updates
double ared, pred, norm1, gamma, norm2;
// Quantization matrix structure
struct qmatrix *qm = NULL;
init_jac_approx(M, N);
// Flag for quantization jacobian update
int update_quantjac = 1;
// double for alpha values
double alpha;
	// Variables for computation of difference tolerance
double relstep = 1;
int count = 0;
int val_change = 1;
// Index count for residual list
int residual_index = 0;
// Residual out for nslsqr
double *res_nslsqr = (double *) malloc(
sizeof(double)*( maxnslsqrrestart*( maxnslsqrit + 1) ) );
// Iteration of LM
for (int k = 0; k < maxit; ++k) {
if (debug)
fprintf(stderr, "DEBUG: LM Iteration %d\n", k);
if( debug )
fprintf(stderr, "%.20f\n", norm_Fxi/initial_residual);
// Tolerance test
if (val_change) {
if (relstep <= dtol)
++count;
else
count = 0;
}
if (norm_Fxi/initial_residual < tol || count >= dcount) {
if( debug ) {
fprintf( stderr, "DEBUG: lm-nsLSQR finalized by: ");
if( norm_Fxi/initial_residual < tol ) {
fprintf( stderr, "rel. residual achieved\n" );
}
else {
fprintf( stderr, "solution diff. tolerance achieved.\n" );
}
}
k = maxit;
}
// Tolerance not achieved
// Continue to the next iteration of LM
else {
			// Compute the quantization of the Jacobian matrix
if (update_quantjac) {
// Evaluate current solution since it has changed
func(M, N, dev_xi, dev_Fxi);
cublas_check_error(
cublasDnrm2(*handle, M, dev_Fxi, 1, &norm_Fxi),
"lmnslsqr, Computation of residual of current solution\n"
);
if( debug ) {
fprintf( stderr, "DEBUG: New residua: %.20f\n",
norm_Fxi/initial_residual );
}
// Store residual
if (residual_out != NULL)
residual_out[residual_index++] = norm_Fxi/initial_residual;
qm = (struct qmatrix *) malloc(sizeof(struct qmatrix));
init_qmatrix(qm, func, M, N, n_layers, bit_list, qtol, dev_xi, handle);
}
// Solve by nslsqr
kernel_set_value<<< (N+255)/256, 256>>>(dev_ig, N, 0);
if (debug)
fprintf(stderr, "DEBUG: Calling nslsqr\n");
nslsqr(
qm, func, dev_xi, dev_ig, dev_Fxi, rtol, stol, siter,
ptol, piter, maxnslsqrit, maxnslsqrrestart, factor,
dev_dx, res_nslsqr, &prng, false );
// Define prediction rates
cuda_check_error(
cudaMemcpy(dev_y, dev_xi, sizeof(double)*N,
cudaMemcpyDeviceToDevice),
"lmnslsqr, Copy from xi to y\n"
);
alpha = -1;
cublas_check_error(
cublasDaxpy(*handle, N, &alpha, dev_dx, 1, dev_y, 1), "lmnslsqr, first part of prediction rate\n"
);
func(M, N, dev_y, dev_Fy);
// Actual reduction
cublas_check_error(
cublasDnrm2(*handle, M, dev_Fy, 1, &norm1),
"lmnslsqr, norm compuation\n"
);
ared = norm_Fxi*norm_Fxi - norm1*norm1;
// Predicted reduction
jac_approx(M, N, func, dev_xi, dev_dx, dev_auxM, eps, handle);
cublas_check_error(
cublasDnrm2(*handle, M, dev_auxM, 1, &norm1),
"lmnslsqr, norm computation\n"
);
cublas_check_error(
cublasDdot(*handle, M, dev_Fxi, 1, dev_auxM, 1, &norm2),
"lmnslsqr, dot Fxi and auxM\n"
);
pred = 2*norm2 - norm1*norm1;
gamma = ared/pred;
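			// gamma is the gain ratio: the actual reduction (ared) of the squared residual
			// divided by the reduction (pred) predicted by the linearised model. The
			// damping-factor update below accepts or rejects the step depending on where
			// gamma falls relative to min_mu, lower_mu and upper_mu.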
// Compute the following LM damping factor
if (debug)
fprintf(stderr, "DEBUG: Starting damping factor update\n");
if (gamma < min_mu) {
update_quantjac = 0;
factor = MAX(inc_factor*factor, min_factor);
val_change = 0;
if( debug )
fprintf( stderr, "DEBUG: No update of damping factor\n" );
}
else if (min_mu <= gamma && gamma < lower_mu) {
update_quantjac = 1;
cuda_check_error(
cudaMemcpy(dev_auxN, dev_y, sizeof(double)*N,
cudaMemcpyDeviceToDevice),
"lmnslsqr, copy from y to auxN\n"
);
alpha = -1;
cublas_check_error(
cublasDnrm2(*handle, N, dev_auxN, 1, &norm1),
"lmnslsqr, norm in auxN\n"
);
cublas_check_error(
cublasDaxpy(*handle, N, &alpha, dev_xi, 1, dev_auxN, 1),
"lmnslsqr, axpy xi and auxN\n"
);
cublas_check_error(
cublasDnrm2(*handle, N, dev_auxN, 1, &norm2),
"lmnslsqr, norm computation in second case of gamma test\n"
);
relstep = norm2/norm1;
cuda_check_error(
cudaMemcpy(dev_xi, dev_y, sizeof(double)*N,
cudaMemcpyDeviceToDevice),
"lmnslsqr, copy from y to xi\n"
);
factor = MAX(inc_factor*factor, min_factor);
val_change = 1;
if( debug ) {
fprintf( stderr, "DEBUG: Damping factor increased\n" );
}
}
else if (lower_mu <= gamma) {
update_quantjac = 1;
cuda_check_error(
cudaMemcpy(dev_auxN, dev_y, sizeof(double)*N,
cudaMemcpyDeviceToDevice),
"lmnslsqr, copy from y to auxN in lower_mu <= gamma\n"
);
alpha = -1;
cublas_check_error(
cublasDnrm2(*handle, N, dev_auxN, 1, &norm1),
"lmnslsqr, norm computation\n"
);
cublas_check_error(
cublasDaxpy(*handle, N, &alpha, dev_xi, 1, dev_auxN, 1),
"lmnslsqr, axpy in xi\n"
);
cublas_check_error(
cublasDnrm2(*handle, N, dev_auxN, 1, &norm2),
"lmnslsqr, norm of auxN\n"
);
relstep = norm2/norm1;
cuda_check_error(
cudaMemcpy(dev_xi, dev_y, sizeof(double)*N,
cudaMemcpyDeviceToDevice),
"lmnslsqr, copy form y to xi in lower_mu <= gamma\n"
);
val_change = 1;
if (upper_mu < gamma) {
factor = red_factor*factor;
if( debug ) {
fprintf( stderr, "DEBUG: Damping factor decreased\n" );
}
}
else if( debug ) {
fprintf( stderr, "DEBUG: Damping factor maintained.\n" );
}
}
if (factor < min_factor) {
factor = min_factor;
if( debug ) {
fprintf( stderr, "DEBUG: Damping factor set to minimum\n" );
}
}
			// Free quantization matrix
if (update_quantjac) {
free_qmatrix(qm);
free(qm);
qm = NULL;
if( debug ) {
fprintf( stderr, "DEBUG: Need to update Jacobian\n" );
}
}
}
}
// Add last element for marking
if (residual_index <= maxit && residual_out != NULL)
residual_out[residual_index] = -1;
if( debug ) {
fprintf( stderr, "DEBUG: Finalizing lm-nsLSQR\n" );
}
// Copy the solution to the output variable
cudaMemcpy(dev_out, dev_xi, sizeof(double)*N, cudaMemcpyDeviceToDevice);
curandDestroyGenerator(prng);
free_jac_approx();
// Free used memory
free( res_nslsqr );
cudaFree(dev_Fx0);
cudaFree(dev_xi);
cudaFree(dev_Fxi);
cudaFree(dev_ig);
cudaFree(dev_dx);
cudaFree(dev_y);
cudaFree(dev_Fy);
cudaFree(dev_auxM);
cudaFree(dev_auxN);
if (qm != NULL) {
free_qmatrix(qm);
free(qm);
}
} |
875135aa8445cfda08057bccdb04d4cf4af27ea6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/logit_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void LogitKernel(const int N, const T* X, const float eps, T* Y) {
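  // Clamp X to [eps, 1 - eps] and apply the logit transform Y = log(X / (1 - X)).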
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = fminf(X[i], (T(1) - eps));
Y[i] = fmaxf(Y[i], eps);
Y[i] = logf(Y[i] / (T(1) - Y[i]));
}
}
template <typename T>
__global__ void LogitGradientKernel(
const int N,
const T* X,
const T* dY,
const float eps,
T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = (X[i] < eps || X[i] > T(1) - eps) ? T(0)
: (dY[i] / X[i] / (T(1) - X[i]));
}
}
} // namespace
template <>
template <typename T>
bool LogitFunctor<CUDAContext>::
operator()(const int N, const T* X, T* Y, CUDAContext* context) const {
hipLaunchKernelGGL(( LogitKernel<T>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, X, eps_, Y);
return true;
}
template <>
bool LogitGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
dX->ResizeLike(X);
int n = X.size();
hipLaunchKernelGGL(( LogitGradientKernel),
dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
n, X.data<float>(), dY.data<float>(), eps_, dX->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(
Logit,
UnaryElementwiseWithArgsOp<
TensorTypes<float>,
CUDAContext,
LogitFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(LogitGradient, LogitGradientOp<float, CUDAContext>);
} // namespace caffe2
| 875135aa8445cfda08057bccdb04d4cf4af27ea6.cu | #include "caffe2/operators/logit_op.h"
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void LogitKernel(const int N, const T* X, const float eps, T* Y) {
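  // Clamp X to [eps, 1 - eps] and apply the logit transform Y = log(X / (1 - X)).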
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = fminf(X[i], (T(1) - eps));
Y[i] = fmaxf(Y[i], eps);
Y[i] = logf(Y[i] / (T(1) - Y[i]));
}
}
template <typename T>
__global__ void LogitGradientKernel(
const int N,
const T* X,
const T* dY,
const float eps,
T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
dX[i] = (X[i] < eps || X[i] > T(1) - eps) ? T(0)
: (dY[i] / X[i] / (T(1) - X[i]));
}
}
} // namespace
template <>
template <typename T>
bool LogitFunctor<CUDAContext>::
operator()(const int N, const T* X, T* Y, CUDAContext* context) const {
LogitKernel<T>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, X, eps_, Y);
return true;
}
template <>
bool LogitGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
dX->ResizeLike(X);
int n = X.size();
LogitGradientKernel<<<
CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
n, X.data<float>(), dY.data<float>(), eps_, dX->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(
Logit,
UnaryElementwiseWithArgsOp<
TensorTypes<float>,
CUDAContext,
LogitFunctor<CUDAContext>>);
REGISTER_CUDA_OPERATOR(LogitGradient, LogitGradientOp<float, CUDAContext>);
} // namespace caffe2
|
e07702efccd46566a385fe6067470937300771f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#define SIZE 33*1024
#define min(a, b) (((a) > (b)) ? (b) : (a))
const int threadperBlock = 256;
const int blocksperGrid = min(32, (SIZE + threadperBlock - 1) / threadperBlock);
__global__ void dot(float *a, float *b, float *c)
{
__shared__ float cache[threadperBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float temp = 0;
while (tid < SIZE)
{
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
cache[cacheIndex] = temp;
__syncthreads();
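	// Tree reduction in shared memory: halve the number of active threads each pass
	// until cache[0] holds this block's partial dot product (assumes blockDim.x is a
	// power of two).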
int i = blockDim.x / 2;
while (i != 0)
{
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
c[blockIdx.x] = cache[0];
}
int main()
{
float *a, *b, *c_, c;
float *d_a, *d_b, *d_c_;
a = (float *)malloc(SIZE*sizeof(float));
b = (float *)malloc(SIZE*sizeof(float));
c_ = (float *)malloc(blocksperGrid*sizeof(float));
hipMalloc(&d_a, SIZE*sizeof(float));
hipMalloc(&d_b, SIZE*sizeof(float));
hipMalloc(&d_c_, blocksperGrid*sizeof(float));
for (int i = 0; i < SIZE; ++i)
{
a[i] = i;
b[i] = i;
}
hipMemcpy(d_a, a, SIZE*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, SIZE*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( dot), dim3(blocksperGrid), dim3(threadperBlock), 0, 0, d_a, d_b, d_c_);
hipMemcpy(c_, d_c_, blocksperGrid*sizeof(float), hipMemcpyDeviceToHost);
c = 0;
for (int i = 0; i < blocksperGrid; i++)
{
c += c_[i];
}
printf("%.6g\n", c);
float K = 0.;
for (int i = 0; i < SIZE; i++)
{
K += i*i;
}
printf("%.6g\n", K);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c_);
free(a);
free(b);
free(c_);
return 0;
}
| e07702efccd46566a385fe6067470937300771f2.cu | #include "device_launch_parameters.h"
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#define SIZE 33*1024
#define min(a, b) (((a) > (b)) ? (b) : (a))
const int threadperBlock = 256;
const int blocksperGrid = min(32, (SIZE + threadperBlock - 1) / threadperBlock);
__global__ void dot(float *a, float *b, float *c)
{
__shared__ float cache[threadperBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float temp = 0;
while (tid < SIZE)
{
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
cache[cacheIndex] = temp;
__syncthreads();
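	// Tree reduction in shared memory: halve the number of active threads each pass
	// until cache[0] holds this block's partial dot product (assumes blockDim.x is a
	// power of two).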
int i = blockDim.x / 2;
while (i != 0)
{
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
c[blockIdx.x] = cache[0];
}
int main()
{
float *a, *b, *c_, c;
float *d_a, *d_b, *d_c_;
a = (float *)malloc(SIZE*sizeof(float));
b = (float *)malloc(SIZE*sizeof(float));
c_ = (float *)malloc(blocksperGrid*sizeof(float));
cudaMalloc(&d_a, SIZE*sizeof(float));
cudaMalloc(&d_b, SIZE*sizeof(float));
cudaMalloc(&d_c_, blocksperGrid*sizeof(float));
for (int i = 0; i < SIZE; ++i)
{
a[i] = i;
b[i] = i;
}
cudaMemcpy(d_a, a, SIZE*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, SIZE*sizeof(float), cudaMemcpyHostToDevice);
dot<<<blocksperGrid, threadperBlock>>>(d_a, d_b, d_c_);
cudaMemcpy(c_, d_c_, blocksperGrid*sizeof(float), cudaMemcpyDeviceToHost);
c = 0;
for (int i = 0; i < blocksperGrid; i++)
{
c += c_[i];
}
printf("%.6g\n", c);
float K = 0.;
for (int i = 0; i < SIZE; i++)
{
K += i*i;
}
printf("%.6g\n", K);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c_);
free(a);
free(b);
free(c_);
return 0;
}
|
e9277f0c2692678811a2695baa9f448b5ff18cf8.hip | // !!! This is a file automatically generated by hipify!!!
#include "ConvolutionThrustFunctor.hh"
int totalConvolutions = 0;
#ifdef OMP_ON
#pragma omp threadprivate(totalConvolutions)
#endif
// Need multiple working spaces for the case of several convolutions in one PDF.
__constant__ fptype* dev_modWorkSpace[100];
__constant__ fptype* dev_resWorkSpace[100];
// Number which transforms model range (x1, x2) into resolution range (x1 - maxX, x2 - minX).
// It is equal to the maximum possible value of x0, ie maxX, in bins.
__constant__ int modelOffset = 0;
__device__ fptype device_ConvolvePdfs (fptype* evt, fptype* p, unsigned int* indices) {
fptype ret = 0;
fptype loBound = functorConstants[indices[5]+0];
fptype hiBound = functorConstants[indices[5]+1];
fptype step = functorConstants[indices[5]+2];
fptype x0 = evt[indices[2 + indices[0]]];
int workSpaceIndex = indices[6];
int numbins = (int) FLOOR((hiBound - loBound) / step);
fptype lowerBoundOffset = loBound / step;
lowerBoundOffset -= FLOOR(lowerBoundOffset);
int offsetInBins = (int) FLOOR(x0 / step - lowerBoundOffset);
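  // offsetInBins is x0 expressed in bins, aligned to the model grid, so that the
  // cached resolution (which spans the shifted range set up in setIntegrationConstants)
  // can be indexed at x - x0 via modelOffset below.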
// integral M(x) * R(x - x0) dx
for (int i = 0; i < numbins; ++i) {
fptype model = dev_modWorkSpace[workSpaceIndex][i];
fptype resol = dev_resWorkSpace[workSpaceIndex][i + modelOffset - offsetInBins];
ret += model*resol;
}
ret *= normalisationFactors[indices[2]];
ret *= normalisationFactors[indices[4]];
return ret;
}
__device__ device_function_ptr ptr_to_ConvolvePdfs = device_ConvolvePdfs;
ConvolutionThrustFunctor::ConvolutionThrustFunctor (std::string n,
Variable* x,
ThrustPdfFunctor* m,
ThrustPdfFunctor* r)
: ThrustPdfFunctor(x, n)
, model(m)
, resolution(r)
, host_iConsts(0)
, modelWorkSpace(0)
, resolWorkSpace(0)
, workSpaceIndex(0)
{
components.push_back(model);
components.push_back(resolution);
// static int totalConvolutions = 0;
// Indices stores (function index)(parameter index) doublet for model and resolution function.
std::vector<unsigned int> paramIndices;
paramIndices.push_back(model->getFunctionIndex());
paramIndices.push_back(model->getParameterIndex());
paramIndices.push_back(resolution->getFunctionIndex());
paramIndices.push_back(resolution->getParameterIndex());
paramIndices.push_back(registerConstants(3));
paramIndices.push_back(workSpaceIndex = totalConvolutions++);
hipMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_ConvolvePdfs, sizeof(void*));
initialise(paramIndices);
setIntegrationConstants(-10, 10, 0.01);
}
__host__ void ConvolutionThrustFunctor::setIntegrationConstants (fptype lo, fptype hi, fptype step) {
if (!host_iConsts) {
host_iConsts = new fptype[6];
hipMalloc((void**) &dev_iConsts, 6*sizeof(fptype));
}
host_iConsts[0] = lo;
host_iConsts[1] = hi;
host_iConsts[2] = step;
hipMemcpyToSymbol(functorConstants, host_iConsts, 3*sizeof(fptype), cIndex*sizeof(fptype), hipMemcpyHostToDevice);
if (modelWorkSpace) {
delete modelWorkSpace;
delete resolWorkSpace;
}
int numbins = (int) floor((host_iConsts[1] - host_iConsts[0]) / step);
// Different format for integration range!
modelWorkSpace = new thrust::device_vector<fptype>(numbins);
// We will do integral from x1 to x2 of M(x)*R(x - x0) dx.
// So we need to cache the values of M from x1 to x2, which is given
// by the integration range. But R must be cached from x1-maxX to
// x2-minX, and the min and max are given by the dependent variable.
// However, the step must be the same as for the model, or the binning
// will get out of sync.
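  // Illustrative numbers (hypothetical, assuming the default setIntegrationConstants(-10, 10, 0.01)
  // and a dependent variable limited to [-5, 5]): M is cached on (10 - (-10)) / 0.01 = 2000 bins,
  // R on (15 - (-15)) / 0.01 = 3000 bins, and the modelOffset set below is 5 / 0.01 = 500 bins.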
Variable* dependent = *(observables.begin());
host_iConsts[2] = numbins;
host_iConsts[3] = (host_iConsts[0] - dependent->upperlimit);
host_iConsts[4] = (host_iConsts[1] - dependent->lowerlimit);
numbins = (int) floor((host_iConsts[4] - host_iConsts[3]) / step);
host_iConsts[5] = numbins;
hipMemcpy(dev_iConsts, host_iConsts, 6*sizeof(fptype), hipMemcpyHostToDevice);
resolWorkSpace = new thrust::device_vector<fptype>(numbins);
// NB, this could potentially be a problem with multiple convolutions.
int offset = dependent->upperlimit / step;
hipMemcpyToSymbol(modelOffset, &offset, sizeof(int), 0, hipMemcpyHostToDevice);
fptype* dev_address[1];
dev_address[0] = (&((*modelWorkSpace)[0])).get();
hipMemcpyToSymbol(dev_modWorkSpace, dev_address, sizeof(fptype*), workSpaceIndex*sizeof(fptype), hipMemcpyHostToDevice);
dev_address[0] = (&((*resolWorkSpace)[0])).get();
hipMemcpyToSymbol(dev_resWorkSpace, dev_address, sizeof(fptype*), workSpaceIndex*sizeof(fptype), hipMemcpyHostToDevice);
}
__host__ fptype ConvolutionThrustFunctor::normalise () const {
// First set normalisation factors to one so we can evaluate convolution without getting zeroes
recursiveSetNormalisation(fptype(1.0));
hipMemcpyToSymbol(normalisationFactors, host_normalisation, totalParams*sizeof(fptype), 0, hipMemcpyHostToDevice);
// Next recalculate functions at each point, in preparation for convolution integral
thrust::constant_iterator<fptype*> arrayAddress(dev_iConsts);
thrust::constant_iterator<int> eventSize(1);
thrust::counting_iterator<int> binIndex(0);
MetricTaker modalor(model, getMetricPointer("ptr_to_Eval"));
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(binIndex, eventSize, arrayAddress)),
thrust::make_zip_iterator(thrust::make_tuple(binIndex + modelWorkSpace->size(), eventSize, arrayAddress)),
modelWorkSpace->begin(),
modalor);
thrust::constant_iterator<fptype*> arrayAddress2(dev_iConsts + 3);
MetricTaker resalor(resolution, getMetricPointer("ptr_to_Eval"));
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(binIndex, eventSize, arrayAddress2)),
thrust::make_zip_iterator(thrust::make_tuple(binIndex + resolWorkSpace->size(), eventSize, arrayAddress2)),
resolWorkSpace->begin(),
resalor);
// Then return usual integral
fptype ret = ThrustPdfFunctor::normalise();
return ret;
}
| e9277f0c2692678811a2695baa9f448b5ff18cf8.cu | #include "ConvolutionThrustFunctor.hh"
int totalConvolutions = 0;
#ifdef OMP_ON
#pragma omp threadprivate(totalConvolutions)
#endif
// Need multiple working spaces for the case of several convolutions in one PDF.
__constant__ fptype* dev_modWorkSpace[100];
__constant__ fptype* dev_resWorkSpace[100];
// Number which transforms model range (x1, x2) into resolution range (x1 - maxX, x2 - minX).
// It is equal to the maximum possible value of x0, ie maxX, in bins.
__constant__ int modelOffset = 0;
__device__ fptype device_ConvolvePdfs (fptype* evt, fptype* p, unsigned int* indices) {
fptype ret = 0;
fptype loBound = functorConstants[indices[5]+0];
fptype hiBound = functorConstants[indices[5]+1];
fptype step = functorConstants[indices[5]+2];
fptype x0 = evt[indices[2 + indices[0]]];
int workSpaceIndex = indices[6];
int numbins = (int) FLOOR((hiBound - loBound) / step);
fptype lowerBoundOffset = loBound / step;
lowerBoundOffset -= FLOOR(lowerBoundOffset);
int offsetInBins = (int) FLOOR(x0 / step - lowerBoundOffset);
// integral M(x) * R(x - x0) dx
for (int i = 0; i < numbins; ++i) {
fptype model = dev_modWorkSpace[workSpaceIndex][i];
fptype resol = dev_resWorkSpace[workSpaceIndex][i + modelOffset - offsetInBins];
ret += model*resol;
}
ret *= normalisationFactors[indices[2]];
ret *= normalisationFactors[indices[4]];
return ret;
}
__device__ device_function_ptr ptr_to_ConvolvePdfs = device_ConvolvePdfs;
ConvolutionThrustFunctor::ConvolutionThrustFunctor (std::string n,
Variable* x,
ThrustPdfFunctor* m,
ThrustPdfFunctor* r)
: ThrustPdfFunctor(x, n)
, model(m)
, resolution(r)
, host_iConsts(0)
, modelWorkSpace(0)
, resolWorkSpace(0)
, workSpaceIndex(0)
{
components.push_back(model);
components.push_back(resolution);
// static int totalConvolutions = 0;
// Indices stores (function index)(parameter index) doublet for model and resolution function.
std::vector<unsigned int> paramIndices;
paramIndices.push_back(model->getFunctionIndex());
paramIndices.push_back(model->getParameterIndex());
paramIndices.push_back(resolution->getFunctionIndex());
paramIndices.push_back(resolution->getParameterIndex());
paramIndices.push_back(registerConstants(3));
paramIndices.push_back(workSpaceIndex = totalConvolutions++);
cudaMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_ConvolvePdfs, sizeof(void*));
initialise(paramIndices);
setIntegrationConstants(-10, 10, 0.01);
}
__host__ void ConvolutionThrustFunctor::setIntegrationConstants (fptype lo, fptype hi, fptype step) {
if (!host_iConsts) {
host_iConsts = new fptype[6];
cudaMalloc((void**) &dev_iConsts, 6*sizeof(fptype));
}
host_iConsts[0] = lo;
host_iConsts[1] = hi;
host_iConsts[2] = step;
cudaMemcpyToSymbol(functorConstants, host_iConsts, 3*sizeof(fptype), cIndex*sizeof(fptype), cudaMemcpyHostToDevice);
if (modelWorkSpace) {
delete modelWorkSpace;
delete resolWorkSpace;
}
int numbins = (int) floor((host_iConsts[1] - host_iConsts[0]) / step);
// Different format for integration range!
modelWorkSpace = new thrust::device_vector<fptype>(numbins);
// We will do integral from x1 to x2 of M(x)*R(x - x0) dx.
// So we need to cache the values of M from x1 to x2, which is given
// by the integration range. But R must be cached from x1-maxX to
// x2-minX, and the min and max are given by the dependent variable.
// However, the step must be the same as for the model, or the binning
// will get out of sync.
Variable* dependent = *(observables.begin());
host_iConsts[2] = numbins;
host_iConsts[3] = (host_iConsts[0] - dependent->upperlimit);
host_iConsts[4] = (host_iConsts[1] - dependent->lowerlimit);
numbins = (int) floor((host_iConsts[4] - host_iConsts[3]) / step);
host_iConsts[5] = numbins;
cudaMemcpy(dev_iConsts, host_iConsts, 6*sizeof(fptype), cudaMemcpyHostToDevice);
resolWorkSpace = new thrust::device_vector<fptype>(numbins);
// NB, this could potentially be a problem with multiple convolutions.
int offset = dependent->upperlimit / step;
cudaMemcpyToSymbol(modelOffset, &offset, sizeof(int), 0, cudaMemcpyHostToDevice);
fptype* dev_address[1];
dev_address[0] = (&((*modelWorkSpace)[0])).get();
cudaMemcpyToSymbol(dev_modWorkSpace, dev_address, sizeof(fptype*), workSpaceIndex*sizeof(fptype), cudaMemcpyHostToDevice);
dev_address[0] = (&((*resolWorkSpace)[0])).get();
cudaMemcpyToSymbol(dev_resWorkSpace, dev_address, sizeof(fptype*), workSpaceIndex*sizeof(fptype), cudaMemcpyHostToDevice);
}
__host__ fptype ConvolutionThrustFunctor::normalise () const {
// First set normalisation factors to one so we can evaluate convolution without getting zeroes
recursiveSetNormalisation(fptype(1.0));
cudaMemcpyToSymbol(normalisationFactors, host_normalisation, totalParams*sizeof(fptype), 0, cudaMemcpyHostToDevice);
// Next recalculate functions at each point, in preparation for convolution integral
thrust::constant_iterator<fptype*> arrayAddress(dev_iConsts);
thrust::constant_iterator<int> eventSize(1);
thrust::counting_iterator<int> binIndex(0);
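  // The zip below hands each thread a (bin index, event size, pointer to dev_iConsts) tuple;
  // the MetricTaker presumably converts the bin index into an x value from the (lo, hi, numbins)
  // triple in dev_iConsts, so modelWorkSpace ends up holding M evaluated at every bin centre.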
MetricTaker modalor(model, getMetricPointer("ptr_to_Eval"));
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(binIndex, eventSize, arrayAddress)),
thrust::make_zip_iterator(thrust::make_tuple(binIndex + modelWorkSpace->size(), eventSize, arrayAddress)),
modelWorkSpace->begin(),
modalor);
thrust::constant_iterator<fptype*> arrayAddress2(dev_iConsts + 3);
MetricTaker resalor(resolution, getMetricPointer("ptr_to_Eval"));
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(binIndex, eventSize, arrayAddress2)),
thrust::make_zip_iterator(thrust::make_tuple(binIndex + resolWorkSpace->size(), eventSize, arrayAddress2)),
resolWorkSpace->begin(),
resalor);
// Then return usual integral
fptype ret = ThrustPdfFunctor::normalise();
return ret;
}
|
df5fc444df48924d0bf35b654297110186397c6e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "forward_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
const float *mean = NULL;
hipMalloc(&mean, XSIZE*YSIZE);
const float *var = NULL;
hipMalloc(&var, XSIZE*YSIZE);
const float *weight = NULL;
hipMalloc(&weight, XSIZE*YSIZE);
const float *bias = NULL;
hipMalloc(&bias, XSIZE*YSIZE);
float *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
float *z = NULL;
hipMalloc(&z, XSIZE*YSIZE);
float eps = 1;
int N = XSIZE*YSIZE;
int C = 2;
int S = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
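// Example (one combination from the tables above): XSIZE = 240 with BLOCKX = 32 is padded
// up to iXSIZE = 256, so gridBlock.x = 8 blocks of 32 threads cover the x dimension.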
hipFree(0);
hipLaunchKernelGGL(forward_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, x,mean,var,weight,bias,y,z,eps,N,C,S);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(forward_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, x,mean,var,weight,bias,y,z,eps,N,C,S);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(forward_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, x,mean,var,weight,bias,y,z,eps,N,C,S);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | df5fc444df48924d0bf35b654297110186397c6e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "forward_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
const float *mean = NULL;
cudaMalloc(&mean, XSIZE*YSIZE);
const float *var = NULL;
cudaMalloc(&var, XSIZE*YSIZE);
const float *weight = NULL;
cudaMalloc(&weight, XSIZE*YSIZE);
const float *bias = NULL;
cudaMalloc(&bias, XSIZE*YSIZE);
float *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
float *z = NULL;
cudaMalloc(&z, XSIZE*YSIZE);
float eps = 1;
int N = XSIZE*YSIZE;
int C = 2;
int S = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
forward_kernel<<<gridBlock,threadBlock>>>(x,mean,var,weight,bias,y,z,eps,N,C,S);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
forward_kernel<<<gridBlock,threadBlock>>>(x,mean,var,weight,bias,y,z,eps,N,C,S);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
forward_kernel<<<gridBlock,threadBlock>>>(x,mean,var,weight,bias,y,z,eps,N,C,S);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
46f0db565d1993dabde9f8004d9c5da3579db37b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#define N 500000000 //500 Million Elements
#define THREADS_PER_BLOCK 1024
// GPU kernel function to multiply two array elements and also update the results on the second array
__global__ void multiply(double *p, double *q, unsigned long n){
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n)
q[index] = p[index] * q[index];
}
int main(void) {
double *p, *q; // host copies of p, q
double *gpu_p, *gpu_q; // device copies of p, q
unsigned long size = N * sizeof(double); // we need space for N doubles
unsigned long i;
// Allocate GPU/device copies of gpu_p, gpu_q
hipMalloc((void**)&gpu_p, size);
hipMalloc((void**)&gpu_q, size);
// Allocate CPU/host copies of p, q
p = (double *)malloc(size);
q = (double *)malloc(size);
// Setup input values
for (i = 0; i < N - 1; ++i)
{
p[i] = 24.0;
q[i] = 12.0;
}
// Copy inputs to device
hipMemcpy(gpu_p, p, size, hipMemcpyHostToDevice);
hipMemcpy(gpu_q, q, size, hipMemcpyHostToDevice);
//INITIALIZE CUDA EVENTS
hipEvent_t start, stop;
float elapsedTime;
//CREATING EVENTS
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//CUDA KERNEL STUFF HERE...
// Launch multiply() kernel on GPU with N threads
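// Ceiling division: with N = 500,000,000 and 1024 threads per block this launches
// (500,000,000 + 1023) / 1024 = 488,282 blocks.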
multiply << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(gpu_p, gpu_q, N);
//FINISH RECORDING
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
//CALCULATE ELAPSED TIME
hipEventElapsedTime(&elapsedTime, start, stop);
//DISPLAY COMPUTATION TIME
hipDeviceProp_t prop;
int count;
hipGetDeviceCount(&count);
for (int igtx = 0; igtx < count; igtx++) {
hipGetDeviceProperties(&prop, igtx);
printf("\nGPU Device used for computation: %s\n", prop.name);
printf("\nMultiplication on GPU computed in: %f milliseconds", elapsedTime);
}
// Copy device result back to host copy of q
hipMemcpy(q, gpu_q, size, hipMemcpyDeviceToHost);
// Verifying all values to be 288.0
// fabs(q[i]-288) (absolute value) should be 0
double maxError = 0.0;
for (int i = 0; i < N-1; ++i){
maxError = fmax(maxError, fabs(q[i]-288.0));
}
std::cout << "\nMax error: " << maxError << std::endl;
// Clean CPU memory allocations
free(p); free(q);
// Clean GPU memory allocations
hipFree(gpu_p);
hipFree(gpu_q);
return 0;
}
| 46f0db565d1993dabde9f8004d9c5da3579db37b.cu | #include <iostream>
#define N 500000000 //500 Million Elements
#define THREADS_PER_BLOCK 1024
// GPU kernel function to multiply two array elements and also update the results on the second array
__global__ void multiply(double *p, double *q, unsigned long n){
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n)
q[index] = p[index] * q[index];
}
int main(void) {
double *p, *q; // host copies of p, q
double *gpu_p, *gpu_q; // device copies of p, q
unsigned long size = N * sizeof(double); // we need space for N doubles
unsigned long i;
// Allocate GPU/device copies of gpu_p, gpu_q
cudaMalloc((void**)&gpu_p, size);
cudaMalloc((void**)&gpu_q, size);
// Allocate CPU/host copies of p, q
p = (double *)malloc(size);
q = (double *)malloc(size);
// Setup input values
for (i = 0; i < N - 1; ++i)
{
p[i] = 24.0;
q[i] = 12.0;
}
// Copy inputs to device
cudaMemcpy(gpu_p, p, size, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_q, q, size, cudaMemcpyHostToDevice);
//INITIALIZE CUDA EVENTS
cudaEvent_t start, stop;
float elapsedTime;
//CREATING EVENTS
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//CUDA KERNEL STUFF HERE...
// Launch multiply() kernel on GPU with N threads
multiply << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(gpu_p, gpu_q, N);
//FINISH RECORDING
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
//CALCULATE ELAPSED TIME
cudaEventElapsedTime(&elapsedTime, start, stop);
//DISPLAY COMPUTATION TIME
cudaDeviceProp prop;
int count;
cudaGetDeviceCount(&count);
for (int igtx = 0; igtx < count; igtx++) {
cudaGetDeviceProperties(&prop, igtx);
printf("\nGPU Device used for computation: %s\n", prop.name);
printf("\nMultiplication on GPU computed in: %f milliseconds", elapsedTime);
}
// Copy device result back to host copy of q
cudaMemcpy(q, gpu_q, size, cudaMemcpyDeviceToHost);
// Verifying all values to be 288.0
// fabs(q[i]-288) (absolute value) should be 0
double maxError = 0.0;
for (int i = 0; i < N-1; ++i){
maxError = fmax(maxError, fabs(q[i]-288.0));
}
std::cout << "\nMax error: " << maxError << std::endl;
// Clean CPU memory allocations
free(p); free(q);
// Clean GPU memory allocations
cudaFree(gpu_p);
cudaFree(gpu_q);
return 0;
}
|
70a24ac25aaeac5d0d28ecd74640819d12c67057.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include "kernel.hip"
#include "kernel.h"
#include "headers.h"
double my_timer()
{
struct timeval time;
double _ret_val_0;
gettimeofday(( & time), 0);
_ret_val_0=(time.tv_sec+(time.tv_usec/1000000.0));
return _ret_val_0;
}
void init_matrix(int *A, int *B, int *C, int *D, int size);
void init_filter(float *r, float *Vect_Up, float *Vect_F,
float *Vect_H, float *H, float *F, float *Vect_H_host, int size);
void init_des(unsigned char *packet_in, int size, int index);
int main(){
int i, j, k;
int *h_A[BT_NUM], *h_B[BT_NUM], *h_C[BT_NUM], *h_D[BT_NUM];
int *d_A[BT_NUM], *d_B[BT_NUM], *d_C[BT_NUM];
int *h_count[BT_NUM];
int *d_count[BT_NUM];
int *h_count_host[BT_NUM];
float *h_task_indx;
float *d_task_indx;
float *h_r[BT_NUM],*d_r[BT_NUM];
float *y, *h_H[BT_NUM], *d_H[BT_NUM];
float *h_F[BT_NUM], *d_F[BT_NUM];
float *h_Vect_H[BT_NUM], *d_Vect_H[BT_NUM]; // output of the F
float *h_Vect_Dn[BT_NUM], *d_Vect_Dn[BT_NUM]; // output of the down sampler
float *h_Vect_Up[BT_NUM], *d_Vect_Up[BT_NUM]; // output of the up sampler
float *h_Vect_F[BT_NUM], *d_Vect_F[BT_NUM], *h_Vect_F_host[BT_NUM]; // this is the output of the
unsigned char *h_packet_in[BT_NUM], *d_packet_in[BT_NUM];
unsigned char *h_packet_out[BT_NUM], *d_packet_out[BT_NUM];
unsigned char *h_packet_host[BT_NUM];
uint32 *h_des_esk;
uint32 *h_des_dsk;
uint32 *d_des_esk;
uint32 *d_des_dsk;
int num_thread[task], *d_num_thread;
int num_size[BT_NUM*(TK_NUM/SUB_NUM)];
int pos_task[BT_NUM][TK_NUM];
int *pos_task_dev[BT_NUM];
FILE *fp;
hipSetDevice(0);
double start_timer, end_timer;
fp = fopen("rand.txt", "r");
for(i = 0; i < task; i++)
fscanf(fp, "%1d", &num_thread[i]);
fclose(fp);
for(i = 0; i < task; i++)
num_thread[i] *= 32;
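// num_size is laid out in four groups of BT_NUM entries: [0, BT_NUM) sizes the matrix-multiply
// batches, [BT_NUM, 2*BT_NUM) the mandelbrot count buffers, [2*BT_NUM, 3*BT_NUM) the filter-bank
// signals and [3*BT_NUM, 4*BT_NUM) the DES packet buffers, matching the allocations below.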
for(i = 0; i < BT_NUM*(TK_NUM/SUB_NUM); i++){
num_size[i] = 0;
switch(i/BT_NUM){
case 0:
for(j = 0; j < SUB_NUM; j++)
num_size[i] += (num_thread[i*TK_NUM+j]*num_thread[i*TK_NUM+j]);
break;
case 1:
for(j = SUB_NUM; j < (2*SUB_NUM); j++)
num_size[i] += (num_thread[(i%BT_NUM)*TK_NUM+j]*num_thread[(i%BT_NUM)*TK_NUM+j]);
break;
case 2:
for(j = (2*SUB_NUM); j < (3*SUB_NUM); j++)
num_size[i] += (num_thread[(i%BT_NUM)*TK_NUM+j]*num_thread[(i%BT_NUM)*TK_NUM+j]);
break;
case 3:
for(j = (3*SUB_NUM); j < (4*SUB_NUM); j++)
num_size[i] += (num_thread[(i%BT_NUM)*TK_NUM+j]*num_thread[(i%BT_NUM)*TK_NUM+j]);
break;
}
}
for(i = 0; i < BT_NUM; i++){
for(j = 0; j < TK_NUM/SUB_NUM; j++){
switch(j){
case 0:
for(k = 0; k < SUB_NUM; k++){
if(k == 0) pos_task[i][k] = 0;
else pos_task[i][k] += pos_task[i][k-1] + (num_thread[i*TK_NUM+k-1]*num_thread[i*TK_NUM+k-1]);
}
break;
case 1:
for(k = SUB_NUM; k < 2*SUB_NUM; k++){
if(k == SUB_NUM) pos_task[i][k] = 0;
else pos_task[i][k] += pos_task[i][k-1] + (num_thread[i*TK_NUM+k-1]*num_thread[i*TK_NUM+k-1]);
}
break;
case 2:
for(k = 2*SUB_NUM; k < 3*SUB_NUM; k++){
if(k == 2*SUB_NUM) pos_task[i][k] = 0;
else pos_task[i][k] += pos_task[i][k-1] + (num_thread[i*TK_NUM+k-1]*num_thread[i*TK_NUM+k-1]);
}
break;
case 3:
for(k = 3*SUB_NUM; k < 4*SUB_NUM; k++){
if(k == 3*SUB_NUM) pos_task[i][k] = 0;
else pos_task[i][k] += pos_task[i][k-1] + (num_thread[i*TK_NUM+k-1]*num_thread[i*TK_NUM+k-1]);
}
break;
}
}
}
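// pos_task[i][k] is the running (prefix-sum) offset of task k within batch i's packed buffer,
// letting each fused kernel index its own slice of the shared allocations.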
checkCudaErrors(hipMalloc(&d_num_thread, task*sizeof(int)));
//matrix mult.
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(hipHostMalloc(&h_A[i], num_size[i]*sizeof(int), hipHostMallocDefault));
checkCudaErrors(hipHostMalloc(&h_B[i], num_size[i]*sizeof(int), hipHostMallocDefault));
checkCudaErrors(hipHostMalloc(&h_C[i], num_size[i]*sizeof(int), hipHostMallocDefault));
}
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(hipMalloc(&d_A[i], num_size[i]*sizeof(int)));
checkCudaErrors(hipMalloc(&d_B[i], num_size[i]*sizeof(int)));
checkCudaErrors(hipMalloc(&d_C[i], num_size[i]*sizeof(int)));
checkCudaErrors(hipMalloc(&pos_task_dev[i], TK_NUM*sizeof(int)));
h_D[i] = (int*)malloc(sizeof(int)*num_size[i]);
}
// mandelbrot
h_task_indx = (float*)malloc(task * sizeof(float));
checkCudaErrors(hipMalloc(&d_task_indx, task*sizeof(float)));
for(i = 0; i < task; i++){
h_task_indx[i] = (float)(i/(task/2.0));
}
for(i = 0; i < BT_NUM; i++) {
checkCudaErrors(hipHostMalloc(&h_count[i], num_size[i+BT_NUM] *sizeof(int), NULL));
checkCudaErrors(hipMalloc(&d_count[i], num_size[i+BT_NUM] *sizeof(int)));
h_count_host[i] = (int*)malloc(num_size[i+BT_NUM] * sizeof(int));
}
//filter bank
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(hipHostMalloc(&h_r[i], num_size[i+2*BT_NUM] *sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&d_r[i], num_size[i+2*BT_NUM]*sizeof(float)));
checkCudaErrors(hipHostMalloc(&h_H[i], N_col*SUB_NUM*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&d_H[i], N_col*SUB_NUM*sizeof(float)));
checkCudaErrors(hipHostMalloc(&h_F[i], N_col*SUB_NUM*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&d_F[i], N_col*SUB_NUM*sizeof(float)));
checkCudaErrors(hipHostMalloc(&h_Vect_H[i], num_size[i+2*BT_NUM]*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&d_Vect_H[i], num_size[i+2*BT_NUM]*sizeof(float)));
checkCudaErrors(hipHostMalloc(&h_Vect_Dn[i], num_size[i+2*BT_NUM]*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&d_Vect_Dn[i], num_size[i+2*BT_NUM]*sizeof(float)));
checkCudaErrors(hipHostMalloc(&h_Vect_Up[i], num_size[i+2*BT_NUM]*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&d_Vect_Up[i], num_size[i+2*BT_NUM]*sizeof(float)));
checkCudaErrors(hipHostMalloc(&h_Vect_F[i], num_size[i+2*BT_NUM]*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&d_Vect_F[i], num_size[i+2*BT_NUM]*sizeof(float)));
h_Vect_F_host[i] = (float*)malloc(num_size[i+2*BT_NUM]*sizeof(float));
}
//DES
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(hipHostMalloc(&h_packet_in[i], num_size[i+3*BT_NUM]*sizeof(unsigned char), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&d_packet_in[i], num_size[i+3*BT_NUM]*sizeof(unsigned char)));
checkCudaErrors(hipHostMalloc(&h_packet_out[i], num_size[i+3*BT_NUM]*sizeof(unsigned char), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&d_packet_out[i], num_size[i+3*BT_NUM]*sizeof(unsigned char)));
h_packet_host[i] = (unsigned char *) malloc (num_size[i+3*BT_NUM]*sizeof(unsigned char));
}
checkCudaErrors(hipHostMalloc(&h_des_esk, 96*sizeof(uint32), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&d_des_esk, 96*sizeof(uint32)));
checkCudaErrors(hipHostMalloc(&h_des_dsk, 96*sizeof(uint32), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&d_des_dsk, 96*sizeof(uint32)));
printf("MPE CUDA static fusion inputs are generating\n");
/*Generate encryption key*/
des_set_key(h_des_esk, h_des_dsk, DES3_keys[0], DES3_keys[1], DES3_keys[2]);
//Init.matrix
for(i = 0; i < BT_NUM; i++){
init_matrix(h_A[i], h_B[i], h_C[i], h_D[i], num_size[i]);
}
//Init filter
for(i = 0; i < BT_NUM; i++){
init_filter(h_r[i], h_Vect_Up[i], h_Vect_F[i],
h_Vect_H[i], h_H[i], h_F[i], h_Vect_F_host[i],
num_size[i+2*BT_NUM]);
}
//Init DES
for(i = 0; i < BT_NUM; i++){
init_des(h_packet_in[i], num_size[i+3*SUB_NUM], i);
}
#if 1
//mem copy
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(hipMemcpy(d_A[i], h_A[i], num_size[i] *sizeof(int), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_B[i], h_B[i], num_size[i] *sizeof(int), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(pos_task_dev[i], pos_task[i], TK_NUM *sizeof(int), hipMemcpyHostToDevice));
}
checkCudaErrors(hipMemcpy(d_task_indx, h_task_indx, task*sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_num_thread, num_thread, task*sizeof(float), hipMemcpyHostToDevice));
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(hipMemcpy(d_r[i], h_r[i], num_size[i+2*BT_NUM]*sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_Vect_Up[i], h_Vect_Up[i], num_size[i+2*BT_NUM]*sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_Vect_F[i], h_Vect_F[i], num_size[i+2*BT_NUM]*sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_Vect_H[i], h_Vect_H[i], num_size[i+2*BT_NUM]*sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_H[i], h_H[i], N_col*SUB_NUM*sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_F[i], h_F[i], N_col*SUB_NUM*sizeof(float), hipMemcpyHostToDevice));
}
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(hipMemcpy(d_packet_in[i], h_packet_in[i], num_size[i+3*BT_NUM]*sizeof(unsigned char), hipMemcpyHostToDevice));
}
checkCudaErrors(hipMemcpy(d_des_esk, h_des_esk, 96*sizeof(uint32), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_des_dsk, h_des_dsk, 96*sizeof(uint32), hipMemcpyHostToDevice));
checkCudaErrors(hipDeviceSynchronize());
printf("MPE CUDA static fusion is running\n");
start_timer = my_timer();
// cpu compute
#if 1
for(int k = 0; k < 2; k++){
for(i = 0; i < BT_NUM; i++){
hipLaunchKernelGGL(( d_fused_kernel), dim3(TK_NUM), dim3(TDK_NUM), 0, 0, d_A[i], d_B[i], d_C[i], d_count[i], d_task_indx, d_r[i],
d_H[i], d_Vect_H[i], d_Vect_Dn[i], d_Vect_Up[i], d_Vect_F[i],
d_F[i], d_des_esk, d_des_dsk, d_packet_in[i], d_packet_out[i],
d_num_thread, pos_task_dev[i], i);
}
}
checkCudaErrors(hipDeviceSynchronize());
#endif
end_timer = my_timer();
printf("Multiprogramming CUDA static fusion elapsed Time: %lf Sec.\n", end_timer - start_timer);
// memory copy back
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(hipMemcpy(h_C[i],d_C[i], num_size[i]*sizeof(int), hipMemcpyDeviceToHost));
}
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(hipMemcpyAsync(h_count[i], d_count[i], num_size[i+BT_NUM]*sizeof(int), hipMemcpyDeviceToHost));
}
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(hipMemcpy(h_Vect_F[i], d_Vect_F[i], num_size[i+2*BT_NUM]*sizeof(float), hipMemcpyDeviceToHost));
}
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(hipMemcpy(h_packet_out[i], d_packet_out[i], num_size[i+3*BT_NUM]*sizeof(unsigned char), hipMemcpyDeviceToHost));
}
checkCudaErrors(hipDeviceSynchronize());
//printf("cpu running\n");
start_timer = my_timer();
// cpu compute
#if 1
for(int k = 0; k < 2; k++){
for(i = 0; i < BT_NUM; i++){
fused_kernel(h_A[i], h_B[i], h_D[i], h_count_host[i], h_task_indx, h_r[i],
h_H[i], h_Vect_H[i], h_Vect_Dn[i], h_Vect_Up[i], h_Vect_F_host[i],
h_F[i], h_des_esk, h_des_dsk, h_packet_in[i], h_packet_host[i],
num_thread, pos_task[i], i);
}
}
#endif
end_timer = my_timer();
//printf("CPU elapsed time:%lf Sec.\n", end_timer - start_timer);
#if 1
printf("verifying\n");
int flag = 0;
//verification
for(i = 0; i < BT_NUM; i++){
for(j = 0; j < num_size[i]; j++){
if(h_C[i][j] != h_D[i][j]){
printf("Mult, Error:%d, %d, %d, %d\n", h_C[i][j], h_D[i][j], i, j);
flag = 1;
break;
}
}
}
for(i = 0; i < BT_NUM; i++){
for(j = 0; j < num_size[i+2*BT_NUM]; j++){
if(abs(h_Vect_F[i][j]- h_Vect_F_host[i][j]) > 0.1){
printf("Filter Error:%f, %f\n", h_Vect_F[i][j], h_Vect_F_host[i][j]);
flag = 1;
break;
}
}
}
for(i = 0; i < BT_NUM; i++){
for(j = 0; j < num_size[i+3*BT_NUM]; j++){
if(h_packet_out[i][j] != h_packet_host[i][j]){
printf("DES Error:%u, %u, %d, %d\n", h_packet_out[i][j], h_packet_host[i][j], i, j);
flag = 1;
break;
}
}
}
if(!flag) printf("verified successfully\n");
#endif
#endif
//free mem.
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(hipHostFree(h_A[i]));
checkCudaErrors(hipFree(d_A[i]));
checkCudaErrors(hipHostFree(h_B[i]));
checkCudaErrors(hipFree(d_B[i]));
checkCudaErrors(hipHostFree(h_C[i]));
checkCudaErrors(hipFree(d_C[i]));
checkCudaErrors(hipHostFree(h_count[i]));
checkCudaErrors(hipFree(d_count[i]));
checkCudaErrors(hipHostFree(h_r[i]));
checkCudaErrors(hipFree(d_r[i]));
checkCudaErrors(hipHostFree(h_H[i]));
checkCudaErrors(hipFree(d_H[i]));
checkCudaErrors(hipHostFree(h_F[i]));
checkCudaErrors(hipFree(d_F[i]));
checkCudaErrors(hipHostFree(h_Vect_H[i]));
checkCudaErrors(hipFree(d_Vect_H[i]));
checkCudaErrors(hipHostFree(h_Vect_Dn[i]));
checkCudaErrors(hipFree(d_Vect_Dn[i]));
checkCudaErrors(hipHostFree(h_Vect_Up[i]));
checkCudaErrors(hipFree(d_Vect_Up[i]));
checkCudaErrors(hipHostFree(h_Vect_F[i]));
checkCudaErrors(hipFree(d_Vect_F[i]));
checkCudaErrors(hipHostFree(h_packet_in[i]));
checkCudaErrors(hipFree(d_packet_in[i]));
checkCudaErrors(hipHostFree(h_packet_out[i]));
checkCudaErrors(hipFree(d_packet_out[i]));
checkCudaErrors(hipFree(pos_task_dev[i]));
free(h_packet_host[i]);
free(h_count_host[i]);
free(h_Vect_F_host[i]);
}
checkCudaErrors(hipFree(d_task_indx));
checkCudaErrors(hipHostFree(h_des_esk));
checkCudaErrors(hipFree(d_des_esk));
checkCudaErrors(hipHostFree(h_des_dsk));
checkCudaErrors(hipFree(d_des_dsk));
checkCudaErrors(hipFree(d_num_thread));
free(h_task_indx);
return 0;
}
void init_matrix(int *A, int *B, int *C, int *D, int size){
int i;
for(i = 0; i < size; i++){
A[i] = (i%8)+1;
B[i] = (i%8)+1;
C[i] = 0;
D[i] = 0;
}
}
void init_filter(float *r, float *Vect_Up, float *Vect_F,
float *Vect_H, float *H, float *F,
float *Vect_F_host, int size){
int i;
for(i = 0; i < size; i++){
r[i] = i + 0.0001;
Vect_Up[i] = 0;
Vect_F[i] = 0;
Vect_H[i]=0;
Vect_F_host[i] = 0;
}
for(i = 0; i < N_col*SUB_NUM; i++){
H[i] = 0.0001;
F[i] = 0.0001;
}
}
void init_des(unsigned char *packet_in, int size, int index){
int i;
for(i = 0; i < size; i++){
if(i < HEADER_SIZE ){
packet_in[i] = headers[index % MAX_PACKETS][i];
}else{
packet_in[i] = DES3_init[i%8];
}
}
}
| 70a24ac25aaeac5d0d28ecd74640819d12c67057.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include "kernel.cu"
#include "kernel.h"
#include "headers.h"
double my_timer()
{
struct timeval time;
double _ret_val_0;
gettimeofday(( & time), 0);
_ret_val_0=(time.tv_sec+(time.tv_usec/1000000.0));
return _ret_val_0;
}
void init_matrix(int *A, int *B, int *C, int *D, int size);
void init_filter(float *r, float *Vect_Up, float *Vect_F,
float *Vect_H, float *H, float *F, float *Vect_H_host, int size);
void init_des(unsigned char *packet_in, int size, int index);
int main(){
int i, j, k;
int *h_A[BT_NUM], *h_B[BT_NUM], *h_C[BT_NUM], *h_D[BT_NUM];
int *d_A[BT_NUM], *d_B[BT_NUM], *d_C[BT_NUM];
int *h_count[BT_NUM];
int *d_count[BT_NUM];
int *h_count_host[BT_NUM];
float *h_task_indx;
float *d_task_indx;
float *h_r[BT_NUM],*d_r[BT_NUM];
float *y, *h_H[BT_NUM], *d_H[BT_NUM];
float *h_F[BT_NUM], *d_F[BT_NUM];
float *h_Vect_H[BT_NUM], *d_Vect_H[BT_NUM]; // output of the F
float *h_Vect_Dn[BT_NUM], *d_Vect_Dn[BT_NUM]; // output of the down sampler
float *h_Vect_Up[BT_NUM], *d_Vect_Up[BT_NUM]; // output of the up sampler
float *h_Vect_F[BT_NUM], *d_Vect_F[BT_NUM], *h_Vect_F_host[BT_NUM]; // this is the output of the
unsigned char *h_packet_in[BT_NUM], *d_packet_in[BT_NUM];
unsigned char *h_packet_out[BT_NUM], *d_packet_out[BT_NUM];
unsigned char *h_packet_host[BT_NUM];
uint32 *h_des_esk;
uint32 *h_des_dsk;
uint32 *d_des_esk;
uint32 *d_des_dsk;
int num_thread[task], *d_num_thread;
int num_size[BT_NUM*(TK_NUM/SUB_NUM)];
int pos_task[BT_NUM][TK_NUM];
int *pos_task_dev[BT_NUM];
FILE *fp;
cudaSetDevice(0);
double start_timer, end_timer;
fp = fopen("rand.txt", "r");
for(i = 0; i < task; i++)
fscanf(fp, "%1d", &num_thread[i]);
fclose(fp);
for(i = 0; i < task; i++)
num_thread[i] *= 32;
for(i = 0; i < BT_NUM*(TK_NUM/SUB_NUM); i++){
num_size[i] = 0;
switch(i/BT_NUM){
case 0:
for(j = 0; j < SUB_NUM; j++)
num_size[i] += (num_thread[i*TK_NUM+j]*num_thread[i*TK_NUM+j]);
break;
case 1:
for(j = SUB_NUM; j < (2*SUB_NUM); j++)
num_size[i] += (num_thread[(i%BT_NUM)*TK_NUM+j]*num_thread[(i%BT_NUM)*TK_NUM+j]);
break;
case 2:
for(j = (2*SUB_NUM); j < (3*SUB_NUM); j++)
num_size[i] += (num_thread[(i%BT_NUM)*TK_NUM+j]*num_thread[(i%BT_NUM)*TK_NUM+j]);
break;
case 3:
for(j = (3*SUB_NUM); j < (4*SUB_NUM); j++)
num_size[i] += (num_thread[(i%BT_NUM)*TK_NUM+j]*num_thread[(i%BT_NUM)*TK_NUM+j]);
break;
}
}
for(i = 0; i < BT_NUM; i++){
for(j = 0; j < TK_NUM/SUB_NUM; j++){
switch(j){
case 0:
for(k = 0; k < SUB_NUM; k++){
if(k == 0) pos_task[i][k] = 0;
else pos_task[i][k] += pos_task[i][k-1] + (num_thread[i*TK_NUM+k-1]*num_thread[i*TK_NUM+k-1]);
}
break;
case 1:
for(k = SUB_NUM; k < 2*SUB_NUM; k++){
if(k == SUB_NUM) pos_task[i][k] = 0;
else pos_task[i][k] += pos_task[i][k-1] + (num_thread[i*TK_NUM+k-1]*num_thread[i*TK_NUM+k-1]);
}
break;
case 2:
for(k = 2*SUB_NUM; k < 3*SUB_NUM; k++){
if(k == 2*SUB_NUM) pos_task[i][k] = 0;
else pos_task[i][k] += pos_task[i][k-1] + (num_thread[i*TK_NUM+k-1]*num_thread[i*TK_NUM+k-1]);
}
break;
case 3:
for(k = 3*SUB_NUM; k < 4*SUB_NUM; k++){
if(k == 3*SUB_NUM) pos_task[i][k] = 0;
else pos_task[i][k] += pos_task[i][k-1] + (num_thread[i*TK_NUM+k-1]*num_thread[i*TK_NUM+k-1]);
}
break;
}
}
}
checkCudaErrors(cudaMalloc(&d_num_thread, task*sizeof(int)));
//matrix mult.
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(cudaHostAlloc(&h_A[i], num_size[i]*sizeof(int), cudaHostAllocDefault));
checkCudaErrors(cudaHostAlloc(&h_B[i], num_size[i]*sizeof(int), cudaHostAllocDefault));
checkCudaErrors(cudaHostAlloc(&h_C[i], num_size[i]*sizeof(int), cudaHostAllocDefault));
}
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(cudaMalloc(&d_A[i], num_size[i]*sizeof(int)));
checkCudaErrors(cudaMalloc(&d_B[i], num_size[i]*sizeof(int)));
checkCudaErrors(cudaMalloc(&d_C[i], num_size[i]*sizeof(int)));
checkCudaErrors(cudaMalloc(&pos_task_dev[i], TK_NUM*sizeof(int)));
h_D[i] = (int*)malloc(sizeof(int)*num_size[i]);
}
// mandelbrot
h_task_indx = (float*)malloc(task * sizeof(float));
checkCudaErrors(cudaMalloc(&d_task_indx, task*sizeof(float)));
for(i = 0; i < task; i++){
h_task_indx[i] = (float)(i/(task/2.0));
}
for(i = 0; i < BT_NUM; i++) {
checkCudaErrors(cudaHostAlloc(&h_count[i], num_size[i+BT_NUM] *sizeof(int), NULL));
checkCudaErrors(cudaMalloc(&d_count[i], num_size[i+BT_NUM] *sizeof(int)));
h_count_host[i] = (int*)malloc(num_size[i+BT_NUM] * sizeof(int));
}
//filter bank
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(cudaHostAlloc(&h_r[i], num_size[i+2*BT_NUM] *sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&d_r[i], num_size[i+2*BT_NUM]*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&h_H[i], N_col*SUB_NUM*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&d_H[i], N_col*SUB_NUM*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&h_F[i], N_col*SUB_NUM*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&d_F[i], N_col*SUB_NUM*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&h_Vect_H[i], num_size[i+2*BT_NUM]*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&d_Vect_H[i], num_size[i+2*BT_NUM]*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&h_Vect_Dn[i], num_size[i+2*BT_NUM]*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&d_Vect_Dn[i], num_size[i+2*BT_NUM]*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&h_Vect_Up[i], num_size[i+2*BT_NUM]*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&d_Vect_Up[i], num_size[i+2*BT_NUM]*sizeof(float)));
checkCudaErrors(cudaHostAlloc(&h_Vect_F[i], num_size[i+2*BT_NUM]*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&d_Vect_F[i], num_size[i+2*BT_NUM]*sizeof(float)));
h_Vect_F_host[i] = (float*)malloc(num_size[i+2*BT_NUM]*sizeof(float));
}
//DES
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(cudaHostAlloc(&h_packet_in[i], num_size[i+3*BT_NUM]*sizeof(unsigned char), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&d_packet_in[i], num_size[i+3*BT_NUM]*sizeof(unsigned char)));
checkCudaErrors(cudaHostAlloc(&h_packet_out[i], num_size[i+3*BT_NUM]*sizeof(unsigned char), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&d_packet_out[i], num_size[i+3*BT_NUM]*sizeof(unsigned char)));
h_packet_host[i] = (unsigned char *) malloc (num_size[i+3*BT_NUM]*sizeof(unsigned char));
}
checkCudaErrors(cudaHostAlloc(&h_des_esk, 96*sizeof(uint32), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&d_des_esk, 96*sizeof(uint32)));
checkCudaErrors(cudaHostAlloc(&h_des_dsk, 96*sizeof(uint32), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&d_des_dsk, 96*sizeof(uint32)));
printf("MPE CUDA static fusion inputs are generating\n");
/*Generate encryption key*/
des_set_key(h_des_esk, h_des_dsk, DES3_keys[0], DES3_keys[1], DES3_keys[2]);
//Init.matrix
for(i = 0; i < BT_NUM; i++){
init_matrix(h_A[i], h_B[i], h_C[i], h_D[i], num_size[i]);
}
//Init filter
for(i = 0; i < BT_NUM; i++){
init_filter(h_r[i], h_Vect_Up[i], h_Vect_F[i],
h_Vect_H[i], h_H[i], h_F[i], h_Vect_F_host[i],
num_size[i+2*BT_NUM]);
}
//Init DES
for(i = 0; i < BT_NUM; i++){
init_des(h_packet_in[i], num_size[i+3*SUB_NUM], i);
}
#if 1
//mem copy
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(cudaMemcpy(d_A[i], h_A[i], num_size[i] *sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_B[i], h_B[i], num_size[i] *sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(pos_task_dev[i], pos_task[i], TK_NUM *sizeof(int), cudaMemcpyHostToDevice));
}
checkCudaErrors(cudaMemcpy(d_task_indx, h_task_indx, task*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_num_thread, num_thread, task*sizeof(float), cudaMemcpyHostToDevice));
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(cudaMemcpy(d_r[i], h_r[i], num_size[i+2*BT_NUM]*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_Vect_Up[i], h_Vect_Up[i], num_size[i+2*BT_NUM]*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_Vect_F[i], h_Vect_F[i], num_size[i+2*BT_NUM]*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_Vect_H[i], h_Vect_H[i], num_size[i+2*BT_NUM]*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_H[i], h_H[i], N_col*SUB_NUM*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_F[i], h_F[i], N_col*SUB_NUM*sizeof(float), cudaMemcpyHostToDevice));
}
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(cudaMemcpy(d_packet_in[i], h_packet_in[i], num_size[i+3*BT_NUM]*sizeof(unsigned char), cudaMemcpyHostToDevice));
}
checkCudaErrors(cudaMemcpy(d_des_esk, h_des_esk, 96*sizeof(uint32), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_des_dsk, h_des_dsk, 96*sizeof(uint32), cudaMemcpyHostToDevice));
checkCudaErrors(cudaDeviceSynchronize());
printf("MPE CUDA static fusion is running\n");
start_timer = my_timer();
// cpu compute
#if 1
for(int k = 0; k < 2; k++){
for(i = 0; i < BT_NUM; i++){
d_fused_kernel<<<TK_NUM, TDK_NUM>>>(d_A[i], d_B[i], d_C[i], d_count[i], d_task_indx, d_r[i],
d_H[i], d_Vect_H[i], d_Vect_Dn[i], d_Vect_Up[i], d_Vect_F[i],
d_F[i], d_des_esk, d_des_dsk, d_packet_in[i], d_packet_out[i],
d_num_thread, pos_task_dev[i], i);
}
}
checkCudaErrors(cudaDeviceSynchronize());
#endif
end_timer = my_timer();
printf("Multiprogramming CUDA static fusion elapsed Time: %lf Sec.\n", end_timer - start_timer);
// memory copy back
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(cudaMemcpy(h_C[i],d_C[i], num_size[i]*sizeof(int), cudaMemcpyDeviceToHost));
}
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(cudaMemcpyAsync(h_count[i], d_count[i], num_size[i+BT_NUM]*sizeof(int), cudaMemcpyDeviceToHost));
}
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(cudaMemcpy(h_Vect_F[i], d_Vect_F[i], num_size[i+2*BT_NUM]*sizeof(float), cudaMemcpyDeviceToHost));
}
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(cudaMemcpy(h_packet_out[i], d_packet_out[i], num_size[i+3*BT_NUM]*sizeof(unsigned char), cudaMemcpyDeviceToHost));
}
checkCudaErrors(cudaDeviceSynchronize());
//printf("cpu running\n");
start_timer = my_timer();
// cpu compute
#if 1
for(int k = 0; k < 2; k++){
for(i = 0; i < BT_NUM; i++){
fused_kernel(h_A[i], h_B[i], h_D[i], h_count_host[i], h_task_indx, h_r[i],
h_H[i], h_Vect_H[i], h_Vect_Dn[i], h_Vect_Up[i], h_Vect_F_host[i],
h_F[i], h_des_esk, h_des_dsk, h_packet_in[i], h_packet_host[i],
num_thread, pos_task[i], i);
}
}
#endif
end_timer = my_timer();
//printf("CPU elapsed time:%lf Sec.\n", end_timer - start_timer);
#if 1
printf("verifying\n");
int flag = 0;
//verification
for(i = 0; i < BT_NUM; i++){
for(j = 0; j < num_size[i]; j++){
if(h_C[i][j] != h_D[i][j]){
printf("Mult, Error:%d, %d, %d, %d\n", h_C[i][j], h_D[i][j], i, j);
flag = 1;
break;
}
}
}
for(i = 0; i < BT_NUM; i++){
for(j = 0; j < num_size[i+2*BT_NUM]; j++){
if(abs(h_Vect_F[i][j]- h_Vect_F_host[i][j]) > 0.1){
printf("Filter Error:%f, %f\n", h_Vect_F[i][j], h_Vect_F_host[i][j]);
flag = 1;
break;
}
}
}
for(i = 0; i < BT_NUM; i++){
for(j = 0; j < num_size[i+3*BT_NUM]; j++){
if(h_packet_out[i][j] != h_packet_host[i][j]){
printf("DES Error:%u, %u, %d, %d\n", h_packet_out[i][j], h_packet_host[i][j], i, j);
flag = 1;
break;
}
}
}
if(!flag) printf("verified successfully\n");
#endif
#endif
//free mem.
for(i = 0; i < BT_NUM; i++){
checkCudaErrors(cudaFreeHost(h_A[i]));
checkCudaErrors(cudaFree(d_A[i]));
checkCudaErrors(cudaFreeHost(h_B[i]));
checkCudaErrors(cudaFree(d_B[i]));
checkCudaErrors(cudaFreeHost(h_C[i]));
checkCudaErrors(cudaFree(d_C[i]));
checkCudaErrors(cudaFreeHost(h_count[i]));
checkCudaErrors(cudaFree(d_count[i]));
checkCudaErrors(cudaFreeHost(h_r[i]));
checkCudaErrors(cudaFree(d_r[i]));
checkCudaErrors(cudaFreeHost(h_H[i]));
checkCudaErrors(cudaFree(d_H[i]));
checkCudaErrors(cudaFreeHost(h_F[i]));
checkCudaErrors(cudaFree(d_F[i]));
checkCudaErrors(cudaFreeHost(h_Vect_H[i]));
checkCudaErrors(cudaFree(d_Vect_H[i]));
checkCudaErrors(cudaFreeHost(h_Vect_Dn[i]));
checkCudaErrors(cudaFree(d_Vect_Dn[i]));
checkCudaErrors(cudaFreeHost(h_Vect_Up[i]));
checkCudaErrors(cudaFree(d_Vect_Up[i]));
checkCudaErrors(cudaFreeHost(h_Vect_F[i]));
checkCudaErrors(cudaFree(d_Vect_F[i]));
checkCudaErrors(cudaFreeHost(h_packet_in[i]));
checkCudaErrors(cudaFree(d_packet_in[i]));
checkCudaErrors(cudaFreeHost(h_packet_out[i]));
checkCudaErrors(cudaFree(d_packet_out[i]));
checkCudaErrors(cudaFree(pos_task_dev[i]));
free(h_packet_host[i]);
free(h_count_host[i]);
free(h_Vect_F_host[i]);
}
checkCudaErrors(cudaFree(d_task_indx));
checkCudaErrors(cudaFreeHost(h_des_esk));
checkCudaErrors(cudaFree(d_des_esk));
checkCudaErrors(cudaFreeHost(h_des_dsk));
checkCudaErrors(cudaFree(d_des_dsk));
checkCudaErrors(cudaFree(d_num_thread));
free(h_task_indx);
return 0;
}
void init_matrix(int *A, int *B, int *C, int *D, int size){
int i;
for(i = 0; i < size; i++){
A[i] = (i%8)+1;
B[i] = (i%8)+1;
C[i] = 0;
D[i] = 0;
}
}
void init_filter(float *r, float *Vect_Up, float *Vect_F,
float *Vect_H, float *H, float *F,
float *Vect_F_host, int size){
int i;
for(i = 0; i < size; i++){
r[i] = i + 0.0001;
Vect_Up[i] = 0;
Vect_F[i] = 0;
Vect_H[i]=0;
Vect_F_host[i] = 0;
}
for(i = 0; i < N_col*SUB_NUM; i++){
H[i] = 0.0001;
F[i] = 0.0001;
}
}
void init_des(unsigned char *packet_in, int size, int index){
int i;
for(i = 0; i < size; i++){
if(i < HEADER_SIZE ){
packet_in[i] = headers[index % MAX_PACKETS][i];
}else{
packet_in[i] = DES3_init[i%8];
}
}
}
|
5cf25cfc4153c52ef10c05289295cf9e81198280.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/common.cuh"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// function of max(0,1-|x-x_grid|)
template <typename Dtype>
__device__ Dtype max_abs(const Dtype x, const Dtype x_grid) {
if ((x_grid - x) * (x_grid - x) >= 1) {
return 0;
} else if (x_grid >= x) {
return 1 - x_grid + x;
} else {
return 1 - x + x_grid;
}
}
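// For example, max_abs(1.3, 1.0) = 1 - 1.3 + 1.0 = 0.7: the hat function is 1 at x == x_grid
// and falls linearly to 0 once |x - x_grid| >= 1.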
// derived function of max(0,1-|x-x_grid|)
template <typename Dtype>
__device__ Dtype diff_max_abs(const Dtype x, const Dtype x_grid) {
if ((x_grid - x) * (x_grid - x) >= 1) {
return 0;
} else if (x_grid >= x) {
return -1;
} else {
return 1;
}
}
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const Dtype* data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
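  // Example (assumed shapes): a 5x5 input, 3x3 kernel, pad 1, stride 1, dilation 1 gives
  // height_col = width_col = 5, i.e. 25 columns per channel and channels * 25 threads in total.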
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col);
template <typename Dtype>
__global__ void im2col_dynamic_gpu_kernel(const int n, const Dtype* data_im, const Dtype* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
const Dtype* data_im_ceil_ceil = data_im;
const Dtype* data_im_ceil_floor = data_im;
const Dtype* data_im_floor_ceil = data_im;
const Dtype* data_im_floor_floor = data_im;
int w = index % width_col;
index /= width_col;
int h = index % height_col;
index /= height_col;
int c_im = index % channels;
int n = index / channels;
Dtype dilation = dilation_im[((n * channels + c_im) * height + h) * width + w];
int iDilationFloor = static_cast<int>(dilation);
int iDilationCeil = iDilationFloor + 1;
if (iDilationFloor == pad_h) {
iDilationFloor = iDilationFloor - 1;
iDilationCeil = iDilationCeil - 1;
}
double dist = iDilationCeil - dilation;
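    // e.g. a fractional dilation of 2.3 gives iDilationFloor = 2, iDilationCeil = 3, dist = 0.7;
    // the four samples gathered below at the floor and ceil dilations are blended bilinearly:
    // floor/floor weighted dist*dist, ceil/ceil weighted (1-dist)*(1-dist), mixed samples dist*(1-dist).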
int h_im_ceil = h * stride_h - iDilationCeil;
int w_im_ceil = w * stride_w - iDilationCeil;
int h_im_floor = h * stride_h - iDilationFloor;
int w_im_floor = w * stride_w - iDilationFloor;
data_im_ceil_ceil += ((n * channels + c_im) * height + h_im_ceil) * width + w_im_ceil; // patch start
data_im_ceil_floor += ((n * channels + c_im) * height + h_im_ceil) * width + w_im_floor; // patch start
data_im_floor_ceil += ((n * channels + c_im) * height + h_im_floor) * width + w_im_ceil; // patch start
data_im_floor_floor += ((n * channels + c_im) * height + h_im_floor) * width + w_im_floor; // patch start
int channels_col = channels * kernel_h * kernel_w;
int c = c_im * kernel_h * kernel_w;
data_col += ((n * channels_col + c) * height_col + h) * width_col + w;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
if (h_im_ceil + i * iDilationCeil >= 0 && h_im_ceil + i * iDilationCeil < height && w_im_ceil + j * iDilationCeil >= 0 && w_im_ceil + j * iDilationCeil < width) {
*data_col = data_im_ceil_ceil[(i * iDilationCeil) * width + j * iDilationCeil] * (1-dist) * (1-dist) +
data_im_ceil_floor[(i * iDilationCeil) * width + j * iDilationFloor] * (1-dist) * dist +
data_im_floor_ceil[(i * iDilationFloor) * width + j * iDilationCeil] * dist * (1-dist) +
data_im_floor_floor[(i * iDilationFloor) * width + j * iDilationFloor] * dist * dist;
}
else {
*data_col = 0.;
}
data_col += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_dynamic_gpu(const Dtype* data_im, const Dtype* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, Dtype* data_col) {
// We are going to launch num * channels * height_col * width_col kernels, each
// kernel responsible for copying a single block from a single image.
const int kernel_h_eff = kernel_h + (kernel_h - 1) * (pad_h - 1);
const int kernel_w_eff = kernel_w + (kernel_w - 1) * (pad_w - 1);
int height_col = (height + 2 * pad_h - kernel_h_eff) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w_eff) / stride_w + 1;
int num_kernels = num * channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_dynamic_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, dilation_im,
num, channels, height, width,
kernel_h, kernel_w, pad_h, pad_w,
stride_h, stride_w,
height_col, width_col,
data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_dynamic_gpu<float>(const float* data_im, const float* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, float* data_col);
template void im2col_dynamic_gpu<double>(const double* data_im, const double* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, double* data_col);
template <typename Dtype>
__global__ void im2col_dynamic_back_gpu_kernel(const int n, const Dtype* data_im, const Dtype* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int max_dilation,
const int stride_h, const int stride_w, const int height_col, const int width_col,
Dtype* data_col, Dtype* data_dist) {
CUDA_KERNEL_LOOP(index, n) {
const Dtype* data_im_ceil_ceil = data_im;
const Dtype* data_im_ceil_floor = data_im;
const Dtype* data_im_floor_ceil = data_im;
const Dtype* data_im_floor_floor = data_im;
int w = index % width_col;
index /= width_col;
int h = index % height_col;
index /= height_col;
int c_im = index % channels;
int n = index / channels;
Dtype dilation = dilation_im[h * width + w];
int iDilationFloor = static_cast<int>(dilation);
int iDilationCeil = iDilationFloor + 1;
if (iDilationFloor == max_dilation) {
iDilationFloor = iDilationFloor - 1;
iDilationCeil = iDilationCeil - 1;
}
// Dtype dist = dilation - iDilationFloor;
Dtype dist = iDilationCeil - dilation;
if (c_im == 0) {
data_dist[h * width + w] = dist;
}
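    // The coefficients applied below (2 - 2*dist, 2*dist - 1, -2*dist) match the derivatives of the
    // forward bilinear weights (1-dist)^2, (1-dist)*dist and dist^2 with respect to the dilation
    // value, so this column buffer feeds the gradient with respect to the per-pixel dilation.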
int h_im_ceil = h * stride_h - iDilationCeil;
int w_im_ceil = w * stride_w - iDilationCeil;
int h_im_floor = h * stride_h - iDilationFloor;
int w_im_floor = w * stride_w - iDilationFloor;
data_im_ceil_ceil += ((n * channels + c_im) * height + h_im_ceil) * width + w_im_ceil; // patch start
data_im_ceil_floor += ((n * channels + c_im) * height + h_im_ceil) * width + w_im_floor; // patch start
data_im_floor_ceil += ((n * channels + c_im) * height + h_im_floor) * width + w_im_ceil; // patch start
data_im_floor_floor += ((n * channels + c_im) * height + h_im_floor) * width + w_im_floor; // patch start
int channels_col = channels * kernel_h * kernel_w;
int c = c_im * kernel_h * kernel_w;
data_col += ((n * channels_col + c) * height_col + h) * width_col + w;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
// Dtype diff_h = i-(kernel_h-1)/2;
// Dtype diff_w = j-(kernel_w-1)/2;
if (h_im_ceil + i * iDilationCeil >= 0 && h_im_ceil + i * iDilationCeil < height && w_im_ceil + j * iDilationCeil >= 0 && w_im_ceil + j * iDilationCeil < width) {
/* *data_col = data_im_ceil_ceil[(i * iDilationCeil) * width + j * iDilationCeil] * (diff_cc_h * diff_h + diff_cc_w * diff_w) +
data_im_ceil_floor[(i * iDilationCeil) * width + j * iDilationFloor] * (diff_cf_h * diff_h + diff_cf_w * diff_w) +
data_im_floor_ceil[(i * iDilationFloor) * width + j * iDilationCeil] * (diff_fc_h * diff_h + diff_fc_w * diff_w) +
data_im_floor_floor[(i * iDilationFloor) * width + j * iDilationFloor] * (diff_ff_h * diff_h + diff_ff_w * diff_w);*/
*data_col = data_im_ceil_ceil[(i * iDilationCeil) * width + j * iDilationCeil] * (2 - 2*dist) +
data_im_ceil_floor[(i * iDilationCeil) * width + j * iDilationFloor] * (2*dist - 1) +
data_im_floor_ceil[(i * iDilationFloor) * width + j * iDilationCeil] * (2*dist - 1) +
data_im_floor_floor[(i * iDilationFloor) * width + j * iDilationFloor] * (-2*dist);
//*data_col = 0;
}
else {
*data_col = 0;
}
data_col += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_dynamic_back_gpu(const Dtype* data_im, const Dtype* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, Dtype* data_col) {
// We are going to launch num * channels * height_col * width_col kernels, each
// kernel responsible for copying a single block from a single image.
const int kernel_h_eff = kernel_h + (kernel_h - 1) * (pad_h - 1);
const int kernel_w_eff = kernel_w + (kernel_w - 1) * (pad_w - 1);
int height_col = (height + 2 * pad_h - kernel_h_eff) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w_eff) / stride_w + 1;
int num_kernels = num * channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
Blob<Dtype> dilation_temp;
dilation_temp.Reshape(1, 1, height_col, width_col);
Dtype* pDilation = dilation_temp.mutable_gpu_data();
hipLaunchKernelGGL(( im2col_dynamic_back_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, dilation_im,
num, channels, height, width,
kernel_h, kernel_w, pad_h,
stride_h, stride_w,
height_col, width_col,
data_col, pDilation);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_dynamic_back_gpu<float>(const float* data_im, const float* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, float* data_col);
template void im2col_dynamic_back_gpu<double>(const double* data_im, const double* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, double* data_col);
template <typename Dtype, int num_axes>
__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col) {
int d_temp[num_axes]; // NOLINT(runtime/arrays)
int d_iter[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
int i;
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int channel_in = index;
int channel_out = 1;
for (i = num_axes - 1; i >= 0; --i) {
d_temp[i] = channel_in % shared_col_shape[i + 1];
channel_in /= shared_col_shape[i + 1];
channel_out *= shared_kernel_shape[i];
}
channel_out *= channel_in;
int data_col_inc = 1;
for (i = 0; i < num_axes; ++i) {
channel_out *= shared_col_shape[i + 1];
channel_out += d_temp[i];
d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];
channel_in *= shared_im_shape[i + 1];
channel_in += d_temp[i];
data_col_inc *= shared_col_shape[i + 1];
d_iter[i] = 0;
}
Dtype* data_col_ptr = data_col + channel_out;
const Dtype* data_im_ptr = data_im + channel_in;
bool incremented;
do {
bool in_range = true;
for (i = 0; i < num_axes; ++i) {
const int d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];
in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];
if (!in_range) { break; }
}
if (in_range) {
int data_im_offset = d_iter[0] * shared_dilation[0];
for (i = 1; i < num_axes; ++i) {
data_im_offset *= shared_im_shape[i + 1];
data_im_offset += d_iter[i] * shared_dilation[i];
}
*data_col_ptr = data_im_ptr[data_im_offset];
} else {
*data_col_ptr = 0;
}
data_col_ptr += data_col_inc;
incremented = false;
for (i = num_axes - 1; i >= 0; --i) {
const int d_max = shared_kernel_shape[i];
if (d_iter[i] == d_max - 1) {
d_iter[i] = 0;
} else { // d_iter[i] < d_max - 1
++d_iter[i];
incremented = true;
break;
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented); // do
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes,
const int num_kernels, const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
switch (num_spatial_axes) {
case 1:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 1>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 2:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 2>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 3:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 3>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 4:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 4>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 5:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 5>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 6:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 6>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 7:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 7>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 8:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 8>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 9:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 9>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 10:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((im2col_nd_gpu_kernel<Dtype, 10>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
default:
LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_nd_gpu<float>(const float* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_col);
template void im2col_nd_gpu<double>(const double* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_col);
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
Dtype* data_im) {
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
double* data_im);
template <typename Dtype>
__global__ void elementwise_multiply_kernel(int count, const Dtype* input, Dtype* output) {
CUDA_KERNEL_LOOP(index, count) {
output[index] = input[index]*output[index];
}
}
template <typename Dtype>
void elementwise_multiply(int count, const Dtype* input, Dtype* output) {
hipLaunchKernelGGL(( elementwise_multiply_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, input, output);
CUDA_POST_KERNEL_CHECK;
}
template void elementwise_multiply(int count, const double* input, double* output);
template void elementwise_multiply(int count, const float* input, float* output);
template <typename Dtype>
__global__ void col2im_dynamic_gpu_kernel(const int n, const Dtype* data_col, const Dtype* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype* data_im_ceil_ceil = data_im;
Dtype* data_im_ceil_floor = data_im;
Dtype* data_im_floor_ceil = data_im;
Dtype* data_im_floor_floor = data_im;
int w = index % width_col;
index /= width_col;
int h = index % height_col;
index /= height_col;
int c_im = index % channels;
int n = index / channels;
Dtype dilation = dilation_im[((n * channels + c_im) * height + h) * width + w];
int iDilationFloor = static_cast<int>(dilation);
int iDilationCeil = iDilationFloor + 1;
if (iDilationFloor == pad_h) {
iDilationFloor = iDilationFloor - 1;
iDilationCeil = iDilationCeil - 1;
}
Dtype dist = iDilationCeil - dilation;
int h_im_ceil = h * stride_h - iDilationCeil;
int w_im_ceil = w * stride_w - iDilationCeil;
int h_im_floor = h * stride_h - iDilationFloor;
int w_im_floor = w * stride_w - iDilationFloor;
data_im_ceil_ceil += ((n * channels + c_im) * height + h_im_ceil) * width + w_im_ceil; // patch start
data_im_ceil_floor += ((n * channels + c_im) * height + h_im_ceil) * width + w_im_floor; // patch start
data_im_floor_ceil += ((n * channels + c_im) * height + h_im_floor) * width + w_im_ceil; // patch start
data_im_floor_floor += ((n * channels + c_im) * height + h_im_floor) * width + w_im_floor; // patch start
int channels_col = channels * kernel_h * kernel_w;
int c = c_im * kernel_h * kernel_w;
data_col += ((n * channels_col + c) * height_col + h) * width_col + w;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
if (h_im_ceil + i * iDilationCeil >= 0 && h_im_ceil + i * iDilationCeil < height && w_im_ceil + j * iDilationCeil >= 0 && w_im_ceil + j * iDilationCeil < width) {
atomicAdd(&data_im_ceil_ceil[(i * iDilationCeil) * width + j * iDilationCeil], (*data_col) * (1-dist) * (1-dist));
atomicAdd(&data_im_ceil_floor[(i * iDilationCeil) * width + j * iDilationFloor], (*data_col) * (1-dist) * dist);
atomicAdd(&data_im_floor_ceil[(i * iDilationFloor) * width + j * iDilationCeil], (*data_col) * dist * (1-dist));
atomicAdd(&data_im_floor_floor[(i * iDilationFloor) * width + j * iDilationFloor], (*data_col) * dist * dist);
}
data_col += height_col * width_col;
}
}
/*
int w = index % width_col;
index /= width_col;
int h = index % height_col;
index /= height_col;
int c_im = index % channels;
int n = index / channels;
Dtype dilation = dilation_im[(n * height + h) * width + w];
int iDilationFloor = static_cast<int>(dilation);
if (dilation - iDilationFloor > 0.5) {
dilation = iDilationFloor + 1;
} else {
dilation = iDilationFloor;
}
int dilation_h = dilation;
int dilation_w = dilation;
int h_im = h * stride_h - dilation_h;
int w_im = w * stride_w - dilation_w;
data_im += ((n * channels + c_im) * height + h_im) * width + w_im;
int channels_col = channels * kernel_h * kernel_w;
int c = c_im * kernel_h * kernel_w;
data_col += ((n * channels_col + c) * height_col + h) * width_col + w;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
if (h_im + i * dilation_h >= 0 && h_im + i * dilation_h < height && w_im + j * dilation_w >= 0 && w_im + j * dilation_w < width) {
atomicAdd(&data_im[(i * dilation_h) * width + j * dilation_w], *data_col);
}
data_col += height_col * width_col;
}
}*/
}
}
template <typename Dtype>
void col2im_dynamic_gpu(const Dtype* data_col, const Dtype* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, Dtype* data_im) {
const int kernel_h_eff = kernel_h + (kernel_h - 1) * (pad_h - 1);
const int kernel_w_eff = kernel_w + (kernel_w - 1) * (pad_w - 1);
int height_col = (height + 2 * pad_h - kernel_h_eff) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w_eff) / stride_w + 1;
int num_kernels = num * channels * height_col * width_col;
caffe_gpu_set(num * channels * height * width, Dtype(0), data_im);
hipLaunchKernelGGL(( col2im_dynamic_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, dilation_im,
num, channels, height, width,
kernel_h, kernel_w, pad_h, pad_w,
stride_h, stride_w,
height_col, width_col,
data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_dynamic_gpu<float>(const float* data_col, const float* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, float* data_im);
template void col2im_dynamic_gpu<double>(const double* data_col, const double* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, double* data_im);
template <typename Dtype>
__global__ void col2im_dynamic_back_gpu_kernel(const int n, const Dtype* data_col, const Dtype* input_data, const Dtype* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
const Dtype* input_data_im_ceil_ceil = input_data; // input feature
const Dtype* input_data_im_ceil_floor = input_data;
const Dtype* input_data_im_floor_ceil = input_data;
const Dtype* input_data_im_floor_floor = input_data;
int w = index % width_col;
index /= width_col;
int h = index % height_col;
index /= height_col;
int c_im = index % channels;
int n = index / channels;
Dtype dilation = dilation_im[((n * channels + c_im) * height + h) * width + w];
int iDilationFloor = static_cast<int>(dilation);
int iDilationCeil = iDilationFloor + 1;
if (iDilationFloor == pad_h) {
iDilationFloor = iDilationFloor - 1;
iDilationCeil = iDilationCeil - 1;
}
Dtype dist = iDilationCeil - dilation;
int h_im_ceil = h * stride_h - iDilationCeil;
int w_im_ceil = w * stride_w - iDilationCeil;
int h_im_floor = h * stride_h - iDilationFloor;
int w_im_floor = w * stride_w - iDilationFloor;
input_data_im_ceil_ceil += ((n * channels + c_im) * height + h_im_ceil) * width + w_im_ceil; // patch start
input_data_im_ceil_floor += ((n * channels + c_im) * height + h_im_ceil) * width + w_im_floor; // patch start
input_data_im_floor_ceil += ((n * channels + c_im) * height + h_im_floor) * width + w_im_ceil; // patch start
input_data_im_floor_floor += ((n * channels + c_im) * height + h_im_floor) * width + w_im_floor; // patch start
int channels_col = channels * kernel_h * kernel_w;
int c = c_im * kernel_h * kernel_w;
data_col += ((n * channels_col + c) * height_col + h) * width_col + w;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
if (h_im_ceil + i * iDilationCeil >= 0 && h_im_ceil + i * iDilationCeil < height && w_im_ceil + j * iDilationCeil >= 0 && w_im_ceil + j * iDilationCeil < width) {
val += (*data_col) * ((input_data_im_ceil_ceil[(i * iDilationCeil) * width + j * iDilationCeil]) * (2-2*dist) +
(input_data_im_ceil_floor[(i * iDilationCeil) * width + j * iDilationFloor]) * (2*dist-1) +
(input_data_im_floor_ceil[(i * iDilationFloor) * width + j * iDilationCeil]) * (2*dist-1) +
(input_data_im_floor_floor[(i * iDilationFloor) * width + j * iDilationFloor]) * (-2*dist));
}
data_col += height_col * width_col;
}
}
data_im[((n * channels + c_im) * height + h) * width + w] = val;
}
}
template <typename Dtype>
void col2im_dynamic_back_gpu(const Dtype* data_col, const Dtype* input_data, const Dtype* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, Dtype* data_im) {
const int kernel_h_eff = kernel_h + (kernel_h - 1) * (pad_h - 1);
const int kernel_w_eff = kernel_w + (kernel_w - 1) * (pad_w - 1);
int height_col = (height + 2 * pad_h - kernel_h_eff) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w_eff) / stride_w + 1;
int num_kernels = num * channels * height_col * width_col;
caffe_gpu_set(num * channels * height * width, Dtype(0), data_im);
hipLaunchKernelGGL(( col2im_dynamic_back_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, input_data, dilation_im,
num, channels, height, width,
kernel_h, kernel_w, pad_h, pad_w,
stride_h, stride_w,
height_col, width_col,
data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_dynamic_back_gpu<float>(const float* data_col, const float* input_data, const float* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, float* data_im);
template void col2im_dynamic_back_gpu<double>(const double* data_col, const double* input_data, const double* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, double* data_im);
template <typename Dtype, int num_axes>
__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
int d_im[num_axes]; // NOLINT(runtime/arrays)
int d_col_iter[num_axes]; // NOLINT(runtime/arrays)
int d_col_start[num_axes]; // NOLINT(runtime/arrays)
int d_col_end[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int c_im = index;
// Calculate d_im (image dimensions).
for (int i = num_axes - 1; i >= 0; --i) {
d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];
c_im /= shared_im_shape[i + 1];
}
// Calculate col start/end indices.
bool done = false;
for (int i = 0; i < num_axes; ++i) {
const int kernel_extent =
shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1;
d_col_start[i] = d_col_iter[i] =
(d_im[i] < kernel_extent) ? 0 :
(d_im[i] - kernel_extent) / shared_stride[i] + 1;
d_col_end[i] =
min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]);
if (d_col_start[i] >= d_col_end[i]) {
// Skip computation if the dimension is 0 at any spatial axis --
// final val will be 0.
data_im[index] = 0;
done = true;
break; // for (int i = 0; i < num_axes; ++i)
}
}
if (done) {
continue; // CUDA_KERNEL_LOOP(index, n)
}
// Loop over the col to compute the output val.
Dtype val = 0;
bool incremented = true;
bool skip = false;
do {
// Compute the final offset.
int final_offset = 0;
int kernel_shape_prod = 1;
int kernel_index;
for (int i = num_axes - 1; i >= 0; --i) {
kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];
if (kernel_index % shared_dilation[i]) {
skip = true;
break;
} else {
kernel_index /= shared_dilation[i];
final_offset += kernel_index * kernel_shape_prod;
kernel_shape_prod *= shared_kernel_shape[i];
}
}
if (!skip) {
final_offset += kernel_shape_prod * c_im;
for (int i = 0; i < num_axes; ++i) {
final_offset *= shared_col_shape[i + 1];
final_offset += d_col_iter[i];
}
val += data_col[final_offset];
}
skip = false;
incremented = false;
for (int i = num_axes - 1; i >= 0; --i) {
const int d_max = d_col_end[i];
if (d_col_iter[i] == d_max - 1) {
d_col_iter[i] = d_col_start[i];
} else { // d_col_iter[i] < d_max - 1
++d_col_iter[i];
incremented = true;
break; // for (int i = num_axes - 1; i >= 0; --i)
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented);
data_im[index] = val;
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes,
const int im_size, const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
switch (num_spatial_axes) {
case 1:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 1>), dim3(CAFFE_GET_BLOCKS(im_size)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 2:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 2>), dim3(CAFFE_GET_BLOCKS(im_size)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 3:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 3>), dim3(CAFFE_GET_BLOCKS(im_size)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 4:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 4>), dim3(CAFFE_GET_BLOCKS(im_size)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 5:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 5>), dim3(CAFFE_GET_BLOCKS(im_size)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 6:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 6>), dim3(CAFFE_GET_BLOCKS(im_size)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 7:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 7>), dim3(CAFFE_GET_BLOCKS(im_size)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 8:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 8>), dim3(CAFFE_GET_BLOCKS(im_size)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 9:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 9>), dim3(CAFFE_GET_BLOCKS(im_size)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 10:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, 10>), dim3(CAFFE_GET_BLOCKS(im_size)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_nd_gpu<float>(const float* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_im);
template void col2im_nd_gpu<double>(const double* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_im);
} // namespace caffe
5cf25cfc4153c52ef10c05289295cf9e81198280.cu
#include <algorithm>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/common.cuh"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// hat (triangular) function max(0, 1 - |x - x_grid|), used as a linear-interpolation weight
template <typename Dtype>
__device__ Dtype max_abs(const Dtype x, const Dtype x_grid) {
if ((x_grid - x) * (x_grid - x) >= 1) {
return 0;
} else if (x_grid >= x) {
return 1 - x_grid + x;
} else {
return 1 - x + x_grid;
}
}
// derivative of max(0, 1 - |x - x_grid|); the slope is +/-1 inside the unit support and 0 outside
template <typename Dtype>
__device__ Dtype diff_max_abs(const Dtype x, const Dtype x_grid) {
if ((x_grid - x) * (x_grid - x) >= 1) {
return 0;
} else if (x_grid >= x) {
return -1;
} else {
return 1;
}
}
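// Editorial sanity check: max_abs(2.3, 2) = 0.7 and max_abs(2.3, 3) = 0.3,
// i.e. the weights of the two neighbouring integer grid points sum to 1,
// which is exactly linear interpolation. As written, the sign convention of
// diff_max_abs matches the derivative taken with respect to the grid
// coordinate x_grid (it returns -1 when x_grid >= x, +1 otherwise, and 0
// outside the unit support).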
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const Dtype* data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col);
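// Editorial note, inferred from the indexing in im2col_gpu_kernel: data_col is
// laid out as (channels * kernel_h * kernel_w) rows by (height_col * width_col)
// columns, stored row-major, so the caller must provide
//   channels * kernel_h * kernel_w * height_col * width_col
// elements; in Caffe this buffer is typically fed straight into a GEMM against
// the filter matrix.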
template <typename Dtype>
__global__ void im2col_dynamic_gpu_kernel(const int n, const Dtype* data_im, const Dtype* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
const Dtype* data_im_ceil_ceil = data_im;
const Dtype* data_im_ceil_floor = data_im;
const Dtype* data_im_floor_ceil = data_im;
const Dtype* data_im_floor_floor = data_im;
int w = index % width_col;
index /= width_col;
int h = index % height_col;
index /= height_col;
int c_im = index % channels;
int n = index / channels;
Dtype dilation = dilation_im[((n * channels + c_im) * height + h) * width + w];
int iDilationFloor = static_cast<int>(dilation);
int iDilationCeil = iDilationFloor + 1;
if (iDilationFloor == pad_h) {
iDilationFloor = iDilationFloor - 1;
iDilationCeil = iDilationCeil - 1;
}
double dist = iDilationCeil - dilation;
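// Editorial note: dist lies in [0, 1] and measures how far `dilation` sits
// below iDilationCeil. The weighted sum below blends the four samples taken
// at the floor/ceil dilations with bilinear weights (1-dist)^2, (1-dist)*dist,
// dist*(1-dist) and dist^2 (they sum to 1); the same fractional offset is
// reused for both the h and w axes because the dilation is isotropic.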
int h_im_ceil = h * stride_h - iDilationCeil;
int w_im_ceil = w * stride_w - iDilationCeil;
int h_im_floor = h * stride_h - iDilationFloor;
int w_im_floor = w * stride_w - iDilationFloor;
data_im_ceil_ceil += ((n * channels + c_im) * height + h_im_ceil) * width + w_im_ceil; // patch start
data_im_ceil_floor += ((n * channels + c_im) * height + h_im_ceil) * width + w_im_floor; // patch start
data_im_floor_ceil += ((n * channels + c_im) * height + h_im_floor) * width + w_im_ceil; // patch start
data_im_floor_floor += ((n * channels + c_im) * height + h_im_floor) * width + w_im_floor; // patch start
int channels_col = channels * kernel_h * kernel_w;
int c = c_im * kernel_h * kernel_w;
data_col += ((n * channels_col + c) * height_col + h) * width_col + w;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
if (h_im_ceil + i * iDilationCeil >= 0 && h_im_ceil + i * iDilationCeil < height && w_im_ceil + j * iDilationCeil >= 0 && w_im_ceil + j * iDilationCeil < width) {
*data_col = data_im_ceil_ceil[(i * iDilationCeil) * width + j * iDilationCeil] * (1-dist) * (1-dist) +
data_im_ceil_floor[(i * iDilationCeil) * width + j * iDilationFloor] * (1-dist) * dist +
data_im_floor_ceil[(i * iDilationFloor) * width + j * iDilationCeil] * dist * (1-dist) +
data_im_floor_floor[(i * iDilationFloor) * width + j * iDilationFloor] * dist * dist;
}
else {
*data_col = 0.;
}
data_col += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_dynamic_gpu(const Dtype* data_im, const Dtype* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, Dtype* data_col) {
// We are going to launch num * channels * height_col * width_col kernels, each
// kernel responsible for copying a single block from a single image.
const int kernel_h_eff = kernel_h + (kernel_h - 1) * (pad_h - 1);
const int kernel_w_eff = kernel_w + (kernel_w - 1) * (pad_w - 1);
int height_col = (height + 2 * pad_h - kernel_h_eff) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w_eff) / stride_w + 1;
int num_kernels = num * channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_dynamic_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, dilation_im,
num, channels, height, width,
kernel_h, kernel_w, pad_h, pad_w,
stride_h, stride_w,
height_col, width_col,
data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_dynamic_gpu<float>(const float* data_im, const float* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, float* data_col);
template void im2col_dynamic_gpu<double>(const double* data_im, const double* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, double* data_col);
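// Editorial usage note (a sketch inferred from im2col_dynamic_gpu_kernel, not
// part of the original file): the caller is expected to provide
//   data_col    with num * channels * kernel_h * kernel_w * height_col * width_col elements,
//   dilation_im as a num x channels x height x width map, read at the output
//               coordinates (h, w) of each column position,
// with height_col / width_col computed as in im2col_dynamic_gpu above.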
template <typename Dtype>
__global__ void im2col_dynamic_back_gpu_kernel(const int n, const Dtype* data_im, const Dtype* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int max_dilation,
const int stride_h, const int stride_w, const int height_col, const int width_col,
Dtype* data_col, Dtype* data_dist) {
CUDA_KERNEL_LOOP(index, n) {
const Dtype* data_im_ceil_ceil = data_im;
const Dtype* data_im_ceil_floor = data_im;
const Dtype* data_im_floor_ceil = data_im;
const Dtype* data_im_floor_floor = data_im;
int w = index % width_col;
index /= width_col;
int h = index % height_col;
index /= height_col;
int c_im = index % channels;
int n = index / channels;
Dtype dilation = dilation_im[h * width + w];
int iDilationFloor = static_cast<int>(dilation);
int iDilationCeil = iDilationFloor + 1;
if (iDilationFloor == max_dilation) {
iDilationFloor = iDilationFloor - 1;
iDilationCeil = iDilationCeil - 1;
}
// Dtype dist = dilation - iDilationFloor;
Dtype dist = iDilationCeil - dilation;
if (c_im == 0) {
data_dist[h * width + w] = dist;
}
int h_im_ceil = h * stride_h - iDilationCeil;
int w_im_ceil = w * stride_w - iDilationCeil;
int h_im_floor = h * stride_h - iDilationFloor;
int w_im_floor = w * stride_w - iDilationFloor;
data_im_ceil_ceil += ((n * channels + c_im) * height + h_im_ceil) * width + w_im_ceil; // patch start
data_im_ceil_floor += ((n * channels + c_im) * height + h_im_ceil) * width + w_im_floor; // patch start
data_im_floor_ceil += ((n * channels + c_im) * height + h_im_floor) * width + w_im_ceil; // patch start
data_im_floor_floor += ((n * channels + c_im) * height + h_im_floor) * width + w_im_floor; // patch start
int channels_col = channels * kernel_h * kernel_w;
int c = c_im * kernel_h * kernel_w;
data_col += ((n * channels_col + c) * height_col + h) * width_col + w;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
// Dtype diff_h = i-(kernel_h-1)/2;
// Dtype diff_w = j-(kernel_w-1)/2;
if (h_im_ceil + i * iDilationCeil >= 0 && h_im_ceil + i * iDilationCeil < height && w_im_ceil + j * iDilationCeil >= 0 && w_im_ceil + j * iDilationCeil < width) {
/* *data_col = data_im_ceil_ceil[(i * iDilationCeil) * width + j * iDilationCeil] * (diff_cc_h * diff_h + diff_cc_w * diff_w) +
data_im_ceil_floor[(i * iDilationCeil) * width + j * iDilationFloor] * (diff_cf_h * diff_h + diff_cf_w * diff_w) +
data_im_floor_ceil[(i * iDilationFloor) * width + j * iDilationCeil] * (diff_fc_h * diff_h + diff_fc_w * diff_w) +
data_im_floor_floor[(i * iDilationFloor) * width + j * iDilationFloor] * (diff_ff_h * diff_h + diff_ff_w * diff_w);*/
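// Editorial note: the (2 - 2*dist), (2*dist - 1) and (-2*dist) factors below
// are the derivatives, with respect to the dilation value, of the bilinear
// weights (1-dist)^2, (1-dist)*dist and dist^2 used by
// im2col_dynamic_gpu_kernel above (dist = iDilationCeil - dilation, so
// d(dist)/d(dilation) = -1).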
*data_col = data_im_ceil_ceil[(i * iDilationCeil) * width + j * iDilationCeil] * (2 - 2*dist) +
data_im_ceil_floor[(i * iDilationCeil) * width + j * iDilationFloor] * (2*dist - 1) +
data_im_floor_ceil[(i * iDilationFloor) * width + j * iDilationCeil] * (2*dist - 1) +
data_im_floor_floor[(i * iDilationFloor) * width + j * iDilationFloor] * (-2*dist);
//*data_col = 0;
}
else {
*data_col = 0;
}
data_col += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_dynamic_back_gpu(const Dtype* data_im, const Dtype* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, Dtype* data_col) {
// We are going to launch num * channels * height_col * width_col kernels, each
// kernel responsible for copying a single block from a single image.
const int kernel_h_eff = kernel_h + (kernel_h - 1) * (pad_h - 1);
const int kernel_w_eff = kernel_w + (kernel_w - 1) * (pad_w - 1);
int height_col = (height + 2 * pad_h - kernel_h_eff) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w_eff) / stride_w + 1;
int num_kernels = num * channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
Blob<Dtype> dilation_temp;
dilation_temp.Reshape(1, 1, height_col, width_col);
Dtype* pDilation = dilation_temp.mutable_gpu_data();
im2col_dynamic_back_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, dilation_im,
num, channels, height, width,
kernel_h, kernel_w, pad_h,
stride_h, stride_w,
height_col, width_col,
data_col, pDilation);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_dynamic_back_gpu<float>(const float* data_im, const float* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, float* data_col);
template void im2col_dynamic_back_gpu<double>(const double* data_im, const double* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, double* data_col);
template <typename Dtype, int num_axes>
__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col) {
int d_temp[num_axes]; // NOLINT(runtime/arrays)
int d_iter[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
int i;
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int channel_in = index;
int channel_out = 1;
for (i = num_axes - 1; i >= 0; --i) {
d_temp[i] = channel_in % shared_col_shape[i + 1];
channel_in /= shared_col_shape[i + 1];
channel_out *= shared_kernel_shape[i];
}
channel_out *= channel_in;
int data_col_inc = 1;
for (i = 0; i < num_axes; ++i) {
channel_out *= shared_col_shape[i + 1];
channel_out += d_temp[i];
d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];
channel_in *= shared_im_shape[i + 1];
channel_in += d_temp[i];
data_col_inc *= shared_col_shape[i + 1];
d_iter[i] = 0;
}
Dtype* data_col_ptr = data_col + channel_out;
const Dtype* data_im_ptr = data_im + channel_in;
bool incremented;
do {
bool in_range = true;
for (i = 0; i < num_axes; ++i) {
const int d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];
in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];
if (!in_range) { break; }
}
if (in_range) {
int data_im_offset = d_iter[0] * shared_dilation[0];
for (i = 1; i < num_axes; ++i) {
data_im_offset *= shared_im_shape[i + 1];
data_im_offset += d_iter[i] * shared_dilation[i];
}
*data_col_ptr = data_im_ptr[data_im_offset];
} else {
*data_col_ptr = 0;
}
data_col_ptr += data_col_inc;
incremented = false;
for (i = num_axes - 1; i >= 0; --i) {
const int d_max = shared_kernel_shape[i];
if (d_iter[i] == d_max - 1) {
d_iter[i] = 0;
} else { // d_iter[i] < d_max - 1
++d_iter[i];
incremented = true;
break;
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented); // do
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes,
const int num_kernels, const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
switch (num_spatial_axes) {
case 1:
im2col_nd_gpu_kernel<Dtype, 1> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 2:
im2col_nd_gpu_kernel<Dtype, 2> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 3:
im2col_nd_gpu_kernel<Dtype, 3> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 4:
im2col_nd_gpu_kernel<Dtype, 4> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 5:
im2col_nd_gpu_kernel<Dtype, 5> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 6:
im2col_nd_gpu_kernel<Dtype, 6> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 7:
im2col_nd_gpu_kernel<Dtype, 7> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 8:
im2col_nd_gpu_kernel<Dtype, 8> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 9:
im2col_nd_gpu_kernel<Dtype, 9> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 10:
im2col_nd_gpu_kernel<Dtype, 10> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
default:
LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_nd_gpu<float>(const float* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_col);
template void im2col_nd_gpu<double>(const double* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_col);
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
Dtype* data_im) {
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
double* data_im);
template <typename Dtype>
__global__ void elementwise_multiply_kernel(int count, const Dtype* input, Dtype* output) {
CUDA_KERNEL_LOOP(index, count) {
output[index] = input[index]*output[index];
}
}
template <typename Dtype>
void elementwise_multiply(int count, const Dtype* input, Dtype* output) {
elementwise_multiply_kernel<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, input, output);
CUDA_POST_KERNEL_CHECK;
}
template void elementwise_multiply(int count, const double* input, double* output);
template void elementwise_multiply(int count, const float* input, float* output);
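// Editorial usage sketch (hypothetical buffer names, not original code):
//   elementwise_multiply<float>(count, d_scale, d_blob);
// scales d_blob in place by d_scale, element by element; both must be device
// pointers holding `count` elements.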
template <typename Dtype>
__global__ void col2im_dynamic_gpu_kernel(const int n, const Dtype* data_col, const Dtype* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int height_col, const int width_col,
Dtype* data_im) {
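// Editorial note: unlike col2im_gpu_kernel, this kernel walks over column
// (output) positions rather than image pixels, so different threads can
// scatter into the same data_im element; the atomicAdd calls below, together
// with the caffe_gpu_set zero-fill in the host wrapper, make that
// accumulation safe.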
CUDA_KERNEL_LOOP(index, n) {
Dtype* data_im_ceil_ceil = data_im;
Dtype* data_im_ceil_floor = data_im;
Dtype* data_im_floor_ceil = data_im;
Dtype* data_im_floor_floor = data_im;
int w = index % width_col;
index /= width_col;
int h = index % height_col;
index /= height_col;
int c_im = index % channels;
int n = index / channels;
Dtype dilation = dilation_im[((n * channels + c_im) * height + h) * width + w];
int iDilationFloor = static_cast<int>(dilation);
int iDilationCeil = iDilationFloor + 1;
if (iDilationFloor == pad_h) {
iDilationFloor = iDilationFloor - 1;
iDilationCeil = iDilationCeil - 1;
}
Dtype dist = iDilationCeil - dilation;
int h_im_ceil = h * stride_h - iDilationCeil;
int w_im_ceil = w * stride_w - iDilationCeil;
int h_im_floor = h * stride_h - iDilationFloor;
int w_im_floor = w * stride_w - iDilationFloor;
data_im_ceil_ceil += ((n * channels + c_im) * height + h_im_ceil) * width + w_im_ceil; // patch start
data_im_ceil_floor += ((n * channels + c_im) * height + h_im_ceil) * width + w_im_floor; // patch start
data_im_floor_ceil += ((n * channels + c_im) * height + h_im_floor) * width + w_im_ceil; // patch start
data_im_floor_floor += ((n * channels + c_im) * height + h_im_floor) * width + w_im_floor; // patch start
int channels_col = channels * kernel_h * kernel_w;
int c = c_im * kernel_h * kernel_w;
data_col += ((n * channels_col + c) * height_col + h) * width_col + w;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
if (h_im_ceil + i * iDilationCeil >= 0 && h_im_ceil + i * iDilationCeil < height && w_im_ceil + j * iDilationCeil >= 0 && w_im_ceil + j * iDilationCeil < width) {
atomicAdd(&data_im_ceil_ceil[(i * iDilationCeil) * width + j * iDilationCeil], (*data_col) * (1-dist) * (1-dist));
atomicAdd(&data_im_ceil_floor[(i * iDilationCeil) * width + j * iDilationFloor], (*data_col) * (1-dist) * dist);
atomicAdd(&data_im_floor_ceil[(i * iDilationFloor) * width + j * iDilationCeil], (*data_col) * dist * (1-dist));
atomicAdd(&data_im_floor_floor[(i * iDilationFloor) * width + j * iDilationFloor], (*data_col) * dist * dist);
}
data_col += height_col * width_col;
}
}
/*
int w = index % width_col;
index /= width_col;
int h = index % height_col;
index /= height_col;
int c_im = index % channels;
int n = index / channels;
Dtype dilation = dilation_im[(n * height + h) * width + w];
int iDilationFloor = static_cast<int>(dilation);
if (dilation - iDilationFloor > 0.5) {
dilation = iDilationFloor + 1;
} else {
dilation = iDilationFloor;
}
int dilation_h = dilation;
int dilation_w = dilation;
int h_im = h * stride_h - dilation_h;
int w_im = w * stride_w - dilation_w;
data_im += ((n * channels + c_im) * height + h_im) * width + w_im;
int channels_col = channels * kernel_h * kernel_w;
int c = c_im * kernel_h * kernel_w;
data_col += ((n * channels_col + c) * height_col + h) * width_col + w;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
if (h_im + i * dilation_h >= 0 && h_im + i * dilation_h < height && w_im + j * dilation_w >= 0 && w_im + j * dilation_w < width) {
atomicAdd(&data_im[(i * dilation_h) * width + j * dilation_w], *data_col);
}
data_col += height_col * width_col;
}
}*/
}
}
template <typename Dtype>
void col2im_dynamic_gpu(const Dtype* data_col, const Dtype* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, Dtype* data_im) {
const int kernel_h_eff = kernel_h + (kernel_h - 1) * (pad_h - 1);
const int kernel_w_eff = kernel_w + (kernel_w - 1) * (pad_w - 1);
int height_col = (height + 2 * pad_h - kernel_h_eff) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w_eff) / stride_w + 1;
int num_kernels = num * channels * height_col * width_col;
caffe_gpu_set(num * channels * height * width, Dtype(0), data_im);
col2im_dynamic_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_col, dilation_im,
num, channels, height, width,
kernel_h, kernel_w, pad_h, pad_w,
stride_h, stride_w,
height_col, width_col,
data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_dynamic_gpu<float>(const float* data_col, const float* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, float* data_im);
template void col2im_dynamic_gpu<double>(const double* data_col, const double* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, double* data_im);
template <typename Dtype>
__global__ void col2im_dynamic_back_gpu_kernel(const int n, const Dtype* data_col, const Dtype* input_data, const Dtype* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
const Dtype* input_data_im_ceil_ceil = input_data; // input feature
const Dtype* input_data_im_ceil_floor = input_data;
const Dtype* input_data_im_floor_ceil = input_data;
const Dtype* input_data_im_floor_floor = input_data;
int w = index % width_col;
index /= width_col;
int h = index % height_col;
index /= height_col;
int c_im = index % channels;
int n = index / channels;
Dtype dilation = dilation_im[((n * channels + c_im) * height + h) * width + w];
int iDilationFloor = static_cast<int>(dilation);
int iDilationCeil = iDilationFloor + 1;
if (iDilationFloor == pad_h) {
iDilationFloor = iDilationFloor - 1;
iDilationCeil = iDilationCeil - 1;
}
Dtype dist = iDilationCeil - dilation;
int h_im_ceil = h * stride_h - iDilationCeil;
int w_im_ceil = w * stride_w - iDilationCeil;
int h_im_floor = h * stride_h - iDilationFloor;
int w_im_floor = w * stride_w - iDilationFloor;
input_data_im_ceil_ceil += ((n * channels + c_im) * height + h_im_ceil) * width + w_im_ceil; // patch start
input_data_im_ceil_floor += ((n * channels + c_im) * height + h_im_ceil) * width + w_im_floor; // patch start
input_data_im_floor_ceil += ((n * channels + c_im) * height + h_im_floor) * width + w_im_ceil; // patch start
input_data_im_floor_floor += ((n * channels + c_im) * height + h_im_floor) * width + w_im_floor; // patch start
int channels_col = channels * kernel_h * kernel_w;
int c = c_im * kernel_h * kernel_w;
data_col += ((n * channels_col + c) * height_col + h) * width_col + w;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
if (h_im_ceil + i * iDilationCeil >= 0 && h_im_ceil + i * iDilationCeil < height && w_im_ceil + j * iDilationCeil >= 0 && w_im_ceil + j * iDilationCeil < width) {
val += (*data_col) * ((input_data_im_ceil_ceil[(i * iDilationCeil) * width + j * iDilationCeil]) * (2-2*dist) +
(input_data_im_ceil_floor[(i * iDilationCeil) * width + j * iDilationFloor]) * (2*dist-1) +
(input_data_im_floor_ceil[(i * iDilationFloor) * width + j * iDilationCeil]) * (2*dist-1) +
(input_data_im_floor_floor[(i * iDilationFloor) * width + j * iDilationFloor]) * (-2*dist));
}
data_col += height_col * width_col;
}
}
data_im[((n * channels + c_im) * height + h) * width + w] = val;
}
}
template <typename Dtype>
void col2im_dynamic_back_gpu(const Dtype* data_col, const Dtype* input_data, const Dtype* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, Dtype* data_im) {
const int kernel_h_eff = kernel_h + (kernel_h - 1) * (pad_h - 1);
const int kernel_w_eff = kernel_w + (kernel_w - 1) * (pad_w - 1);
int height_col = (height + 2 * pad_h - kernel_h_eff) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w_eff) / stride_w + 1;
int num_kernels = num * channels * height_col * width_col;
caffe_gpu_set(num * channels * height * width, Dtype(0), data_im);
col2im_dynamic_back_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_col, input_data, dilation_im,
num, channels, height, width,
kernel_h, kernel_w, pad_h, pad_w,
stride_h, stride_w,
height_col, width_col,
data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_dynamic_back_gpu<float>(const float* data_col, const float* input_data, const float* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, float* data_im);
template void col2im_dynamic_back_gpu<double>(const double* data_col, const double* input_data, const double* dilation_im,
const int num, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, double* data_im);
template <typename Dtype, int num_axes>
__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
int d_im[num_axes]; // NOLINT(runtime/arrays)
int d_col_iter[num_axes]; // NOLINT(runtime/arrays)
int d_col_start[num_axes]; // NOLINT(runtime/arrays)
int d_col_end[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int c_im = index;
// Calculate d_im (image dimensions).
for (int i = num_axes - 1; i >= 0; --i) {
d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];
c_im /= shared_im_shape[i + 1];
}
// Calculate col start/end indices.
bool done = false;
for (int i = 0; i < num_axes; ++i) {
const int kernel_extent =
shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1;
d_col_start[i] = d_col_iter[i] =
(d_im[i] < kernel_extent) ? 0 :
(d_im[i] - kernel_extent) / shared_stride[i] + 1;
d_col_end[i] =
min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]);
if (d_col_start[i] >= d_col_end[i]) {
// Skip computation if the dimension is 0 at any spatial axis --
// final val will be 0.
data_im[index] = 0;
done = true;
break; // for (int i = 0; i < num_axes; ++i)
}
}
if (done) {
continue; // CUDA_KERNEL_LOOP(index, n)
}
// Loop over the col to compute the output val.
Dtype val = 0;
bool incremented = true;
bool skip = false;
do {
// Compute the final offset.
int final_offset = 0;
int kernel_shape_prod = 1;
int kernel_index;
for (int i = num_axes - 1; i >= 0; --i) {
kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];
if (kernel_index % shared_dilation[i]) {
skip = true;
break;
} else {
kernel_index /= shared_dilation[i];
final_offset += kernel_index * kernel_shape_prod;
kernel_shape_prod *= shared_kernel_shape[i];
}
}
if (!skip) {
final_offset += kernel_shape_prod * c_im;
for (int i = 0; i < num_axes; ++i) {
final_offset *= shared_col_shape[i + 1];
final_offset += d_col_iter[i];
}
val += data_col[final_offset];
}
skip = false;
incremented = false;
for (int i = num_axes - 1; i >= 0; --i) {
const int d_max = d_col_end[i];
if (d_col_iter[i] == d_max - 1) {
d_col_iter[i] = d_col_start[i];
} else { // d_col_iter[i] < d_max - 1
++d_col_iter[i];
incremented = true;
break; // for (int i = num_axes - 1; i >= 0; --i)
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented);
data_im[index] = val;
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes,
const int im_size, const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
switch (num_spatial_axes) {
case 1:
col2im_nd_gpu_kernel<Dtype, 1> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 2:
col2im_nd_gpu_kernel<Dtype, 2> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 3:
col2im_nd_gpu_kernel<Dtype, 3> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 4:
col2im_nd_gpu_kernel<Dtype, 4> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 5:
col2im_nd_gpu_kernel<Dtype, 5> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 6:
col2im_nd_gpu_kernel<Dtype, 6> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 7:
col2im_nd_gpu_kernel<Dtype, 7> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 8:
col2im_nd_gpu_kernel<Dtype, 8> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 9:
col2im_nd_gpu_kernel<Dtype, 9> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 10:
col2im_nd_gpu_kernel<Dtype, 10> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_nd_gpu<float>(const float* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_im);
template void col2im_nd_gpu<double>(const double* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_im);
} // namespace caffe
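// --- Editor's note: illustrative sketch, not part of the original Caffe file. ---
// The col2im_dynamic_* wrappers above size the column buffer from an "effective" kernel
// extent, kernel_eff = kernel + (kernel - 1) * (pad - 1); in this dynamic-dilation variant
// pad_h / pad_w appear to play the role of the maximum dilation step. The helper name below
// (col2im_dynamic_out_dim) is hypothetical and simply mirrors that arithmetic for one axis.
inline int col2im_dynamic_out_dim(int im, int kernel, int pad, int stride) {
  const int kernel_eff = kernel + (kernel - 1) * (pad - 1);
  return (im + 2 * pad - kernel_eff) / stride + 1;
}
// Example: im = 7, kernel = 3, pad = 2, stride = 1 gives kernel_eff = 3 + 2 * 1 = 5 and an
// output extent of (7 + 4 - 5) / 1 + 1 = 7, i.e. the spatial size is preserved.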
|
b06de324dd2538ab8bb51c5605eed359dea793f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by David Matthews on 5/21/20.
//
#include "../include/CollisionSystem.cuh"
CollisionSystem::CollisionSystem(size_t n, size_t maxCollisionsPerObject, bool toAllocHostVecs) : N(n), MAX_COLLISIONS_PER_OBJECT(maxCollisionsPerObject) {
start = thrust::counting_iterator<unsigned int>(0);
// allocate memory for positions
// host
allocatedByUs = toAllocHostVecs;
RESERVATION_SIZE = N;
hipMalloc((void**)&num_collisions_d_ptr, 1 * sizeof(int));
// CUB sort buffer
cub_sort_bytes_size = 1; // enlarge upon request in kernels.
hipMalloc(&cub_sort_bytes_ptr, cub_sort_bytes_size);
CUDA_CHECK_AFTER_CALL();
set_num_objects(n);
}
CollisionSystem::~CollisionSystem() {
if (allocatedByUs) {
delete[] x_pos_h;
delete[] y_pos_h;
delete[] z_pos_h;
delete[] radius_h;
delete[] host_collisions_a;
}
hipFree(cub_sort_bytes_ptr);
hipFree(x_pos_d_ptr);
hipFree(y_pos_d_ptr);
hipFree(z_pos_d_ptr);
hipFree(radius_d_ptr);
hipFree(tmp_pos_d_ptr);
hipFree(x_rank_d_ptr);
hipFree(y_rank_d_ptr);
hipFree(z_rank_d_ptr);
hipFree(tmp_id_a_d_ptr);
hipFree(tmp_id_b_d_ptr);
hipFree(mortons_d_ptr);
hipFree(mortons_tmp_d_ptr);
hipFree(mortons_id_d_ptr);
hipFree(leaf_parent_d_ptr);
hipFree(internal_parent_d_ptr);
hipFree(internal_childA_d_ptr);
hipFree(internal_childB_d_ptr);
hipFree(internal_node_bbox_complete_flag_d_ptr);
hipFree(bounding_boxes_d_ptr);
hipFree(potential_collisions_idx_d_ptr);
hipFree(potential_collisions_d_ptr);
hipFree(collisions_d_ptr);
}
void CollisionSystem::set_num_objects_host(size_t n) {
if (allocatedByUs) {
// avoid need for special case by ensuring host data is not null (only occurs when first constructing)
if (!x_pos_h) { x_pos_h = new float[RESERVATION_SIZE]; }
if (!y_pos_h) { y_pos_h = new float[RESERVATION_SIZE]; }
if (!z_pos_h) { z_pos_h = new float[RESERVATION_SIZE]; }
if (!radius_h) { radius_h = new float[RESERVATION_SIZE]; }
if (!host_collisions_a) { host_collisions_a = new Collision[RESERVATION_SIZE * MAX_COLLISIONS_PER_OBJECT]; }
float *tmp_x, *tmp_y, *tmp_z, *tmp_r;
tmp_x = new float[RESERVATION_SIZE];
thrust::copy(x_pos_h, x_pos_h + N, tmp_x);
delete[] x_pos_h;
x_pos_h = tmp_x;
tmp_y = new float[RESERVATION_SIZE];
thrust::copy(y_pos_h, y_pos_h + N, tmp_y);
delete[] y_pos_h;
y_pos_h = tmp_y;
tmp_z = new float[RESERVATION_SIZE];
thrust::copy(z_pos_h, z_pos_h + N, tmp_z);
delete[] z_pos_h;
z_pos_h = tmp_z;
tmp_r = new float[RESERVATION_SIZE];
thrust::copy(radius_h, radius_h + N, tmp_r);
delete[] radius_h;
radius_h = tmp_r;
auto *tmp_h = new Collision[RESERVATION_SIZE * MAX_COLLISIONS_PER_OBJECT];
thrust::copy(host_collisions_a, host_collisions_a + N, tmp_h);
delete[] host_collisions_a;
host_collisions_a = tmp_h;
}
}
__host__ __device__
void CollisionSystem::set_num_objects_device(size_t n) {
if (N != n) {
requiresRebuild = true;
}
N = n;
if (N > RESERVATION_SIZE) {
RESERVATION_SIZE = N;
} else if (!needAllocate) {
update_device_pointers_and_functors();
return;
}
// device
hipFree(x_pos_d_ptr);
hipMalloc((void**)&x_pos_d_ptr, RESERVATION_SIZE * sizeof(float));
CUDA_CHECK_AFTER_CALL();
hipFree(y_pos_d_ptr);
hipMalloc((void**)&y_pos_d_ptr, RESERVATION_SIZE * sizeof(float));
CUDA_CHECK_AFTER_CALL();
hipFree(z_pos_d_ptr);
hipMalloc((void**)&z_pos_d_ptr, RESERVATION_SIZE * sizeof(float));
CUDA_CHECK_AFTER_CALL();
hipFree(radius_d_ptr);
hipMalloc((void**)&radius_d_ptr, RESERVATION_SIZE * sizeof(float));
CUDA_CHECK_AFTER_CALL();
hipFree(tmp_pos_d_ptr);
hipMalloc((void**)&tmp_pos_d_ptr, RESERVATION_SIZE * sizeof(float));
CUDA_CHECK_AFTER_CALL();
// alloc rank and id vectors.
hipFree(x_rank_d_ptr);
hipMalloc((void**)&x_rank_d_ptr, RESERVATION_SIZE * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
hipFree(y_rank_d_ptr);
hipMalloc((void**)&y_rank_d_ptr, RESERVATION_SIZE * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
hipFree(z_rank_d_ptr);
hipMalloc((void**)&z_rank_d_ptr, RESERVATION_SIZE * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
hipFree(tmp_id_a_d_ptr);
hipMalloc((void**)&tmp_id_a_d_ptr, RESERVATION_SIZE * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
hipFree(tmp_id_b_d_ptr);
hipMalloc((void**)&tmp_id_b_d_ptr, RESERVATION_SIZE * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
// allocate Morton number vectors
hipFree(mortons_d_ptr);
hipMalloc((void**)&mortons_d_ptr, RESERVATION_SIZE * sizeof(unsigned long long int));
CUDA_CHECK_AFTER_CALL();
hipFree(mortons_tmp_d_ptr);
hipMalloc((void**)&mortons_tmp_d_ptr, RESERVATION_SIZE * sizeof(unsigned long long int));
CUDA_CHECK_AFTER_CALL();
hipFree(mortons_id_d_ptr);
hipMalloc((void**)&mortons_id_d_ptr, RESERVATION_SIZE * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
// alloc vectors for the BVH tree
// for the leaf nodes
hipFree(leaf_parent_d_ptr);
hipMalloc((void**)&leaf_parent_d_ptr, RESERVATION_SIZE * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
// for the internal nodes
hipFree(internal_parent_d_ptr);
hipMalloc((void**)&internal_parent_d_ptr, (RESERVATION_SIZE - 1) * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
hipFree(internal_childA_d_ptr);
hipMalloc((void**)&internal_childA_d_ptr, (RESERVATION_SIZE - 1) * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
hipFree(internal_childB_d_ptr);
hipMalloc((void**)&internal_childB_d_ptr, (RESERVATION_SIZE - 1) * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
// for the bounding boxes for all leaf and internal nodes.
hipFree(internal_node_bbox_complete_flag_d_ptr);
hipMalloc((void**)&internal_node_bbox_complete_flag_d_ptr, (RESERVATION_SIZE - 1) * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
hipFree(bounding_boxes_d_ptr);
hipMalloc((void**)&bounding_boxes_d_ptr, (2 * RESERVATION_SIZE - 1) * sizeof(BoundingBox));
CUDA_CHECK_AFTER_CALL();
hipFree(potential_collisions_idx_d_ptr);
hipMalloc((void**)&potential_collisions_idx_d_ptr, (1) * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
assert(MAX_COLLISIONS_PER_OBJECT * N != 0); // "Size of potential_collisions array must be > 0";
// TODO: can we automatically expand collision memory as needed?
hipFree(potential_collisions_d_ptr);
hipMalloc((void**)&potential_collisions_d_ptr, (MAX_COLLISIONS_PER_OBJECT * RESERVATION_SIZE) * sizeof(Collision));
CUDA_CHECK_AFTER_CALL();
hipFree(collisions_d_ptr);
hipMalloc((void**)&collisions_d_ptr, (MAX_COLLISIONS_PER_OBJECT * RESERVATION_SIZE) * sizeof(Collision));
CUDA_CHECK_AFTER_CALL();
update_device_pointers_and_functors();
// init flags to zero.
thrust::fill(thrust::device, internal_node_bbox_complete_flag_d_ptr, internal_node_bbox_complete_flag_d_ptr + N - 1, 0);
needAllocate = false;
}
void CollisionSystem::set_max_num_cols_per_mass(size_t m) {
MAX_COLLISIONS_PER_OBJECT = m;
hipFree(potential_collisions_d_ptr);
hipMalloc((void**)&potential_collisions_d_ptr, (MAX_COLLISIONS_PER_OBJECT * RESERVATION_SIZE) * sizeof(Collision));
CUDA_CHECK_AFTER_CALL();
hipFree(collisions_d_ptr);
hipMalloc((void**)&collisions_d_ptr, (MAX_COLLISIONS_PER_OBJECT * RESERVATION_SIZE) * sizeof(Collision));
CUDA_CHECK_AFTER_CALL();
update_device_pointers_and_functors();
}
__host__ __device__
void CollisionSystem::update_device_pointers_and_functors() {
compute_morton_numbers = init_morton_func(x_rank_d_ptr,
y_rank_d_ptr,
z_rank_d_ptr,
mortons_d_ptr,
mortons_id_d_ptr);
build_bvh_tree = build_bvh_tree_func(N,
mortons_d_ptr,
leaf_parent_d_ptr,
internal_parent_d_ptr,
internal_childA_d_ptr,
internal_childB_d_ptr);
compute_bounding_boxes = fill_bvh_tree_with_bounding_boxes_func(N,
bounding_boxes_d_ptr,
x_pos_d_ptr,
y_pos_d_ptr,
z_pos_d_ptr,
radius_d_ptr,
mortons_id_d_ptr,
leaf_parent_d_ptr,
internal_parent_d_ptr,
internal_childA_d_ptr,
internal_childB_d_ptr,
internal_node_bbox_complete_flag_d_ptr);
find_potential_collisions = find_potential_collisions_func(N,
N * MAX_COLLISIONS_PER_OBJECT,
mortons_id_d_ptr, bounding_boxes_d_ptr, internal_childA_d_ptr, internal_childB_d_ptr, potential_collisions_idx_d_ptr,
potential_collisions_d_ptr, x_pos_d_ptr, y_pos_d_ptr, z_pos_d_ptr, radius_d_ptr);
check_potential_collisions = check_potential_collisions_func(x_pos_d_ptr, y_pos_d_ptr, z_pos_d_ptr, radius_d_ptr);
}
void CollisionSystem::update_all_from_host() {
update_x_pos_from_host();
update_y_pos_from_host();
update_z_pos_from_host();
update_radius_from_host();
}
void CollisionSystem::update_x_pos_from_host() {
hipMemcpy(x_pos_d_ptr, x_pos_h, sizeof(float) * N, hipMemcpyHostToDevice);
CUDA_CHECK_AFTER_CALL();
VcudaDeviceSynchronize();
}
void CollisionSystem::update_y_pos_from_host() {
hipMemcpy(y_pos_d_ptr, y_pos_h, sizeof(float) * N, hipMemcpyHostToDevice);
CUDA_CHECK_AFTER_CALL();
VcudaDeviceSynchronize();}
void CollisionSystem::update_z_pos_from_host() {
hipMemcpy(z_pos_d_ptr, z_pos_h, sizeof(float) * N, hipMemcpyHostToDevice);
CUDA_CHECK_AFTER_CALL();
VcudaDeviceSynchronize();}
void CollisionSystem::update_radius_from_host() {
hipMemcpy(radius_d_ptr, radius_h, sizeof(float) * N, hipMemcpyHostToDevice);
CUDA_CHECK_AFTER_CALL();
VcudaDeviceSynchronize();}
void CollisionSystem::init() {
// copy from host to device
update_all_from_host();
// compute ranks
update_x_pos_ranks();
update_y_pos_ranks();
update_z_pos_ranks();
// build and sort mortons
update_mortons();
// build BVH tree
build_tree();
// fill BVH tree with bounding boxes
update_bounding_boxes();
}
__host__ __device__
void CollisionSystem::update_x_pos_ranks() {
// keep track of x object ids after sorting.
thrust::sequence(thrust::device, tmp_id_a_d_ptr, tmp_id_a_d_ptr + N);
size_t curr_size;
hipcub::DeviceRadixSort::SortPairs(NULL, curr_size, x_pos_d_ptr, tmp_pos_d_ptr, tmp_id_a_d_ptr, tmp_id_b_d_ptr, N);
if (curr_size > cub_sort_bytes_size) {
cub_sort_bytes_size = curr_size;
hipFree(cub_sort_bytes_ptr);
hipMalloc(&cub_sort_bytes_ptr, cub_sort_bytes_size);
}
hipcub::DeviceRadixSort::SortPairs(cub_sort_bytes_ptr, curr_size, x_pos_d_ptr, tmp_pos_d_ptr, tmp_id_a_d_ptr, tmp_id_b_d_ptr, N);
// save the new rank information
thrust::scatter(thrust::device, start, start + N, tmp_id_b_d_ptr, x_rank_d_ptr);
}
__host__ __device__
void CollisionSystem::update_y_pos_ranks() {
// keep track of y object ids after sorting
thrust::sequence(thrust::device, tmp_id_a_d_ptr, tmp_id_a_d_ptr + N);
// sort the positions to determine rank.
size_t curr_size;
hipcub::DeviceRadixSort::SortPairs(NULL, curr_size, y_pos_d_ptr, tmp_pos_d_ptr, tmp_id_a_d_ptr, tmp_id_b_d_ptr, N);
if (curr_size > cub_sort_bytes_size) {
cub_sort_bytes_size = curr_size;
hipFree(cub_sort_bytes_ptr);
hipMalloc(&cub_sort_bytes_ptr, cub_sort_bytes_size);
}
hipcub::DeviceRadixSort::SortPairs(cub_sort_bytes_ptr, curr_size, y_pos_d_ptr, tmp_pos_d_ptr, tmp_id_a_d_ptr, tmp_id_b_d_ptr, N);
// save the new rank information
thrust::scatter(thrust::device, start, start + N, tmp_id_b_d_ptr, y_rank_d_ptr);
}
__host__ __device__
void CollisionSystem::update_z_pos_ranks() {
// keep track of z object ids after sorting
thrust::sequence(thrust::device, tmp_id_a_d_ptr, tmp_id_a_d_ptr + N);
// sort the positions to determine rank.
size_t curr_size;
hipcub::DeviceRadixSort::SortPairs(NULL, curr_size, z_pos_d_ptr, tmp_pos_d_ptr, tmp_id_a_d_ptr, tmp_id_b_d_ptr, N);
if (curr_size > cub_sort_bytes_size) {
cub_sort_bytes_size = curr_size;
hipFree(cub_sort_bytes_ptr);
hipMalloc(&cub_sort_bytes_ptr, cub_sort_bytes_size);
}
hipcub::DeviceRadixSort::SortPairs(cub_sort_bytes_ptr, curr_size, z_pos_d_ptr, tmp_pos_d_ptr, tmp_id_a_d_ptr, tmp_id_b_d_ptr, N);
// save the new rank information
thrust::scatter(thrust::device, start, start + N, tmp_id_b_d_ptr, z_rank_d_ptr);
}
__host__ __device__
void CollisionSystem::update_mortons() {
// keep track of object ids after sorting.
thrust::sequence(thrust::device, tmp_id_a_d_ptr, tmp_id_a_d_ptr + N);
// build morton numbers.
thrust::for_each(thrust::device, start, start + N, compute_morton_numbers);
thrust::copy(thrust::device, mortons_d_ptr, mortons_d_ptr + N, mortons_tmp_d_ptr); // copy mortons to tmp array as source for sorting.
// sort morton numbers
size_t curr_size;
hipcub::DeviceRadixSort::SortPairs(NULL, curr_size, mortons_tmp_d_ptr, mortons_d_ptr, tmp_id_a_d_ptr, mortons_id_d_ptr, N);
if (curr_size > cub_sort_bytes_size) {
cub_sort_bytes_size = curr_size;
hipFree(cub_sort_bytes_ptr);
hipMalloc(&cub_sort_bytes_ptr, cub_sort_bytes_size);
}
hipcub::DeviceRadixSort::SortPairs(cub_sort_bytes_ptr, curr_size, mortons_tmp_d_ptr, mortons_d_ptr, tmp_id_a_d_ptr, mortons_id_d_ptr, N);
}
__host__ __device__
void CollisionSystem::update_mortons_fast(float2 xlims, float2 ylims, float2 zlims) {
thrust::sequence(thrust::device, mortons_id_d_ptr, mortons_id_d_ptr + N);
// build morton numbers using the Karras method.
// this will be faster if we are not simulating swarms of particles (e.g. if voxels are evenly distributed across range)
// but slower if we are simulating swarms of particles that encompass large amounts of area and are not evenly distributed
// e.g. voxels clumping to form new robots would likely be slower with this method.
thrust::for_each(thrust::device, start,
start + N,
init_morton_func_fast(xlims,
ylims,
zlims,
x_pos_d_ptr, y_pos_d_ptr, z_pos_d_ptr, mortons_d_ptr, mortons_id_d_ptr));
// sort morton numbers
thrust::sort_by_key(thrust::device, mortons_d_ptr, mortons_d_ptr + N, mortons_id_d_ptr);
}
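// --- Editor's note: illustrative sketch, not part of the original CollisionSystem source. ---
// update_mortons_fast() above quantizes each position against the supplied axis limits and
// interleaves the bits into a 64-bit Morton key so that sorting the keys groups spatially
// nearby objects (the "Karras method" mentioned in the comments). The helper names below
// (clamp01, expand_bits_21, morton3d_21) are hypothetical and show one common magic-bits
// implementation with 21 bits per axis; the actual init_morton_func_fast functor may differ.
__host__ __device__ inline float clamp01(float t) { return t < 0.f ? 0.f : (t > 1.f ? 1.f : t); }
__host__ __device__ inline unsigned long long expand_bits_21(unsigned long long v) {
  // spread the low 21 bits of v so that two zero bits separate consecutive payload bits
  v &= 0x1fffffull;
  v = (v | (v << 32)) & 0x1f00000000ffffull;
  v = (v | (v << 16)) & 0x1f0000ff0000ffull;
  v = (v | (v << 8)) & 0x100f00f00f00f00full;
  v = (v | (v << 4)) & 0x10c30c30c30c30c3ull;
  v = (v | (v << 2)) & 0x1249249249249249ull;
  return v;
}
__host__ __device__ inline unsigned long long morton3d_21(
    float x, float y, float z, float2 xlims, float2 ylims, float2 zlims) {
  const float scale = 2097151.0f; // 2^21 - 1
  unsigned long long xi = (unsigned long long)(scale * clamp01((x - xlims.x) / (xlims.y - xlims.x)));
  unsigned long long yi = (unsigned long long)(scale * clamp01((y - ylims.x) / (ylims.y - ylims.x)));
  unsigned long long zi = (unsigned long long)(scale * clamp01((z - zlims.x) / (zlims.y - zlims.x)));
  return (expand_bits_21(xi) << 2) | (expand_bits_21(yi) << 1) | expand_bits_21(zi); // one possible axis order
}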
__host__ __device__
void CollisionSystem::build_tree() {
build_bvh_tree.N = N;
thrust::for_each(thrust::device, start, start + N - 1 , build_bvh_tree);
// int num_SM, curDeviceId, gridSize, blockSize;
// hipGetDevice(&curDeviceId);
// hipDeviceGetAttribute(&num_SM, hipDeviceAttributeMultiprocessorCount, curDeviceId);
// blockSize = (int)(N-1)/num_SM;
// if (num_SM * blockSize < (N-1)) {
// blockSize += 1;
// }
// if (blockSize > 256) {
// blockSize = 256;
// gridSize = ((int)N + 254)/256; // N - 1 + 255 leaf nodes.
// } else {
// gridSize = num_SM;
// }
// build_tree_kernel<<<gridSize, blockSize>>>(0, N - 1, build_bvh_tree);
// CUDA_CHECK_AFTER_CALL();
// VcudaDeviceSynchronize();
}
__host__ __device__
void CollisionSystem::update_bounding_boxes() {
compute_bounding_boxes.N = N;
thrust::for_each(thrust::device, start, start + N, compute_bounding_boxes);
}
__host__
bool CollisionSystem::check_collisions(float pX, float pY, float pZ, float pR) {
thrust::device_vector<bool> result(1);
hipLaunchKernelGGL(( check_collisions_single), dim3(1),dim3(1), 0, 0, find_potential_collisions, pX, pY, pZ, pR, thrust::raw_pointer_cast(result.data()));
return result[0];
}
__device__
bool CollisionSystem::check_collisions_device(float pX, float pY, float pZ, float pR) {
return find_potential_collisions.test_collision(pX, pY, pZ, pR);
}
__host__
int CollisionSystem::find_collisions() {
hipMemset(potential_collisions_idx_d_ptr, 0, sizeof(unsigned int));
find_potential_collisions.N = N;
find_potential_collisions.NUM_INTERNAL = N - 1;
thrust::for_each(thrust::device, start + N - 1, start + 2 * N - 1, find_potential_collisions);
unsigned int h_potential_collision_idx;
hipMemcpy((void*)&h_potential_collision_idx, (void*)potential_collisions_idx_d_ptr, sizeof(unsigned int), hipMemcpyDeviceToHost);
if (h_potential_collision_idx > MAX_COLLISIONS_PER_OBJECT * N) {
hipMemset(num_collisions_d_ptr, -1, sizeof(int));
return -1;
}
unsigned int colCount = thrust::copy_if(thrust::device, potential_collisions_d_ptr,
potential_collisions_d_ptr + h_potential_collision_idx,
collisions_d_ptr,
check_potential_collisions) - collisions_d_ptr;
hipMemcpy(num_collisions_d_ptr, &colCount, sizeof(int), hipMemcpyHostToDevice); // write the actual count (memset would replicate only the low byte)
return colCount;
}
__device__
int CollisionSystem::find_collisions_device(int pruneLevel) {
potential_collisions_idx_d_ptr[0] = 0;
find_potential_collisions.N = N;
find_potential_collisions.NUM_INTERNAL = N - 1;
int num_SM, curDeviceId, gridSize, blockSize;
hipGetDevice(&curDeviceId);
hipDeviceGetAttribute(&num_SM, hipDeviceAttributeMultiprocessorCount, curDeviceId);
blockSize = (int)N/num_SM;
if (num_SM * blockSize < N) {
blockSize += 1;
}
if (blockSize > 256) {
blockSize = 256;
gridSize = ((int)N + 255)/256;
} else {
gridSize = num_SM;
}
hipLaunchKernelGGL(( find_potential_collisions_kernel), dim3(gridSize), dim3(blockSize), 0, 0, N - 1, N, find_potential_collisions);
CUDA_CHECK_AFTER_CALL();
VcudaDeviceSynchronize();
if (potential_collisions_idx_d_ptr[0] > MAX_COLLISIONS_PER_OBJECT * N) {
num_collisions_d_ptr[0] = -1;
return -1;
}
if (pruneLevel == 0) {
num_collisions_d_ptr[0] = (int) potential_collisions_idx_d_ptr[0];
thrust::copy(thrust::device, potential_collisions_d_ptr,potential_collisions_d_ptr + potential_collisions_idx_d_ptr[0], collisions_d_ptr);
} else {
unsigned int colCount = thrust::copy_if(thrust::device, potential_collisions_d_ptr,
potential_collisions_d_ptr + potential_collisions_idx_d_ptr[0],
collisions_d_ptr,
check_potential_collisions) - collisions_d_ptr;
num_collisions_d_ptr[0] = (int) colCount;
}
return num_collisions_d_ptr[0];
}
__host__
int CollisionSystem::find_collisions_N2() {
auto keys_a_start = thrust::make_transform_iterator(start, thrust::placeholders::_1 / N);
auto keys_b_start = thrust::make_transform_iterator(start, thrust::placeholders::_1 % N);
auto keys_zip_start = thrust::make_zip_iterator(thrust::make_tuple(keys_a_start, keys_b_start));
auto keys_zip_end = thrust::make_zip_iterator(thrust::make_tuple(keys_a_start + N * N, keys_b_start + N * N));
hipMemset(potential_collisions_idx_d_ptr, 0, sizeof(unsigned int));
thrust::for_each(thrust::device, keys_zip_start,
keys_zip_end,
check_potential_collisions_N2_func(N * MAX_COLLISIONS_PER_OBJECT,
potential_collisions_idx_d_ptr, x_pos_d_ptr, y_pos_d_ptr, z_pos_d_ptr, radius_d_ptr, collisions_d_ptr));
unsigned int num_collisions_h;
hipMemcpy((void*)&num_collisions_h, (void*)potential_collisions_idx_d_ptr, sizeof(unsigned int), hipMemcpyDeviceToHost);
if (num_collisions_h > MAX_COLLISIONS_PER_OBJECT * N) {
hipMemset(num_collisions_d_ptr, -1, sizeof(int));
return -1;
}
hipMemcpy(num_collisions_d_ptr, &num_collisions_h, sizeof(int), hipMemcpyHostToDevice); // write the actual count (memset would replicate only the low byte)
return num_collisions_h;
}
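// --- Editor's note: illustrative sketch, not part of the original CollisionSystem source. ---
// find_collisions_N2() above enumerates every ordered pair (a, b) of the N objects by mapping
// a linear index i in [0, N*N) through a = i / N and b = i % N with thrust transform
// iterators, and the functor records pairs whose spheres overlap. A minimal reference of that
// pair test (hypothetical helper, assuming the usual squared-distance criterion) could be:
__host__ __device__ inline bool spheres_overlap_reference(
    float ax, float ay, float az, float ar, float bx, float by, float bz, float br) {
  const float dx = ax - bx, dy = ay - by, dz = az - bz;
  const float r = ar + br;
  return dx * dx + dy * dy + dz * dz < r * r; // overlap iff center distance < sum of radii
}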
__device__
int CollisionSystem::find_collisions_N2_device() {
auto keys_a_start = thrust::make_transform_iterator(start, thrust::placeholders::_1 / N);
auto keys_b_start = thrust::make_transform_iterator(start, thrust::placeholders::_1 % N);
auto keys_zip_start = thrust::make_zip_iterator(thrust::make_tuple(keys_a_start, keys_b_start));
auto keys_zip_end = thrust::make_zip_iterator(thrust::make_tuple(keys_a_start + N * N, keys_b_start + N * N));
potential_collisions_idx_d_ptr[0] = 0;
thrust::for_each(thrust::device, keys_zip_start,
keys_zip_end,
check_potential_collisions_N2_func(N * MAX_COLLISIONS_PER_OBJECT,
potential_collisions_idx_d_ptr, x_pos_d_ptr, y_pos_d_ptr, z_pos_d_ptr, radius_d_ptr, collisions_d_ptr));
if (potential_collisions_idx_d_ptr[0] > MAX_COLLISIONS_PER_OBJECT * N) {
return -1;
}
num_collisions_d_ptr[0] = potential_collisions_idx_d_ptr[0];
return potential_collisions_idx_d_ptr[0];
}
__global__ void check_collisions_single(find_potential_collisions_func functor, float pX, float pY, float pZ, float pR, bool *b) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid == 0) {
*b = functor.test_collision(pX, pY, pZ, pR);
}
}
__global__ void find_potential_collisions_kernel(int startIdx, int num, find_potential_collisions_func functor) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < num) {
functor(tid + startIdx);
}
}
__global__ void build_tree_kernel(int startIdx, int num, build_bvh_tree_func functor) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < num) {
functor(tid + startIdx);
}
} | b06de324dd2538ab8bb51c5605eed359dea793f4.cu | //
// Created by David Matthews on 5/21/20.
//
#include "../include/CollisionSystem.cuh"
CollisionSystem::CollisionSystem(size_t n, size_t maxCollisionsPerObject, bool toAllocHostVecs) : N(n), MAX_COLLISIONS_PER_OBJECT(maxCollisionsPerObject) {
start = thrust::counting_iterator<unsigned int>(0);
// allocate memory for positions
// host
allocatedByUs = toAllocHostVecs;
RESERVATION_SIZE = N;
cudaMalloc((void**)&num_collisions_d_ptr, 1 * sizeof(int));
// CUB sort buffer
cub_sort_bytes_size = 1; // enlarge upon request in kernels.
cudaMalloc(&cub_sort_bytes_ptr, cub_sort_bytes_size);
CUDA_CHECK_AFTER_CALL();
set_num_objects(n);
}
CollisionSystem::~CollisionSystem() {
if (allocatedByUs) {
delete[] x_pos_h;
delete[] y_pos_h;
delete[] z_pos_h;
delete[] radius_h;
delete[] host_collisions_a;
}
cudaFree(cub_sort_bytes_ptr);
cudaFree(x_pos_d_ptr);
cudaFree(y_pos_d_ptr);
cudaFree(z_pos_d_ptr);
cudaFree(radius_d_ptr);
cudaFree(tmp_pos_d_ptr);
cudaFree(x_rank_d_ptr);
cudaFree(y_rank_d_ptr);
cudaFree(z_rank_d_ptr);
cudaFree(tmp_id_a_d_ptr);
cudaFree(tmp_id_b_d_ptr);
cudaFree(mortons_d_ptr);
cudaFree(mortons_tmp_d_ptr);
cudaFree(mortons_id_d_ptr);
cudaFree(leaf_parent_d_ptr);
cudaFree(internal_parent_d_ptr);
cudaFree(internal_childA_d_ptr);
cudaFree(internal_childB_d_ptr);
cudaFree(internal_node_bbox_complete_flag_d_ptr);
cudaFree(bounding_boxes_d_ptr);
cudaFree(potential_collisions_idx_d_ptr);
cudaFree(potential_collisions_d_ptr);
cudaFree(collisions_d_ptr);
}
void CollisionSystem::set_num_objects_host(size_t n) {
if (allocatedByUs) {
// avoid need for special case by ensuring host data is not null (only occurs when first constructing)
if (!x_pos_h) { x_pos_h = new float[RESERVATION_SIZE]; }
if (!y_pos_h) { y_pos_h = new float[RESERVATION_SIZE]; }
if (!z_pos_h) { z_pos_h = new float[RESERVATION_SIZE]; }
if (!radius_h) { radius_h = new float[RESERVATION_SIZE]; }
if (!host_collisions_a) { host_collisions_a = new Collision[RESERVATION_SIZE * MAX_COLLISIONS_PER_OBJECT]; }
float *tmp_x, *tmp_y, *tmp_z, *tmp_r;
tmp_x = new float[RESERVATION_SIZE];
thrust::copy(x_pos_h, x_pos_h + N, tmp_x);
delete[] x_pos_h;
x_pos_h = tmp_x;
tmp_y = new float[RESERVATION_SIZE];
thrust::copy(y_pos_h, y_pos_h + N, tmp_y);
delete[] y_pos_h;
y_pos_h = tmp_y;
tmp_z = new float[RESERVATION_SIZE];
thrust::copy(z_pos_h, z_pos_h + N, tmp_z);
delete[] z_pos_h;
z_pos_h = tmp_z;
tmp_r = new float[RESERVATION_SIZE];
thrust::copy(radius_h, radius_h + N, tmp_r);
delete[] radius_h;
radius_h = tmp_r;
auto *tmp_h = new Collision[RESERVATION_SIZE * MAX_COLLISIONS_PER_OBJECT];
thrust::copy(host_collisions_a, host_collisions_a + N, tmp_h);
delete[] host_collisions_a;
host_collisions_a = tmp_h;
}
}
__host__ __device__
void CollisionSystem::set_num_objects_device(size_t n) {
if (N != n) {
requiresRebuild = true;
}
N = n;
if (N > RESERVATION_SIZE) {
RESERVATION_SIZE = N;
} else if (!needAllocate) {
update_device_pointers_and_functors();
return;
}
// device
cudaFree(x_pos_d_ptr);
cudaMalloc((void**)&x_pos_d_ptr, RESERVATION_SIZE * sizeof(float));
CUDA_CHECK_AFTER_CALL();
cudaFree(y_pos_d_ptr);
cudaMalloc((void**)&y_pos_d_ptr, RESERVATION_SIZE * sizeof(float));
CUDA_CHECK_AFTER_CALL();
cudaFree(z_pos_d_ptr);
cudaMalloc((void**)&z_pos_d_ptr, RESERVATION_SIZE * sizeof(float));
CUDA_CHECK_AFTER_CALL();
cudaFree(radius_d_ptr);
cudaMalloc((void**)&radius_d_ptr, RESERVATION_SIZE * sizeof(float));
CUDA_CHECK_AFTER_CALL();
cudaFree(tmp_pos_d_ptr);
cudaMalloc((void**)&tmp_pos_d_ptr, RESERVATION_SIZE * sizeof(float));
CUDA_CHECK_AFTER_CALL();
// alloc rank and id vectors.
cudaFree(x_rank_d_ptr);
cudaMalloc((void**)&x_rank_d_ptr, RESERVATION_SIZE * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
cudaFree(y_rank_d_ptr);
cudaMalloc((void**)&y_rank_d_ptr, RESERVATION_SIZE * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
cudaFree(z_rank_d_ptr);
cudaMalloc((void**)&z_rank_d_ptr, RESERVATION_SIZE * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
cudaFree(tmp_id_a_d_ptr);
cudaMalloc((void**)&tmp_id_a_d_ptr, RESERVATION_SIZE * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
cudaFree(tmp_id_b_d_ptr);
cudaMalloc((void**)&tmp_id_b_d_ptr, RESERVATION_SIZE * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
// allocate Morton number vectors
cudaFree(mortons_d_ptr);
cudaMalloc((void**)&mortons_d_ptr, RESERVATION_SIZE * sizeof(unsigned long long int));
CUDA_CHECK_AFTER_CALL();
cudaFree(mortons_tmp_d_ptr);
cudaMalloc((void**)&mortons_tmp_d_ptr, RESERVATION_SIZE * sizeof(unsigned long long int));
CUDA_CHECK_AFTER_CALL();
cudaFree(mortons_id_d_ptr);
cudaMalloc((void**)&mortons_id_d_ptr, RESERVATION_SIZE * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
// alloc vectors for the BVH tree
// for the leaf nodes
cudaFree(leaf_parent_d_ptr);
cudaMalloc((void**)&leaf_parent_d_ptr, RESERVATION_SIZE * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
// for the internal nodes
cudaFree(internal_parent_d_ptr);
cudaMalloc((void**)&internal_parent_d_ptr, (RESERVATION_SIZE - 1) * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
cudaFree(internal_childA_d_ptr);
cudaMalloc((void**)&internal_childA_d_ptr, (RESERVATION_SIZE - 1) * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
cudaFree(internal_childB_d_ptr);
cudaMalloc((void**)&internal_childB_d_ptr, (RESERVATION_SIZE - 1) * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
// for the bounding boxes for all leaf and internal nodes.
cudaFree(internal_node_bbox_complete_flag_d_ptr);
cudaMalloc((void**)&internal_node_bbox_complete_flag_d_ptr, (RESERVATION_SIZE - 1) * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
cudaFree(bounding_boxes_d_ptr);
cudaMalloc((void**)&bounding_boxes_d_ptr, (2 * RESERVATION_SIZE - 1) * sizeof(BoundingBox));
CUDA_CHECK_AFTER_CALL();
cudaFree(potential_collisions_idx_d_ptr);
cudaMalloc((void**)&potential_collisions_idx_d_ptr, (1) * sizeof(unsigned int));
CUDA_CHECK_AFTER_CALL();
assert(MAX_COLLISIONS_PER_OBJECT * N != 0); // "Size of potential_collisions array must be > 0";
// TODO: can we automatically expand collision memory as needed?
cudaFree(potential_collisions_d_ptr);
cudaMalloc((void**)&potential_collisions_d_ptr, (MAX_COLLISIONS_PER_OBJECT * RESERVATION_SIZE) * sizeof(Collision));
CUDA_CHECK_AFTER_CALL();
cudaFree(collisions_d_ptr);
cudaMalloc((void**)&collisions_d_ptr, (MAX_COLLISIONS_PER_OBJECT * RESERVATION_SIZE) * sizeof(Collision));
CUDA_CHECK_AFTER_CALL();
update_device_pointers_and_functors();
// init flags to zero.
thrust::fill(thrust::device, internal_node_bbox_complete_flag_d_ptr, internal_node_bbox_complete_flag_d_ptr + N - 1, 0);
needAllocate = false;
}
void CollisionSystem::set_max_num_cols_per_mass(size_t m) {
MAX_COLLISIONS_PER_OBJECT = m;
cudaFree(potential_collisions_d_ptr);
cudaMalloc((void**)&potential_collisions_d_ptr, (MAX_COLLISIONS_PER_OBJECT * RESERVATION_SIZE) * sizeof(Collision));
CUDA_CHECK_AFTER_CALL();
cudaFree(collisions_d_ptr);
cudaMalloc((void**)&collisions_d_ptr, (MAX_COLLISIONS_PER_OBJECT * RESERVATION_SIZE) * sizeof(Collision));
CUDA_CHECK_AFTER_CALL();
update_device_pointers_and_functors();
}
__host__ __device__
void CollisionSystem::update_device_pointers_and_functors() {
compute_morton_numbers = init_morton_func(x_rank_d_ptr,
y_rank_d_ptr,
z_rank_d_ptr,
mortons_d_ptr,
mortons_id_d_ptr);
build_bvh_tree = build_bvh_tree_func(N,
mortons_d_ptr,
leaf_parent_d_ptr,
internal_parent_d_ptr,
internal_childA_d_ptr,
internal_childB_d_ptr);
compute_bounding_boxes = fill_bvh_tree_with_bounding_boxes_func(N,
bounding_boxes_d_ptr,
x_pos_d_ptr,
y_pos_d_ptr,
z_pos_d_ptr,
radius_d_ptr,
mortons_id_d_ptr,
leaf_parent_d_ptr,
internal_parent_d_ptr,
internal_childA_d_ptr,
internal_childB_d_ptr,
internal_node_bbox_complete_flag_d_ptr);
find_potential_collisions = find_potential_collisions_func(N,
N * MAX_COLLISIONS_PER_OBJECT,
mortons_id_d_ptr, bounding_boxes_d_ptr, internal_childA_d_ptr, internal_childB_d_ptr, potential_collisions_idx_d_ptr,
potential_collisions_d_ptr, x_pos_d_ptr, y_pos_d_ptr, z_pos_d_ptr, radius_d_ptr);
check_potential_collisions = check_potential_collisions_func(x_pos_d_ptr, y_pos_d_ptr, z_pos_d_ptr, radius_d_ptr);
}
void CollisionSystem::update_all_from_host() {
update_x_pos_from_host();
update_y_pos_from_host();
update_z_pos_from_host();
update_radius_from_host();
}
void CollisionSystem::update_x_pos_from_host() {
cudaMemcpy(x_pos_d_ptr, x_pos_h, sizeof(float) * N, cudaMemcpyHostToDevice);
CUDA_CHECK_AFTER_CALL();
VcudaDeviceSynchronize();
}
void CollisionSystem::update_y_pos_from_host() {
cudaMemcpy(y_pos_d_ptr, y_pos_h, sizeof(float) * N, cudaMemcpyHostToDevice);
CUDA_CHECK_AFTER_CALL();
VcudaDeviceSynchronize();}
void CollisionSystem::update_z_pos_from_host() {
cudaMemcpy(z_pos_d_ptr, z_pos_h, sizeof(float) * N, cudaMemcpyHostToDevice);
CUDA_CHECK_AFTER_CALL();
VcudaDeviceSynchronize();}
void CollisionSystem::update_radius_from_host() {
cudaMemcpy(radius_d_ptr, radius_h, sizeof(float) * N, cudaMemcpyHostToDevice);
CUDA_CHECK_AFTER_CALL();
VcudaDeviceSynchronize();}
void CollisionSystem::init() {
// copy from host to device
update_all_from_host();
// compute ranks
update_x_pos_ranks();
update_y_pos_ranks();
update_z_pos_ranks();
// build and sort mortons
update_mortons();
// build BVH tree
build_tree();
// fill BVH tree with bounding boxes
update_bounding_boxes();
}
__host__ __device__
void CollisionSystem::update_x_pos_ranks() {
// keep track of x object ids after sorting.
thrust::sequence(thrust::device, tmp_id_a_d_ptr, tmp_id_a_d_ptr + N);
size_t curr_size;
cub::DeviceRadixSort::SortPairs(NULL, curr_size, x_pos_d_ptr, tmp_pos_d_ptr, tmp_id_a_d_ptr, tmp_id_b_d_ptr, N);
if (curr_size > cub_sort_bytes_size) {
cub_sort_bytes_size = curr_size;
cudaFree(cub_sort_bytes_ptr);
cudaMalloc(&cub_sort_bytes_ptr, cub_sort_bytes_size);
}
cub::DeviceRadixSort::SortPairs(cub_sort_bytes_ptr, curr_size, x_pos_d_ptr, tmp_pos_d_ptr, tmp_id_a_d_ptr, tmp_id_b_d_ptr, N);
// save the new rank information
thrust::scatter(thrust::device, start, start + N, tmp_id_b_d_ptr, x_rank_d_ptr);
}
__host__ __device__
void CollisionSystem::update_y_pos_ranks() {
// keep track of y object ids after sorting
thrust::sequence(thrust::device, tmp_id_a_d_ptr, tmp_id_a_d_ptr + N);
// sort the positions to determine rank.
size_t curr_size;
cub::DeviceRadixSort::SortPairs(NULL, curr_size, y_pos_d_ptr, tmp_pos_d_ptr, tmp_id_a_d_ptr, tmp_id_b_d_ptr, N);
if (curr_size > cub_sort_bytes_size) {
cub_sort_bytes_size = curr_size;
cudaFree(cub_sort_bytes_ptr);
cudaMalloc(&cub_sort_bytes_ptr, cub_sort_bytes_size);
}
cub::DeviceRadixSort::SortPairs(cub_sort_bytes_ptr, curr_size, y_pos_d_ptr, tmp_pos_d_ptr, tmp_id_a_d_ptr, tmp_id_b_d_ptr, N);
// save the new rank information
thrust::scatter(thrust::device, start, start + N, tmp_id_b_d_ptr, y_rank_d_ptr);
}
__host__ __device__
void CollisionSystem::update_z_pos_ranks() {
// keep track of z object ids after sorting
thrust::sequence(thrust::device, tmp_id_a_d_ptr, tmp_id_a_d_ptr + N);
// sort the positions to determine rank.
size_t curr_size;
cub::DeviceRadixSort::SortPairs(NULL, curr_size, z_pos_d_ptr, tmp_pos_d_ptr, tmp_id_a_d_ptr, tmp_id_b_d_ptr, N);
if (curr_size > cub_sort_bytes_size) {
cub_sort_bytes_size = curr_size;
cudaFree(cub_sort_bytes_ptr);
cudaMalloc(&cub_sort_bytes_ptr, cub_sort_bytes_size);
}
cub::DeviceRadixSort::SortPairs(cub_sort_bytes_ptr, curr_size, z_pos_d_ptr, tmp_pos_d_ptr, tmp_id_a_d_ptr, tmp_id_b_d_ptr, N);
// save the new rank information
thrust::scatter(thrust::device, start, start + N, tmp_id_b_d_ptr, z_rank_d_ptr);
}
__host__ __device__
void CollisionSystem::update_mortons() {
// keep track of object ids after sorting.
thrust::sequence(thrust::device, tmp_id_a_d_ptr, tmp_id_a_d_ptr + N);
// build morton numbers.
thrust::for_each(thrust::device, start, start + N, compute_morton_numbers);
thrust::copy(thrust::device, mortons_d_ptr, mortons_d_ptr + N, mortons_tmp_d_ptr); // copy mortons to tmp array as source for sorting.
// sort morton numbers
size_t curr_size;
cub::DeviceRadixSort::SortPairs(NULL, curr_size, mortons_tmp_d_ptr, mortons_d_ptr, tmp_id_a_d_ptr, mortons_id_d_ptr, N);
if (curr_size > cub_sort_bytes_size) {
cub_sort_bytes_size = curr_size;
cudaFree(cub_sort_bytes_ptr);
cudaMalloc(&cub_sort_bytes_ptr, cub_sort_bytes_size);
}
cub::DeviceRadixSort::SortPairs(cub_sort_bytes_ptr, curr_size, mortons_tmp_d_ptr, mortons_d_ptr, tmp_id_a_d_ptr, mortons_id_d_ptr, N);
}
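// --- Editor's note: illustrative sketch, not part of the original CollisionSystem source. ---
// The rank/morton updates above all use CUB's two-phase pattern: a first SortPairs call with a
// null temp-storage pointer only reports the required scratch size, the cached buffer is grown
// if needed, and a second call performs the sort. The stand-alone helper below (hypothetical
// name radix_sort_pairs_cached) condenses that pattern:
template <typename KeyT, typename ValueT>
void radix_sort_pairs_cached(void *&scratch, size_t &scratch_bytes, const KeyT *keys_in,
    KeyT *keys_out, const ValueT *vals_in, ValueT *vals_out, int n) {
  size_t bytes = 0;
  // phase 1: query required temporary storage (no sorting happens here)
  cub::DeviceRadixSort::SortPairs(nullptr, bytes, keys_in, keys_out, vals_in, vals_out, n);
  if (bytes > scratch_bytes) { // grow the cached scratch buffer only when it is too small
    cudaFree(scratch);
    cudaMalloc(&scratch, bytes);
    scratch_bytes = bytes;
  }
  // phase 2: the actual sort using the scratch buffer
  cub::DeviceRadixSort::SortPairs(scratch, bytes, keys_in, keys_out, vals_in, vals_out, n);
}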
__host__ __device__
void CollisionSystem::update_mortons_fast(float2 xlims, float2 ylims, float2 zlims) {
thrust::sequence(thrust::device, mortons_id_d_ptr, mortons_id_d_ptr + N);
// build morton numbers using the Karras method.
// this will be faster if we are not simulating swarms of particles (e.g. if voxels are evenly distributed across range)
// but slower if we are simulating swarms of particles that encompass large amounts of area and are not evenly distributed
// e.g. voxels clumping to form new robots would likely be slower with this method.
thrust::for_each(thrust::device, start,
start + N,
init_morton_func_fast(xlims,
ylims,
zlims,
x_pos_d_ptr, y_pos_d_ptr, z_pos_d_ptr, mortons_d_ptr, mortons_id_d_ptr));
// sort morton numbers
thrust::sort_by_key(thrust::device, mortons_d_ptr, mortons_d_ptr + N, mortons_id_d_ptr);
}
__host__ __device__
void CollisionSystem::build_tree() {
build_bvh_tree.N = N;
thrust::for_each(thrust::device, start, start + N - 1 , build_bvh_tree);
// int num_SM, curDeviceId, gridSize, blockSize;
// cudaGetDevice(&curDeviceId);
// cudaDeviceGetAttribute(&num_SM, cudaDevAttrMultiProcessorCount, curDeviceId);
// blockSize = (int)(N-1)/num_SM;
// if (num_SM * blockSize < (N-1)) {
// blockSize += 1;
// }
// if (blockSize > 256) {
// blockSize = 256;
// gridSize = ((int)N + 254)/256; // N - 1 + 255 leaf nodes.
// } else {
// gridSize = num_SM;
// }
// build_tree_kernel<<<gridSize, blockSize>>>(0, N - 1, build_bvh_tree);
// CUDA_CHECK_AFTER_CALL();
// VcudaDeviceSynchronize();
}
__host__ __device__
void CollisionSystem::update_bounding_boxes() {
compute_bounding_boxes.N = N;
thrust::for_each(thrust::device, start, start + N, compute_bounding_boxes);
}
__host__
bool CollisionSystem::check_collisions(float pX, float pY, float pZ, float pR) {
thrust::device_vector<bool> result(1);
check_collisions_single<<<1,1>>>(find_potential_collisions, pX, pY, pZ, pR, thrust::raw_pointer_cast(result.data()));
return result[0];
}
__device__
bool CollisionSystem::check_collisions_device(float pX, float pY, float pZ, float pR) {
return find_potential_collisions.test_collision(pX, pY, pZ, pR);
}
__host__
int CollisionSystem::find_collisions() {
cudaMemset(potential_collisions_idx_d_ptr, 0, sizeof(unsigned int));
find_potential_collisions.N = N;
find_potential_collisions.NUM_INTERNAL = N - 1;
thrust::for_each(thrust::device, start + N - 1, start + 2 * N - 1, find_potential_collisions);
unsigned int h_potential_collision_idx;
cudaMemcpy((void*)&h_potential_collision_idx, (void*)potential_collisions_idx_d_ptr, sizeof(unsigned int), cudaMemcpyDeviceToHost);
if (h_potential_collision_idx > MAX_COLLISIONS_PER_OBJECT * N) {
cudaMemset(num_collisions_d_ptr, -1, sizeof(int));
return -1;
}
unsigned int colCount = thrust::copy_if(thrust::device, potential_collisions_d_ptr,
potential_collisions_d_ptr + h_potential_collision_idx,
collisions_d_ptr,
check_potential_collisions) - collisions_d_ptr;
cudaMemcpy(num_collisions_d_ptr, &colCount, sizeof(int), cudaMemcpyHostToDevice); // write the actual count (memset would replicate only the low byte)
return colCount;
}
__device__
int CollisionSystem::find_collisions_device(int pruneLevel) {
potential_collisions_idx_d_ptr[0] = 0;
find_potential_collisions.N = N;
find_potential_collisions.NUM_INTERNAL = N - 1;
int num_SM, curDeviceId, gridSize, blockSize;
cudaGetDevice(&curDeviceId);
cudaDeviceGetAttribute(&num_SM, cudaDevAttrMultiProcessorCount, curDeviceId);
blockSize = (int)N/num_SM;
if (num_SM * blockSize < N) {
blockSize += 1;
}
if (blockSize > 256) {
blockSize = 256;
gridSize = ((int)N + 255)/256;
} else {
gridSize = num_SM;
}
find_potential_collisions_kernel<<<gridSize, blockSize>>>(N - 1, N, find_potential_collisions);
CUDA_CHECK_AFTER_CALL();
VcudaDeviceSynchronize();
if (potential_collisions_idx_d_ptr[0] > MAX_COLLISIONS_PER_OBJECT * N) {
num_collisions_d_ptr[0] = -1;
return -1;
}
if (pruneLevel == 0) {
num_collisions_d_ptr[0] = (int) potential_collisions_idx_d_ptr[0];
thrust::copy(thrust::device, potential_collisions_d_ptr,potential_collisions_d_ptr + potential_collisions_idx_d_ptr[0], collisions_d_ptr);
} else {
unsigned int colCount = thrust::copy_if(thrust::device, potential_collisions_d_ptr,
potential_collisions_d_ptr + potential_collisions_idx_d_ptr[0],
collisions_d_ptr,
check_potential_collisions) - collisions_d_ptr;
num_collisions_d_ptr[0] = (int) colCount;
}
return num_collisions_d_ptr[0];
}
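// --- Editor's note: illustrative worked example, not part of the original source. ---
// The launch configuration above first spreads the N leaf threads evenly over the
// multiprocessors (blockSize = ceil(N / num_SM)) and only falls back to fixed 256-thread
// blocks when that per-SM share exceeds 256. For instance, with N = 10000 and num_SM = 20:
// blockSize = 500 -> capped to 256, gridSize = (10000 + 255) / 256 = 40 blocks; with
// N = 1000 and num_SM = 20: blockSize = 50 and gridSize = num_SM = 20 blocks.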
__host__
int CollisionSystem::find_collisions_N2() {
auto keys_a_start = thrust::make_transform_iterator(start, thrust::placeholders::_1 / N);
auto keys_b_start = thrust::make_transform_iterator(start, thrust::placeholders::_1 % N);
auto keys_zip_start = thrust::make_zip_iterator(thrust::make_tuple(keys_a_start, keys_b_start));
auto keys_zip_end = thrust::make_zip_iterator(thrust::make_tuple(keys_a_start + N * N, keys_b_start + N * N));
cudaMemset(potential_collisions_idx_d_ptr, 0, sizeof(unsigned int));
thrust::for_each(thrust::device, keys_zip_start,
keys_zip_end,
check_potential_collisions_N2_func(N * MAX_COLLISIONS_PER_OBJECT,
potential_collisions_idx_d_ptr, x_pos_d_ptr, y_pos_d_ptr, z_pos_d_ptr, radius_d_ptr, collisions_d_ptr));
unsigned int num_collisions_h;
cudaMemcpy((void*)&num_collisions_h, (void*)potential_collisions_idx_d_ptr, sizeof(unsigned int), cudaMemcpyDeviceToHost);
if (num_collisions_h > MAX_COLLISIONS_PER_OBJECT * N) {
cudaMemset(num_collisions_d_ptr, -1, sizeof(int));
return -1;
}
cudaMemcpy(num_collisions_d_ptr, &num_collisions_h, sizeof(int), cudaMemcpyHostToDevice); // write the actual count (memset would replicate only the low byte)
return num_collisions_h;
}
__device__
int CollisionSystem::find_collisions_N2_device() {
auto keys_a_start = thrust::make_transform_iterator(start, thrust::placeholders::_1 / N);
auto keys_b_start = thrust::make_transform_iterator(start, thrust::placeholders::_1 % N);
auto keys_zip_start = thrust::make_zip_iterator(thrust::make_tuple(keys_a_start, keys_b_start));
auto keys_zip_end = thrust::make_zip_iterator(thrust::make_tuple(keys_a_start + N * N, keys_b_start + N * N));
potential_collisions_idx_d_ptr[0] = 0;
thrust::for_each(thrust::device, keys_zip_start,
keys_zip_end,
check_potential_collisions_N2_func(N * MAX_COLLISIONS_PER_OBJECT,
potential_collisions_idx_d_ptr, x_pos_d_ptr, y_pos_d_ptr, z_pos_d_ptr, radius_d_ptr, collisions_d_ptr));
if (potential_collisions_idx_d_ptr[0] > MAX_COLLISIONS_PER_OBJECT * N) {
return -1;
}
num_collisions_d_ptr[0] = potential_collisions_idx_d_ptr[0];
return potential_collisions_idx_d_ptr[0];
}
__global__ void check_collisions_single(find_potential_collisions_func functor, float pX, float pY, float pZ, float pR, bool *b) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid == 0) {
*b = functor.test_collision(pX, pY, pZ, pR);
}
}
__global__ void find_potential_collisions_kernel(int startIdx, int num, find_potential_collisions_func functor) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < num) {
functor(tid + startIdx);
}
}
__global__ void build_tree_kernel(int startIdx, int num, build_bvh_tree_func functor) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < num) {
functor(tid + startIdx);
}
} |
526cb6803c2f8053598af7b560d3e731abc18bab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
using namespace std;
__global__ void add(int a, int b, int *c) // kernel function: runs on the GPU
{
*c = a + b;
} | 526cb6803c2f8053598af7b560d3e731abc18bab.cu | #include "includes.h"
using namespace std;
__global__ void add(int a, int b, int *c) // kernel function: runs on the GPU
{
*c = a + b;
} |
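// --- Editor's note: illustrative usage sketch for the add kernel above; not part of either
// original file. A minimal host-side driver (assuming the standard CUDA runtime API) would
// allocate the result on the device, launch a single thread, and copy the sum back:
#include <cstdio>
int main() {
  int *c_d = nullptr, c_h = 0;
  cudaMalloc(&c_d, sizeof(int));
  add<<<1, 1>>>(2, 3, c_d); // one thread suffices: the kernel writes a single value
  cudaMemcpy(&c_h, c_d, sizeof(int), cudaMemcpyDeviceToHost);
  cudaFree(c_d);
  printf("2 + 3 = %d\n", c_h); // prints: 2 + 3 = 5
  return 0;
}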
267cf2019e3f7c1cbbec3aa96293c75b2456f952.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* (C) Copyright 2020, 2021 IBM. All Rights Reserved.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "bit_line_maker.h"
#include "update_management_helper.h"
#include <cmath>
#include <iostream>
#include <memory>
#include "cuda_math_util.h"
#include "cuda_util.h"
#include "io_iterator.h"
#include <hipcub/hipcub.hpp>
namespace RPU {
// /*********************************************************************************/
/*--------- K<32 special path for large batch sizes. -------------------------------*/
template <bool ublm>
__device__ __forceinline__ kagg_t getKn(const int m_batch, const int BL, const kagg_t *nK);
template <bool ublm>
__device__ __forceinline__ kagg_t getBlockAggregate(
const int m_batch,
const int BL,
int tid_stride,
int thread_block_size,
kagg_t *Kc_block_aggregate);
template <bool ublm>
__device__ __forceinline__ int getK(int batch_idx, const int BL, int *K_values);
template <bool ublm>
__device__ __forceinline__ kagg_t
getCurrentKc(int batch_idx, const int BL, kagg_t *Kc_block, kagg_t Kn_aggregate);
template <>
__device__ __forceinline__ kagg_t getKn<false>(const int m_batch, const int BL, const kagg_t *Kn) {
return m_batch * BL;
}
template <>
__device__ __forceinline__ kagg_t getKn<true>(const int m_batch, const int BL, const kagg_t *Kn) {
return *Kn;
}
template <>
__device__ __forceinline__ kagg_t getBlockAggregate<false>(
const int m_batch,
const int BL,
int tid_stride,
int thread_block_size,
kagg_t *Kc_block_aggregate) {
if ((thread_block_size + tid_stride) >= m_batch)
return (m_batch % thread_block_size) * BL;
else
return thread_block_size * BL;
}
template <>
__device__ __forceinline__ kagg_t getBlockAggregate<true>(
const int m_batch,
const int BL,
int tid_stride,
int thread_block_size,
kagg_t *Kc_block_aggregate) {
int bid = tid_stride / thread_block_size;
return Kc_block_aggregate[bid];
}
template <> __device__ __forceinline__ int getK<false>(int batch_idx, const int BL, int *K_values) {
return BL;
}
template <> __device__ __forceinline__ int getK<true>(int batch_idx, const int BL, int *K_values) {
return K_values[batch_idx];
}
template <>
__device__ __forceinline__ kagg_t
getCurrentKc<false>(int batch_idx, const int BL, kagg_t *Kc_block, kagg_t Kn_aggregate) {
return batch_idx * BL - Kn_aggregate;
}
template <>
__device__ __forceinline__ kagg_t
getCurrentKc<true>(int batch_idx, const int BL, kagg_t *Kc_block, kagg_t Kn_aggregate) {
return Kc_block[batch_idx];
}
template <bool update_bl_management = false, int thread_block_size = 512>
__global__ void kernelTranslateTransFormatToBatchOrder64Format(
const uint32_t *x_counts,
uint64_t *x_counts_BO64_format,
int x_size_in,
const uint32_t *d_counts,
uint64_t *d_counts_BO64_format,
int d_size_in,
const int m_batch_in,
const int BL_in,
kagg_t *Kn_in = nullptr,
int *K_values_in = nullptr,
kagg_t *Kc_block_in = nullptr,
kagg_t *Kc_block_aggregate_in = nullptr) {
// -- each block takes one x/d value.
// -- expects OUTTRANS format !!
__shared__ uint32_t c_shared[thread_block_size];
__shared__ uint32_t neg_shared[thread_block_size];
const int m_batch = m_batch_in;
const int BL = BL_in;
kagg_t Kn = getKn<update_bl_management>(m_batch, BL, Kn_in);
const int x_size = x_size_in;
const int d_size = d_size_in;
const int add_size = x_size + d_size;
int nB = ((Kn + 31) >> 5); // compressed K on batch
// loop xd indeces
for (int bid_stride = 0; bid_stride < add_size; bid_stride += gridDim.x) {
int bid = blockIdx.x + bid_stride;
// select x or d
const uint32_t *counts;
uint64_t *out_counts;
int xd_index;
if (bid < x_size) {
counts = x_counts;
out_counts = x_counts_BO64_format;
xd_index = bid;
} else if (bid < add_size) {
counts = d_counts;
out_counts = d_counts_BO64_format;
xd_index = bid - x_size;
} else {
return;
}
const int start_idx = xd_index * m_batch; // expects trans order !!
const int out_start_idx = xd_index * nB; // reduced batch size
int total_nB = 0;
uint32_t last_neg = 0;
uint32_t last_c = 0;
int current_nB = 0;
kagg_t Kc_aggregate = 0;
int K_left_over = 0;
// loop over batch
for (int tid_stride = 0; tid_stride < m_batch; tid_stride += blockDim.x) {
if (threadIdx.x > 0) {
c_shared[threadIdx.x] = 0;
neg_shared[threadIdx.x] = 0;
}
if (threadIdx.x == current_nB) { // to avoid a sync, see below
c_shared[0] = last_c;
neg_shared[0] = last_neg;
}
const int batch_idx = threadIdx.x + tid_stride;
kagg_t Kc_block_aggregate = getBlockAggregate<update_bl_management>(
m_batch, BL, tid_stride, thread_block_size, Kc_block_aggregate_in);
kagg_t current_Kc = Kc_block_aggregate;
int K = 0;
if (batch_idx < m_batch) {
K = getK<update_bl_management>(batch_idx, BL, K_values_in);
current_Kc = getCurrentKc<update_bl_management>(batch_idx, BL, Kc_block_in, Kc_aggregate);
}
Kc_block_aggregate += K_left_over;
current_Kc += K_left_over;
__syncthreads(); // need to sync for shared
if (batch_idx < m_batch) {
uint32_t c = counts[start_idx + batch_idx];
uint32_t negative = 0;
if ((c & ((uint32_t)1)) > 0) {
negative = 0xffffffff >> (32 - K);
}
c >>= 1; // get rid of negative bit
// set bit in shared
int i_word_start = current_Kc >> 5;
int i_word_end = (current_Kc + K) >> 5;
int i_bit_start = current_Kc & 0x1f;
atomicOr(&c_shared[i_word_start], c << i_bit_start);
atomicOr(&neg_shared[i_word_start], negative << i_bit_start);
if (i_word_start != i_word_end) { // most 31 bits per batch, so only 1 overlap possible
atomicOr(
&c_shared[i_word_end],
c >> (32 - i_bit_start)); // (32 - i_bit_start) first bits were already set above
atomicOr(&neg_shared[i_word_end], negative >> (32 - i_bit_start));
}
}
__syncthreads();
Kc_aggregate += Kc_block_aggregate;
kagg_t current_nB =
Kc_block_aggregate >> 5; // there might be some left over bits. put into next round
bool last_loop = tid_stride + blockDim.x >= m_batch;
K_left_over = Kc_aggregate & 0x1f;
bool left_overs = K_left_over > 0;
if ((threadIdx.x < current_nB) || ((threadIdx.x == current_nB) && last_loop && left_overs)) {
uint64_t c64 =
(((uint64_t)neg_shared[threadIdx.x]) << 32) | ((uint64_t)c_shared[threadIdx.x]);
out_counts[out_start_idx + total_nB + threadIdx.x] = c64;
} else if ((threadIdx.x == current_nB) && left_overs) { // save left overs
last_neg = neg_shared[current_nB];
last_c = c_shared[current_nB];
}
total_nB += current_nB;
}
}
}
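// --- Editor's note: illustrative worked example, not part of the original RPU source. ---
// The kernel above concatenates, per x/d index, the K pulse bits of every batch item into one
// bitstream of length Kn = sum_i K_i and emits it as 64-bit words: bit b of the lower half
// marks "pulse fired" at stream position b, and bit b of the upper half carries the sign flag,
// replicated across all K positions of that batch item. For example, with two batch items,
// K_0 = 3 with pulse bits 0b101 (positive) and K_1 = 2 with pulse bits 0b11 (negative), the
// stream is 5 bits long and fits into one word:
//   lower 32 bits: 0b11101 (positions 0..2 from item 0, positions 3..4 from item 1)
//   upper 32 bits: 0b11000 (only item 1's positions carry the negative flag)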
namespace test_helper {
template <typename T, bool ublm>
int debugKernelTranslateTransFormatToBatchOrder64Format(
T *indata, int size, int m_batch, T scaleprob, int K) {
// counts should be: size*nk32 allocated !
if (K > 31)
return 1;
DebugPulsedUpdateMetaParameter<T> up;
up.res = 0.01;
up.sto_round = false;
up.update_bl_management = ublm;
up.update_management = ublm;
up.scaleprob = scaleprob;
up.desired_BL = K;
std::cout << "m_batch: " << m_batch << " size: " << size << std::endl;
const int nthreads = RPU_THREADS_PER_BLOCK_UPDATE;
CUDA_TIMING_INIT;
CudaContext c{-1, false};
T *tmp = new T[size * m_batch];
for (int i = 0; i < m_batch; i++) {
for (int j = 0; j < size; j++) {
tmp[i * size + j] = indata[j];
}
}
CudaArray<T> dev_indata(&c, size * m_batch, tmp);
c.synchronize();
delete[] tmp;
T dwmin = 0.001;
T lr = 0.01;
BitLineMaker<T> blm(&c, size, size);
blm.makeCounts(
dev_indata.getData(), dev_indata.getData(), up, dwmin, lr, m_batch, false, false, true, 2,
false); // compute B64 to init buffer for below
UpdateManagementHelper<T> *umh = blm.getUmh();
c.synchronize();
int nBmax = m_batch; // at most m_batch, likely smaller
CudaArray<uint64_t> dev_counts_out(&c, size * nBmax);
CudaArray<uint64_t> dev_counts_out2(&c, size * nBmax);
int nblocks = size + size;
std::cout << "nblocks, nthreads: " << nblocks << ", " << nthreads << std::endl;
CUDA_TIMING_START(c);
kagg_t *nK = nullptr;
int *K_values = nullptr;
kagg_t *Kc_block = nullptr;
kagg_t *Kc_block_aggregate = nullptr;
if (ublm) {
// redo computation for timing
umh->computeKn(m_batch); // needs explicit buffer init. see above.
nK = umh->getKnData(true);
K_values = umh->getKValueData();
umh->computeKcBlock(m_batch);
Kc_block = umh->getKcBlockData();
Kc_block_aggregate = umh->getKcBlockAggregateData();
}
CUDA_TIMING_STOP(c, "get Kn/Kcblock ");
CUDA_TIMING_START(c);
hipLaunchKernelGGL(( kernelTranslateTransFormatToBatchOrder64Format<ublm, nthreads>)
, dim3(nblocks), dim3(nthreads), 0, c.getStream(),
blm.getXCountsData(), dev_counts_out.getData(), size, blm.getDCountsData(),
dev_counts_out2.getData(), size, m_batch, K, nK, K_values, Kc_block, Kc_block_aggregate);
CUDA_TIMING_STOP(c, "Counts translated");
kagg_t Kn = 0;
if (ublm)
Kn = umh->getKnValue();
else
Kn = m_batch * K;
kagg_t nB = (Kn + 31) / 32;
// check translated:
int *Kvalues = new int[m_batch];
if (ublm)
umh->getKValues().copyTo(Kvalues);
uint32_t *orig_counts = new uint32_t[m_batch * size];
uint64_t *counts_out = new uint64_t[m_batch * size];
uint64_t *counts_out_ref = new uint64_t[m_batch * size];
dev_counts_out2.copyTo(counts_out);
blm.copyDCountsToHost(orig_counts);
for (int j = 0; j < m_batch * size; j++) {
counts_out_ref[j] = 0;
}
c.synchronize();
int return_int = 0;
kagg_t Kc = 0;
for (int i_batch = 0; i_batch < m_batch; i_batch++) {
if (ublm)
Kc += Kvalues[i_batch];
else
Kc += K;
}
int nBref = (Kc + 31) >> 5;
uint32_t one = 1;
// translate reference
for (int idx = 0; idx < size; idx++) {
Kc = 0;
for (int i_batch = 0; i_batch < m_batch; i_batch++) {
uint32_t c = orig_counts[i_batch + m_batch * idx];
uint32_t neg = c & one;
c >>= 1; // get rid of sign bit;
int k = K;
if (ublm)
k = Kvalues[i_batch];
for (int i = 0; i < k; i++) { // k is smaller than 32 because nK32==1
kagg_t current_cK = Kc + i;
kagg_t iB = (current_cK) >> 5;
int ibit = (current_cK)&0x1f;
if ((c & (one << i)) > 0) {
counts_out_ref[iB + idx * nBref] |= ((uint64_t)1) << ibit;
}
if (neg > 0) {
counts_out_ref[iB + idx * nBref] |= ((uint64_t)1) << (ibit + 32);
}
}
Kc += k;
}
}
std::cout << "nB should be " << nBref << " and is " << nB << ".\n";
if (nB != nBref) {
return_int = 1;
}
for (int j = 0; j < nBref * size; j++) {
if (counts_out_ref[j] != counts_out[j]) {
std::cerr << j << ":" << counts_out[j] << " should be " << counts_out_ref[j] << std::endl;
return_int = 1;
}
if ((j > 100) && return_int)
break;
}
delete[] counts_out;
delete[] orig_counts;
delete[] counts_out_ref;
delete[] Kvalues;
CUDA_TIMING_DESTROY;
return return_int;
}
template int
debugKernelTranslateTransFormatToBatchOrder64Format<float, true>(float *, int, int, float, int);
template int
debugKernelTranslateTransFormatToBatchOrder64Format<float, false>(float *, int, int, float, int);
#ifdef RPU_USE_DOUBLE
template int
debugKernelTranslateTransFormatToBatchOrder64Format<double, true>(double *, int, int, double, int);
template int
debugKernelTranslateTransFormatToBatchOrder64Format<double, false>(double *, int, int, double, int);
#endif
} // namespace test_helper
template <typename T>
__global__ void kernelUMGetScaleAndKValues(
T *scale_values,
int *K_values,
float *x_amax_values,
float *d_amax_values,
const int m_batch,
const bool ublm_in,
const T dw_min_in,
const T lr_in,
const int Kmax_in) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
bool ublm = ublm_in;
T dw_min = dw_min_in;
T lr = lr_in;
T regularizer = dw_min * dw_min;
if (tid < m_batch) {
T x_amax = MAX(x_amax_values[tid], regularizer);
T d_amax = MAX(d_amax_values[tid], regularizer);
scale_values[tid] = sqrt(x_amax / d_amax);
if (ublm) {
int Kmax = Kmax_in;
int K = ceil(lr * x_amax * d_amax / dw_min);
K_values[tid] = (K <= Kmax) ? K : Kmax;
}
// note: K values are not set when ublm is false
}
}
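// Worked example (illustrative numbers, not from the source): with lr = 0.01,
// x_amax = d_amax = 1.0 and dw_min = 0.001, K = ceil(0.01 * 1.0 * 1.0 / 0.001) = 10
// pulses (clipped to Kmax if larger), and the update-management scale is
// sqrt(x_amax / d_amax) = 1.0.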
template <int thread_block_size>
__global__ void kernelGetKBlockAggregate(
int *K_values, int m_batch_in, kagg_t *Kc_block, kagg_t *Kc_block_aggregate) {
const int m_batch = m_batch_in;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ typename RPU::hipcub::BlockScan<kagg_t, thread_block_size>::TempStorage temp_storage;
int K = 0;
if (tid < m_batch) {
K = K_values[tid];
}
kagg_t Kc = 0;
kagg_t block_aggregate = 0;
RPU::hipcub::BlockScan<kagg_t, thread_block_size>(temp_storage).ExclusiveSum(K, Kc, block_aggregate);
if (tid < m_batch) {
Kc_block[tid] = Kc;
}
if (threadIdx.x == 0) {
Kc_block_aggregate[blockIdx.x] = block_aggregate;
}
}
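// The exclusive block scan above produces, for each batch index, the bit offset of its
// pulse train relative to the start of its thread block (Kc_block), plus one aggregate per
// block (Kc_block_aggregate) that the BO64 translation kernel uses to stitch blocks together.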
/*********************************************************************************************************************/
/* UPDATEMANAGERHELPER */
/*********************************************************************************************************************/
#define RPU_UMH_B64_NTHREADS 512
template <typename T>
UpdateManagementHelper<T>::UpdateManagementHelper(CudaContext *c, int x_size, int d_size)
: context_{c}, x_size_{x_size}, d_size_{d_size}, buffer_m_batch_{0} {
nthreads_ = RPU_THREADS_PER_BLOCK_UPDATE;
x_maximizer_ = RPU::make_unique<Maximizer<T>>(c, x_size_);
d_maximizer_ = RPU::make_unique<Maximizer<T>>(c, d_size_);
dev_Kn_ = RPU::make_unique<CudaArray<kagg_t>>(c, 1);
}
template <typename T> void UpdateManagementHelper<T>::initializeBuffers(int m_batch) {
buffer_m_batch_ = m_batch;
dev_K_values_ = RPU::make_unique<CudaArray<int>>(context_, m_batch);
dev_Kc_values_ = RPU::make_unique<CudaArray<kagg_t>>(context_, m_batch);
dev_scale_values_ = RPU::make_unique<CudaArray<T>>(context_, m_batch);
// for translate
const int nthreads = RPU_UMH_B64_NTHREADS;
int nblocks = context_->getNBlocks(m_batch, nthreads);
dev_Kc_block_ = RPU::make_unique<CudaArray<kagg_t>>(context_, m_batch);
dev_Kc_block_aggregate_ = RPU::make_unique<CudaArray<kagg_t>>(context_, nblocks);
// Determine temporary device storage requirements
void *temp_storage = NULL;
size_t temp_storage_bytes = 0;
CUDA_CALL(RPU::hipcub::DeviceReduce::Sum(
temp_storage, temp_storage_bytes, dev_K_values_->getData(), dev_Kn_->getData(), m_batch,
context_->getStream()));
context_->synchronize();
dev_Kn_temp_storage_ = RPU::make_unique<CudaArray<char>>(context_, (int)temp_storage_bytes);
context_->synchronize();
CUDA_CALL(RPU::hipcub::DeviceScan::ExclusiveSum(
temp_storage, temp_storage_bytes, dev_K_values_->getData(), dev_Kc_values_->getData(),
m_batch, context_->getStream()));
context_->synchronize();
dev_Kc_temp_storage_ = RPU::make_unique<CudaArray<char>>(context_, (int)temp_storage_bytes);
context_->synchronize();
}
template <typename T> void UpdateManagementHelper<T>::computeKcBlock(int m_batch) {
// CAUTION: needs K_values to be already computed !!
const int nthreads = RPU_UMH_B64_NTHREADS;
int nblocks = context_->getNBlocks(m_batch, nthreads);
hipLaunchKernelGGL(( kernelGetKBlockAggregate<nthreads>), dim3(nblocks), dim3(nthreads), 0, context_->getStream(),
dev_K_values_->getData(), m_batch, dev_Kc_block_->getData(),
dev_Kc_block_aggregate_->getData());
}
template <typename T> void UpdateManagementHelper<T>::computeKc(int m_batch) {
// CAUTION: needs K_values to be already computed !!
size_t temp_storage_bytes = dev_Kc_temp_storage_->getSize();
CUDA_CALL(RPU::hipcub::DeviceScan::ExclusiveSum(
(void *)dev_Kc_temp_storage_->getData(), temp_storage_bytes, dev_K_values_->getData(),
dev_Kc_values_->getData(), m_batch, context_->getStream()));
}
template <typename T> void UpdateManagementHelper<T>::computeKn(int m_batch) {
// CAUTION: needs K_values to be already computed !!
size_t temp_storage_bytes = dev_Kn_temp_storage_->getSize();
CUDA_CALL(RPU::hipcub::DeviceReduce::Sum(
(void *)dev_Kn_temp_storage_->getData(), temp_storage_bytes, dev_K_values_->getData(),
dev_Kn_->getData(), m_batch, context_->getStream()));
}
template <typename T>
void UpdateManagementHelper<T>::translateTransToBatchOrder64(
uint64_t *x_counts_bo64,
uint64_t *d_counts_bo64,
const uint32_t *x_counts,
const uint32_t *d_counts,
const int m_batch,
const int BL,
const bool update_bl_management) {
// needs K values to be precomputed for ublm !!
if (BL > 31) {
RPU_FATAL("ERROR: BO64 format only supported for BL<32");
}
if (buffer_m_batch_ < m_batch) {
this->initializeBuffers(m_batch);
}
const int nthreads = RPU_UMH_B64_NTHREADS; // how many ? test...
int nblocks = d_size_ + x_size_;
if (update_bl_management) {
this->computeKcBlock(m_batch);
this->computeKn(m_batch);
hipLaunchKernelGGL(( kernelTranslateTransFormatToBatchOrder64Format<true, nthreads>)
, dim3(nblocks), dim3(nthreads), 0, context_->getStream(),
x_counts, x_counts_bo64, x_size_, d_counts, d_counts_bo64, d_size_, m_batch, BL,
this->getKnData(true), this->getKValueData(), this->getKcBlockData(),
this->getKcBlockAggregateData());
// context_->synchronize();
} else {
// no update bl management
hipLaunchKernelGGL(( kernelTranslateTransFormatToBatchOrder64Format<false, nthreads>)
, dim3(nblocks), dim3(nthreads), 0, context_->getStream(),
x_counts, x_counts_bo64, x_size_, d_counts, d_counts_bo64, d_size_, m_batch, BL);
}
}
template <typename T>
template <typename XInputIteratorT, typename DInputIteratorT>
void UpdateManagementHelper<T>::computeKandScaleValues(
XInputIteratorT x_in,
DInputIteratorT d_in,
const T dw_min,
const T lr,
const bool update_management,
const bool update_bl_management,
const int m_batch,
const bool x_trans,
const bool d_trans,
const int Kmax) {
if ((!update_management) && (!update_bl_management)) {
return;
} else {
// get max values
x_maximizer_->compute(x_in, m_batch, x_trans);
d_maximizer_->compute(d_in, m_batch, d_trans);
// initialize if necessary
if (buffer_m_batch_ < m_batch) {
this->initializeBuffers(m_batch);
}
// compute
int nblocks = context_->getNBlocks(m_batch, nthreads_);
hipLaunchKernelGGL(( kernelUMGetScaleAndKValues), dim3(nblocks), dim3(nthreads_), 0, context_->getStream(),
dev_scale_values_->getData(), dev_K_values_->getData(), x_maximizer_->getMaxValues(),
d_maximizer_->getMaxValues(), m_batch, update_bl_management, dw_min, lr, Kmax);
}
}
#define RPU_UMH_ITER_TEMPLATE(NUM_T, XITERT, DITERT) \
template void UpdateManagementHelper<NUM_T>::computeKandScaleValues( \
XITERT, DITERT, const NUM_T, const NUM_T, const bool, const bool, const int, const bool, \
const bool, const int);
#define TRANSFLOAT(TRANS) TRANS, float
template class UpdateManagementHelper<float>;
RPU_UMH_ITER_TEMPLATE(float, const float *, const float *);
RPU_UMH_ITER_TEMPLATE(float, float *, float *);
RPU_UMH_ITER_TEMPLATE(float, IndexReaderInputIterator<float>, const float *);
RPU_UMH_ITER_TEMPLATE(float, IndexReaderTransInputIterator<float>, const float *);
RPU_UMH_ITER_TEMPLATE(
float, IndexReaderTransInputIterator<float>, PermuterTransInputIterator<float>);
RPU_UMH_ITER_TEMPLATE(
float, IndexReaderSliceInputIterator<TRANSFLOAT(true)>, SliceInputIterator<TRANSFLOAT(true)>);
RPU_UMH_ITER_TEMPLATE(
float, IndexReaderSliceInputIterator<TRANSFLOAT(false)>, SliceInputIterator<TRANSFLOAT(false)>);
RPU_UMH_ITER_TEMPLATE(float, const float *, PermuterTransInputIterator<float>);
RPU_UMH_ITER_TEMPLATE(float, const float *, SliceInputIterator<TRANSFLOAT(true)>);
RPU_UMH_ITER_TEMPLATE(float, const float *, SliceInputIterator<TRANSFLOAT(false)>);
RPU_UMH_ITER_TEMPLATE(float, IndexReaderSliceInputIterator<TRANSFLOAT(true)>, const float *);
RPU_UMH_ITER_TEMPLATE(float, IndexReaderSliceInputIterator<TRANSFLOAT(false)>, const float *);
#undef TRANSFLOAT
#ifdef RPU_USE_DOUBLE
#define TRANSDOUBLE(TRANS) TRANS, double
template class UpdateManagementHelper<double>;
RPU_UMH_ITER_TEMPLATE(double, const double *, const double *);
RPU_UMH_ITER_TEMPLATE(double, double *, double *);
RPU_UMH_ITER_TEMPLATE(
double, IndexReaderTransInputIterator<double>, PermuterTransInputIterator<double>);
RPU_UMH_ITER_TEMPLATE(double, IndexReaderInputIterator<double>, const double *);
RPU_UMH_ITER_TEMPLATE(double, IndexReaderTransInputIterator<double>, const double *);
RPU_UMH_ITER_TEMPLATE(
double,
IndexReaderSliceInputIterator<TRANSDOUBLE(true)>,
SliceInputIterator<TRANSDOUBLE(true)>);
RPU_UMH_ITER_TEMPLATE(
double,
IndexReaderSliceInputIterator<TRANSDOUBLE(false)>,
SliceInputIterator<TRANSDOUBLE(false)>);
RPU_UMH_ITER_TEMPLATE(double, const double *, PermuterTransInputIterator<double>);
RPU_UMH_ITER_TEMPLATE(double, const double *, SliceInputIterator<TRANSDOUBLE(true)>);
RPU_UMH_ITER_TEMPLATE(double, const double *, SliceInputIterator<TRANSDOUBLE(false)>);
RPU_UMH_ITER_TEMPLATE(double, IndexReaderSliceInputIterator<TRANSDOUBLE(true)>, const double *);
RPU_UMH_ITER_TEMPLATE(double, IndexReaderSliceInputIterator<TRANSDOUBLE(false)>, const double *);
#undef TRANSDOUBLE
#endif
#undef RPU_UMH_ITER_TEMPLATE
} // namespace RPU
| 267cf2019e3f7c1cbbec3aa96293c75b2456f952.cu | /**
* (C) Copyright 2020, 2021 IBM. All Rights Reserved.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "bit_line_maker.h"
#include "update_management_helper.h"
#include <cmath>
#include <iostream>
#include <memory>
#include "cuda_math_util.h"
#include "cuda_util.h"
#include "io_iterator.h"
#include <cub/cub.cuh>
namespace RPU {
// /*********************************************************************************/
/*--------- K<32 special path for large batch sizes. -------------------------------*/
template <bool ublm>
__device__ __forceinline__ kagg_t getKn(const int m_batch, const int BL, const kagg_t *nK);
template <bool ublm>
__device__ __forceinline__ kagg_t getBlockAggregate(
const int m_batch,
const int BL,
int tid_stride,
int thread_block_size,
kagg_t *Kc_block_aggregate);
template <bool ublm>
__device__ __forceinline__ int getK(int batch_idx, const int BL, int *K_values);
template <bool ublm>
__device__ __forceinline__ kagg_t
getCurrentKc(int batch_idx, const int BL, kagg_t *Kc_block, kagg_t Kn_aggregate);
template <>
__device__ __forceinline__ kagg_t getKn<false>(const int m_batch, const int BL, const kagg_t *Kn) {
return m_batch * BL;
}
template <>
__device__ __forceinline__ kagg_t getKn<true>(const int m_batch, const int BL, const kagg_t *Kn) {
return *Kn;
}
template <>
__device__ __forceinline__ kagg_t getBlockAggregate<false>(
const int m_batch,
const int BL,
int tid_stride,
int thread_block_size,
kagg_t *Kc_block_aggregate) {
if ((thread_block_size + tid_stride) >= m_batch)
return (m_batch % thread_block_size) * BL;
else
return thread_block_size * BL;
}
template <>
__device__ __forceinline__ kagg_t getBlockAggregate<true>(
const int m_batch,
const int BL,
int tid_stride,
int thread_block_size,
kagg_t *Kc_block_aggregate) {
int bid = tid_stride / thread_block_size;
return Kc_block_aggregate[bid];
}
template <> __device__ __forceinline__ int getK<false>(int batch_idx, const int BL, int *K_values) {
return BL;
}
template <> __device__ __forceinline__ int getK<true>(int batch_idx, const int BL, int *K_values) {
return K_values[batch_idx];
}
template <>
__device__ __forceinline__ kagg_t
getCurrentKc<false>(int batch_idx, const int BL, kagg_t *Kc_block, kagg_t Kn_aggregate) {
return batch_idx * BL - Kn_aggregate;
}
template <>
__device__ __forceinline__ kagg_t
getCurrentKc<true>(int batch_idx, const int BL, kagg_t *Kc_block, kagg_t Kn_aggregate) {
return Kc_block[batch_idx];
}
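// The template<bool ublm> specializations above choose, at compile time, between the
// fixed-length case (every batch uses BL pulses, so offsets are simple multiples of BL)
// and update-BL management (per-batch K read from K_values with precomputed prefix sums).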
template <bool update_bl_management = false, int thread_block_size = 512>
__global__ void kernelTranslateTransFormatToBatchOrder64Format(
const uint32_t *x_counts,
uint64_t *x_counts_BO64_format,
int x_size_in,
const uint32_t *d_counts,
uint64_t *d_counts_BO64_format,
int d_size_in,
const int m_batch_in,
const int BL_in,
kagg_t *Kn_in = nullptr,
int *K_values_in = nullptr,
kagg_t *Kc_block_in = nullptr,
kagg_t *Kc_block_aggregate_in = nullptr) {
// -- each block takes one x/d value.
// -- expects OUTTRANS format !!
__shared__ uint32_t c_shared[thread_block_size];
__shared__ uint32_t neg_shared[thread_block_size];
const int m_batch = m_batch_in;
const int BL = BL_in;
kagg_t Kn = getKn<update_bl_management>(m_batch, BL, Kn_in);
const int x_size = x_size_in;
const int d_size = d_size_in;
const int add_size = x_size + d_size;
int nB = ((Kn + 31) >> 5); // compressed K on batch
// loop over x/d indices
for (int bid_stride = 0; bid_stride < add_size; bid_stride += gridDim.x) {
int bid = blockIdx.x + bid_stride;
// select x or d
const uint32_t *counts;
uint64_t *out_counts;
int xd_index;
if (bid < x_size) {
counts = x_counts;
out_counts = x_counts_BO64_format;
xd_index = bid;
} else if (bid < add_size) {
counts = d_counts;
out_counts = d_counts_BO64_format;
xd_index = bid - x_size;
} else {
return;
}
const int start_idx = xd_index * m_batch; // expects trans order !!
const int out_start_idx = xd_index * nB; // reduced batch size
int total_nB = 0;
uint32_t last_neg = 0;
uint32_t last_c = 0;
int current_nB = 0;
kagg_t Kc_aggregate = 0;
int K_left_over = 0;
// loop over batch
for (int tid_stride = 0; tid_stride < m_batch; tid_stride += blockDim.x) {
if (threadIdx.x > 0) {
c_shared[threadIdx.x] = 0;
neg_shared[threadIdx.x] = 0;
}
if (threadIdx.x == current_nB) { // to avoid a sync, see below
c_shared[0] = last_c;
neg_shared[0] = last_neg;
}
const int batch_idx = threadIdx.x + tid_stride;
kagg_t Kc_block_aggregate = getBlockAggregate<update_bl_management>(
m_batch, BL, tid_stride, thread_block_size, Kc_block_aggregate_in);
kagg_t current_Kc = Kc_block_aggregate;
int K = 0;
if (batch_idx < m_batch) {
K = getK<update_bl_management>(batch_idx, BL, K_values_in);
current_Kc = getCurrentKc<update_bl_management>(batch_idx, BL, Kc_block_in, Kc_aggregate);
}
Kc_block_aggregate += K_left_over;
current_Kc += K_left_over;
__syncthreads(); // need to sync for shared
if (batch_idx < m_batch) {
uint32_t c = counts[start_idx + batch_idx];
uint32_t negative = 0;
if ((c & ((uint32_t)1)) > 0) {
negative = 0xffffffff >> (32 - K);
}
c >>= 1; // get rid of negative bit
// set bit in shared
int i_word_start = current_Kc >> 5;
int i_word_end = (current_Kc + K) >> 5;
int i_bit_start = current_Kc & 0x1f;
atomicOr(&c_shared[i_word_start], c << i_bit_start);
atomicOr(&neg_shared[i_word_start], negative << i_bit_start);
if (i_word_start != i_word_end) { // at most 31 bits per batch, so only 1 overlap is possible
atomicOr(
&c_shared[i_word_end],
c >> (32 - i_bit_start)); // (32 - i_bit_start) first bits were already set above
atomicOr(&neg_shared[i_word_end], negative >> (32 - i_bit_start));
}
}
__syncthreads();
Kc_aggregate += Kc_block_aggregate;
kagg_t current_nB =
Kc_block_aggregate >> 5; // there might be some left over bits. put into next round
bool last_loop = tid_stride + blockDim.x >= m_batch;
K_left_over = Kc_aggregate & 0x1f;
bool left_overs = K_left_over > 0;
if ((threadIdx.x < current_nB) || ((threadIdx.x == current_nB) && last_loop && left_overs)) {
uint64_t c64 =
(((uint64_t)neg_shared[threadIdx.x]) << 32) | ((uint64_t)c_shared[threadIdx.x]);
out_counts[out_start_idx + total_nB + threadIdx.x] = c64;
} else if ((threadIdx.x == current_nB) && left_overs) { // save left overs
last_neg = neg_shared[current_nB];
last_c = c_shared[current_nB];
}
total_nB += current_nB;
}
}
}
namespace test_helper {
template <typename T, bool ublm>
int debugKernelTranslateTransFormatToBatchOrder64Format(
T *indata, int size, int m_batch, T scaleprob, int K) {
// counts should be: size*nk32 allocated !
if (K > 31)
return 1;
DebugPulsedUpdateMetaParameter<T> up;
up.res = 0.01;
up.sto_round = false;
up.update_bl_management = ublm;
up.update_management = ublm;
up.scaleprob = scaleprob;
up.desired_BL = K;
std::cout << "m_batch: " << m_batch << " size: " << size << std::endl;
const int nthreads = RPU_THREADS_PER_BLOCK_UPDATE;
CUDA_TIMING_INIT;
CudaContext c{-1, false};
T *tmp = new T[size * m_batch];
for (int i = 0; i < m_batch; i++) {
for (int j = 0; j < size; j++) {
tmp[i * size + j] = indata[j];
}
}
CudaArray<T> dev_indata(&c, size * m_batch, tmp);
c.synchronize();
delete[] tmp;
T dwmin = 0.001;
T lr = 0.01;
BitLineMaker<T> blm(&c, size, size);
blm.makeCounts(
dev_indata.getData(), dev_indata.getData(), up, dwmin, lr, m_batch, false, false, true, 2,
false); // compute B64 to init buffer for below
UpdateManagementHelper<T> *umh = blm.getUmh();
c.synchronize();
int nBmax = m_batch; // at most m_batch, likely smaller
CudaArray<uint64_t> dev_counts_out(&c, size * nBmax);
CudaArray<uint64_t> dev_counts_out2(&c, size * nBmax);
int nblocks = size + size;
std::cout << "nblocks, nthreads: " << nblocks << ", " << nthreads << std::endl;
CUDA_TIMING_START(c);
kagg_t *nK = nullptr;
int *K_values = nullptr;
kagg_t *Kc_block = nullptr;
kagg_t *Kc_block_aggregate = nullptr;
if (ublm) {
// redo computation for timing
umh->computeKn(m_batch); // needs explicit buffer init. see above.
nK = umh->getKnData(true);
K_values = umh->getKValueData();
umh->computeKcBlock(m_batch);
Kc_block = umh->getKcBlockData();
Kc_block_aggregate = umh->getKcBlockAggregateData();
}
CUDA_TIMING_STOP(c, "get Kn/Kcblock ");
CUDA_TIMING_START(c);
kernelTranslateTransFormatToBatchOrder64Format<ublm, nthreads>
<<<nblocks, nthreads, 0, c.getStream()>>>(
blm.getXCountsData(), dev_counts_out.getData(), size, blm.getDCountsData(),
dev_counts_out2.getData(), size, m_batch, K, nK, K_values, Kc_block, Kc_block_aggregate);
CUDA_TIMING_STOP(c, "Counts translated");
kagg_t Kn = 0;
if (ublm)
Kn = umh->getKnValue();
else
Kn = m_batch * K;
kagg_t nB = (Kn + 31) / 32;
// check translated:
int *Kvalues = new int[m_batch];
if (ublm)
umh->getKValues().copyTo(Kvalues);
uint32_t *orig_counts = new uint32_t[m_batch * size];
uint64_t *counts_out = new uint64_t[m_batch * size];
uint64_t *counts_out_ref = new uint64_t[m_batch * size];
dev_counts_out2.copyTo(counts_out);
blm.copyDCountsToHost(orig_counts);
for (int j = 0; j < m_batch * size; j++) {
counts_out_ref[j] = 0;
}
c.synchronize();
int return_int = 0;
kagg_t Kc = 0;
for (int i_batch = 0; i_batch < m_batch; i_batch++) {
if (ublm)
Kc += Kvalues[i_batch];
else
Kc += K;
}
int nBref = (Kc + 31) >> 5;
uint32_t one = 1;
// translate reference
for (int idx = 0; idx < size; idx++) {
Kc = 0;
for (int i_batch = 0; i_batch < m_batch; i_batch++) {
uint32_t c = orig_counts[i_batch + m_batch * idx];
uint32_t neg = c & one;
c >>= 1; // get rid of sign bit;
int k = K;
if (ublm)
k = Kvalues[i_batch];
for (int i = 0; i < k; i++) { // k is smaller than 32 because nK32==1
kagg_t current_cK = Kc + i;
kagg_t iB = (current_cK) >> 5;
int ibit = (current_cK)&0x1f;
if ((c & (one << i)) > 0) {
counts_out_ref[iB + idx * nBref] |= ((uint64_t)1) << ibit;
}
if (neg > 0) {
counts_out_ref[iB + idx * nBref] |= ((uint64_t)1) << (ibit + 32);
}
}
Kc += k;
}
}
std::cout << "nB should be " << nBref << " and is " << nB << ".\n";
if (nB != nBref) {
return_int = 1;
}
for (int j = 0; j < nBref * size; j++) {
if (counts_out_ref[j] != counts_out[j]) {
std::cerr << j << ":" << counts_out[j] << " should be " << counts_out_ref[j] << std::endl;
return_int = 1;
}
if ((j > 100) && return_int)
break;
}
delete[] counts_out;
delete[] orig_counts;
delete[] counts_out_ref;
delete[] Kvalues;
CUDA_TIMING_DESTROY;
return return_int;
}
template int
debugKernelTranslateTransFormatToBatchOrder64Format<float, true>(float *, int, int, float, int);
template int
debugKernelTranslateTransFormatToBatchOrder64Format<float, false>(float *, int, int, float, int);
#ifdef RPU_USE_DOUBLE
template int
debugKernelTranslateTransFormatToBatchOrder64Format<double, true>(double *, int, int, double, int);
template int
debugKernelTranslateTransFormatToBatchOrder64Format<double, false>(double *, int, int, double, int);
#endif
} // namespace test_helper
template <typename T>
__global__ void kernelUMGetScaleAndKValues(
T *scale_values,
int *K_values,
float *x_amax_values,
float *d_amax_values,
const int m_batch,
const bool ublm_in,
const T dw_min_in,
const T lr_in,
const int Kmax_in) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
bool ublm = ublm_in;
T dw_min = dw_min_in;
T lr = lr_in;
T regularizer = dw_min * dw_min;
if (tid < m_batch) {
T x_amax = MAX(x_amax_values[tid], regularizer);
T d_amax = MAX(d_amax_values[tid], regularizer);
scale_values[tid] = sqrt(x_amax / d_amax);
if (ublm) {
int Kmax = Kmax_in;
int K = ceil(lr * x_amax * d_amax / dw_min);
K_values[tid] = (K <= Kmax) ? K : Kmax;
}
// note: K values are not set when ublm is false
}
}
template <int thread_block_size>
__global__ void kernelGetKBlockAggregate(
int *K_values, int m_batch_in, kagg_t *Kc_block, kagg_t *Kc_block_aggregate) {
const int m_batch = m_batch_in;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ typename RPU::cub::BlockScan<kagg_t, thread_block_size>::TempStorage temp_storage;
int K = 0;
if (tid < m_batch) {
K = K_values[tid];
}
kagg_t Kc = 0;
kagg_t block_aggregate = 0;
RPU::cub::BlockScan<kagg_t, thread_block_size>(temp_storage).ExclusiveSum(K, Kc, block_aggregate);
if (tid < m_batch) {
Kc_block[tid] = Kc;
}
if (threadIdx.x == 0) {
Kc_block_aggregate[blockIdx.x] = block_aggregate;
}
}
/*********************************************************************************************************************/
/* UPDATEMANAGERHELPER */
/*********************************************************************************************************************/
#define RPU_UMH_B64_NTHREADS 512
template <typename T>
UpdateManagementHelper<T>::UpdateManagementHelper(CudaContext *c, int x_size, int d_size)
: context_{c}, x_size_{x_size}, d_size_{d_size}, buffer_m_batch_{0} {
nthreads_ = RPU_THREADS_PER_BLOCK_UPDATE;
x_maximizer_ = RPU::make_unique<Maximizer<T>>(c, x_size_);
d_maximizer_ = RPU::make_unique<Maximizer<T>>(c, d_size_);
dev_Kn_ = RPU::make_unique<CudaArray<kagg_t>>(c, 1);
}
template <typename T> void UpdateManagementHelper<T>::initializeBuffers(int m_batch) {
buffer_m_batch_ = m_batch;
dev_K_values_ = RPU::make_unique<CudaArray<int>>(context_, m_batch);
dev_Kc_values_ = RPU::make_unique<CudaArray<kagg_t>>(context_, m_batch);
dev_scale_values_ = RPU::make_unique<CudaArray<T>>(context_, m_batch);
// for translate
const int nthreads = RPU_UMH_B64_NTHREADS;
int nblocks = context_->getNBlocks(m_batch, nthreads);
dev_Kc_block_ = RPU::make_unique<CudaArray<kagg_t>>(context_, m_batch);
dev_Kc_block_aggregate_ = RPU::make_unique<CudaArray<kagg_t>>(context_, nblocks);
// Determine temporary device storage requirements
void *temp_storage = NULL;
size_t temp_storage_bytes = 0;
CUDA_CALL(RPU::cub::DeviceReduce::Sum(
temp_storage, temp_storage_bytes, dev_K_values_->getData(), dev_Kn_->getData(), m_batch,
context_->getStream()));
context_->synchronize();
dev_Kn_temp_storage_ = RPU::make_unique<CudaArray<char>>(context_, (int)temp_storage_bytes);
context_->synchronize();
CUDA_CALL(RPU::cub::DeviceScan::ExclusiveSum(
temp_storage, temp_storage_bytes, dev_K_values_->getData(), dev_Kc_values_->getData(),
m_batch, context_->getStream()));
context_->synchronize();
dev_Kc_temp_storage_ = RPU::make_unique<CudaArray<char>>(context_, (int)temp_storage_bytes);
context_->synchronize();
}
template <typename T> void UpdateManagementHelper<T>::computeKcBlock(int m_batch) {
// CAUTION: needs K_values to be already computed !!
const int nthreads = RPU_UMH_B64_NTHREADS;
int nblocks = context_->getNBlocks(m_batch, nthreads);
kernelGetKBlockAggregate<nthreads><<<nblocks, nthreads, 0, context_->getStream()>>>(
dev_K_values_->getData(), m_batch, dev_Kc_block_->getData(),
dev_Kc_block_aggregate_->getData());
}
template <typename T> void UpdateManagementHelper<T>::computeKc(int m_batch) {
// CAUTION: needs K_values to be already computed !!
size_t temp_storage_bytes = dev_Kc_temp_storage_->getSize();
CUDA_CALL(RPU::cub::DeviceScan::ExclusiveSum(
(void *)dev_Kc_temp_storage_->getData(), temp_storage_bytes, dev_K_values_->getData(),
dev_Kc_values_->getData(), m_batch, context_->getStream()));
}
template <typename T> void UpdateManagementHelper<T>::computeKn(int m_batch) {
// CAUTION: needs K_values to be already computed !!
size_t temp_storage_bytes = dev_Kn_temp_storage_->getSize();
CUDA_CALL(RPU::cub::DeviceReduce::Sum(
(void *)dev_Kn_temp_storage_->getData(), temp_storage_bytes, dev_K_values_->getData(),
dev_Kn_->getData(), m_batch, context_->getStream()));
}
template <typename T>
void UpdateManagementHelper<T>::translateTransToBatchOrder64(
uint64_t *x_counts_bo64,
uint64_t *d_counts_bo64,
const uint32_t *x_counts,
const uint32_t *d_counts,
const int m_batch,
const int BL,
const bool update_bl_management) {
// needs K values to be precomputed for ublm !!
if (BL > 31) {
RPU_FATAL("ERROR: BO64 format only supported for BL<32");
}
if (buffer_m_batch_ < m_batch) {
this->initializeBuffers(m_batch);
}
const int nthreads = RPU_UMH_B64_NTHREADS; // how many ? test...
int nblocks = d_size_ + x_size_;
if (update_bl_management) {
this->computeKcBlock(m_batch);
this->computeKn(m_batch);
kernelTranslateTransFormatToBatchOrder64Format<true, nthreads>
<<<nblocks, nthreads, 0, context_->getStream()>>>(
x_counts, x_counts_bo64, x_size_, d_counts, d_counts_bo64, d_size_, m_batch, BL,
this->getKnData(true), this->getKValueData(), this->getKcBlockData(),
this->getKcBlockAggregateData());
// context_->synchronize();
} else {
// no update bl management
kernelTranslateTransFormatToBatchOrder64Format<false, nthreads>
<<<nblocks, nthreads, 0, context_->getStream()>>>(
x_counts, x_counts_bo64, x_size_, d_counts, d_counts_bo64, d_size_, m_batch, BL);
}
}
template <typename T>
template <typename XInputIteratorT, typename DInputIteratorT>
void UpdateManagementHelper<T>::computeKandScaleValues(
XInputIteratorT x_in,
DInputIteratorT d_in,
const T dw_min,
const T lr,
const bool update_management,
const bool update_bl_management,
const int m_batch,
const bool x_trans,
const bool d_trans,
const int Kmax) {
if ((!update_management) && (!update_bl_management)) {
return;
} else {
// get max values
x_maximizer_->compute(x_in, m_batch, x_trans);
d_maximizer_->compute(d_in, m_batch, d_trans);
// initialize if necessary
if (buffer_m_batch_ < m_batch) {
this->initializeBuffers(m_batch);
}
// compute
int nblocks = context_->getNBlocks(m_batch, nthreads_);
kernelUMGetScaleAndKValues<<<nblocks, nthreads_, 0, context_->getStream()>>>(
dev_scale_values_->getData(), dev_K_values_->getData(), x_maximizer_->getMaxValues(),
d_maximizer_->getMaxValues(), m_batch, update_bl_management, dw_min, lr, Kmax);
}
}
#define RPU_UMH_ITER_TEMPLATE(NUM_T, XITERT, DITERT) \
template void UpdateManagementHelper<NUM_T>::computeKandScaleValues( \
XITERT, DITERT, const NUM_T, const NUM_T, const bool, const bool, const int, const bool, \
const bool, const int);
#define TRANSFLOAT(TRANS) TRANS, float
template class UpdateManagementHelper<float>;
RPU_UMH_ITER_TEMPLATE(float, const float *, const float *);
RPU_UMH_ITER_TEMPLATE(float, float *, float *);
RPU_UMH_ITER_TEMPLATE(float, IndexReaderInputIterator<float>, const float *);
RPU_UMH_ITER_TEMPLATE(float, IndexReaderTransInputIterator<float>, const float *);
RPU_UMH_ITER_TEMPLATE(
float, IndexReaderTransInputIterator<float>, PermuterTransInputIterator<float>);
RPU_UMH_ITER_TEMPLATE(
float, IndexReaderSliceInputIterator<TRANSFLOAT(true)>, SliceInputIterator<TRANSFLOAT(true)>);
RPU_UMH_ITER_TEMPLATE(
float, IndexReaderSliceInputIterator<TRANSFLOAT(false)>, SliceInputIterator<TRANSFLOAT(false)>);
RPU_UMH_ITER_TEMPLATE(float, const float *, PermuterTransInputIterator<float>);
RPU_UMH_ITER_TEMPLATE(float, const float *, SliceInputIterator<TRANSFLOAT(true)>);
RPU_UMH_ITER_TEMPLATE(float, const float *, SliceInputIterator<TRANSFLOAT(false)>);
RPU_UMH_ITER_TEMPLATE(float, IndexReaderSliceInputIterator<TRANSFLOAT(true)>, const float *);
RPU_UMH_ITER_TEMPLATE(float, IndexReaderSliceInputIterator<TRANSFLOAT(false)>, const float *);
#undef TRANSFLOAT
#ifdef RPU_USE_DOUBLE
#define TRANSDOUBLE(TRANS) TRANS, double
template class UpdateManagementHelper<double>;
RPU_UMH_ITER_TEMPLATE(double, const double *, const double *);
RPU_UMH_ITER_TEMPLATE(double, double *, double *);
RPU_UMH_ITER_TEMPLATE(
double, IndexReaderTransInputIterator<double>, PermuterTransInputIterator<double>);
RPU_UMH_ITER_TEMPLATE(double, IndexReaderInputIterator<double>, const double *);
RPU_UMH_ITER_TEMPLATE(double, IndexReaderTransInputIterator<double>, const double *);
RPU_UMH_ITER_TEMPLATE(
double,
IndexReaderSliceInputIterator<TRANSDOUBLE(true)>,
SliceInputIterator<TRANSDOUBLE(true)>);
RPU_UMH_ITER_TEMPLATE(
double,
IndexReaderSliceInputIterator<TRANSDOUBLE(false)>,
SliceInputIterator<TRANSDOUBLE(false)>);
RPU_UMH_ITER_TEMPLATE(double, const double *, PermuterTransInputIterator<double>);
RPU_UMH_ITER_TEMPLATE(double, const double *, SliceInputIterator<TRANSDOUBLE(true)>);
RPU_UMH_ITER_TEMPLATE(double, const double *, SliceInputIterator<TRANSDOUBLE(false)>);
RPU_UMH_ITER_TEMPLATE(double, IndexReaderSliceInputIterator<TRANSDOUBLE(true)>, const double *);
RPU_UMH_ITER_TEMPLATE(double, IndexReaderSliceInputIterator<TRANSDOUBLE(false)>, const double *);
#undef TRANSDOUBLE
#endif
#undef RPU_UMH_ITER_TEMPLATE
} // namespace RPU
|
7e12770cc0f3c2539b1ca1fc63650c92a4f488d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
// code adapted from github repo
// implemented by Yijie Guo ([email protected]) and Xinchen Yan ([email protected])
// Trilinear sampling is done with channels last (coalescing over channels is not obvious in channels-first layouts)
// we assume (batch, depth, height, width, channels) layout for inputImages
// we assume (batch, depth, height, width, (z,y,x)) layout for grids
__device__ void getTopLeftFront(float x, int width, int& point, float& weight)
{
/* for interpolation :
stores in point and weight :
- the x-coordinate of the pixel on the left (or y-coordinate of the upper pixel)
- the weight for interpolating
*/
float xcoord = (x + 1) * (width - 1) / 2;
point = floor(xcoord);
weight = 1 - (xcoord - point);
}
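// Example (illustrative): for x = 0 and width = 5, xcoord = (0 + 1) * 4 / 2 = 2, so
// point = 2 and weight = 1 (the sample lies exactly on pixel 2); for x = 0.1 the same
// width gives xcoord = 2.2, point = 2 and weight = 0.8.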
__device__ bool between(int value, int lowerBound, int upperBound)
{
return (value >= lowerBound && value <= upperBound);
}
__device__ void sumReduceShMem(volatile float s[])
{
/* works for 48 elements: the first step below also folds s[32..47] into s[0..15] */
/* sums up a shared memory array of 48 elements, stores the result in s[0] */
/* whole warp can then read first element (broadcasting) */
//if(threadIdx.x<32) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+32]; }
if(threadIdx.x<16) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+16] + s[threadIdx.x+32]; }
if(threadIdx.x<8) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+8]; }
if(threadIdx.x<4) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+4]; }
if(threadIdx.x<2) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+2]; }
if(threadIdx.x<1) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+1]; }
}
__device__ void sumReduceShMemPerspective(volatile float s[])
{
/* obviously only works for 32 elements */
/* sums up a shared memory array of 32 elements, stores it in s[0] */
/* whole warp can then read first element (broadcasting) */
if(threadIdx.x<16) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+16]; }
if(threadIdx.x<8) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+8]; }
if(threadIdx.x<4) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+4]; }
if(threadIdx.x<2) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+2]; }
if(threadIdx.x<1) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+1]; }
}
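// Both reductions above rely on warp-synchronous execution of the first 32 threads
// (hence the volatile qualifier and the missing __syncthreads()); on architectures with
// independent thread scheduling this pattern would likely need __syncwarp() or shuffles.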
// Affine Transformation
__global__ void bilinearSamplingFromGrid(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideDepth, int inputImages_strideHeight, int inputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideDepth, int grids_strideHeight, int grids_strideWidth,
float* output_data, int output_strideBatch, int output_strideChannels, int output_strideDepth, int output_strideHeight, int output_strideWidth,
int inputImages_channels, int inputImages_depth, int inputImages_height, int inputImages_width, int output_depth, int output_width)
{
// each (48,16) thread block handles 16 output pixels (for coalescing the grid read)
// x,y = output coordinates (xOut = blockIdx.x*blockDim.y + threadIdx.y)
// z = batch index
// threadIdx.x : used for features (coalescing is trivial)
const int xOut = blockIdx.x*blockDim.y+threadIdx.y;
const bool withinImageBounds = xOut < output_width;
const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 3 < output_width;
const int yOut = blockIdx.y;
const int width = inputImages_width;
const int height = inputImages_height;
const int depth = output_depth;
const int b = blockIdx.z/depth;
const int zOut = blockIdx.z%depth;
float zf, yf,xf;
__shared__ float gridData[48];
if (threadIdx.y==0 && withinGridBounds)
{
gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + zOut*grids_strideDepth + threadIdx.x];
}
__syncthreads();
if(!withinImageBounds) return;
zf = gridData[threadIdx.y*3];
yf = gridData[threadIdx.y*3+1];
xf = gridData[threadIdx.y*3+2];
//printf("%.3f %.3f %.3f\n", zf, yf, xf);
int yInTopLeftFront, xInTopLeftFront, zInTopLeftFront;
float yWeightTopLeftFront, xWeightTopLeftFront, zWeightTopLeftFront;
getTopLeftFront(zf, inputImages_depth, zInTopLeftFront, zWeightTopLeftFront);
getTopLeftFront(yf, inputImages_height, yInTopLeftFront, yWeightTopLeftFront);
getTopLeftFront(xf, inputImages_width, xInTopLeftFront, xWeightTopLeftFront);
//printf("GPU y[%.3f] x[%.3f] z[%.3f] WeightTopLeftFront\n",yWeightTopLeftFront, xWeightTopLeftFront, zWeightTopLeftFront);
// printf("GPU y[%d] x[%d] z[%d] InTopLeftFront\n",yInTopLeftFront, xInTopLeftFront, zInTopLeftFront);
const int outAddress = output_strideBatch * b + output_strideHeight * yOut + output_strideWidth * xOut + output_strideDepth * zOut;
const int inTopLeftFrontAddress = inputImages_strideBatch * b + inputImages_strideHeight * yInTopLeftFront
+ inputImages_strideWidth * xInTopLeftFront + inputImages_strideDepth * zInTopLeftFront;
const int inTopLeftBackAddress = inTopLeftFrontAddress + inputImages_strideDepth;
const int inTopRightFrontAddress = inTopLeftFrontAddress + inputImages_strideWidth;
const int inTopRightBackAddress = inTopRightFrontAddress + inputImages_strideDepth;
const int inBottomLeftFrontAddress = inTopLeftFrontAddress + inputImages_strideHeight;
const int inBottomLeftBackAddress = inBottomLeftFrontAddress + inputImages_strideDepth;
const int inBottomRightFrontAddress = inBottomLeftFrontAddress + inputImages_strideWidth;
const int inBottomRightBackAddress = inBottomRightFrontAddress + inputImages_strideDepth;
float v=0;
float inTopLeftFront=0;
float inTopLeftBack=0;
float inTopRightFront=0;
float inTopRightBack=0;
float inBottomLeftFront=0;
float inBottomLeftBack=0;
float inBottomRightFront=0;
float inBottomRightBack=0;
bool topLeftFrontIsIn = between(xInTopLeftFront, 0, inputImages_width-1)
&& between(yInTopLeftFront, 0, inputImages_height-1) && between(zInTopLeftFront, 0, inputImages_depth-1);
bool topLeftBackIsIn = between(xInTopLeftFront, 0, inputImages_width-1)
&& between(yInTopLeftFront, 0, inputImages_height-1) && between(zInTopLeftFront+1, 0, inputImages_depth-1);
bool topRightFrontIsIn = between(xInTopLeftFront+1, 0, inputImages_width-1)
&& between(yInTopLeftFront, 0, inputImages_height-1) && between(zInTopLeftFront, 0, inputImages_depth-1);
bool topRightBackIsIn = between(xInTopLeftFront+1, 0, inputImages_width-1)
&& between(yInTopLeftFront, 0, inputImages_height-1) && between(zInTopLeftFront+1, 0, inputImages_depth-1);
bool bottomLeftFrontIsIn = between(xInTopLeftFront, 0, inputImages_width-1)
&& between(yInTopLeftFront+1, 0, inputImages_height-1) && between(zInTopLeftFront, 0, inputImages_depth-1);
bool bottomLeftBackIsIn = between(xInTopLeftFront, 0, inputImages_width-1)
&& between(yInTopLeftFront+1, 0, inputImages_height-1) && between(zInTopLeftFront+1, 0, inputImages_depth-1);
bool bottomRightFrontIsIn = between(xInTopLeftFront+1, 0, inputImages_width-1)
&& between(yInTopLeftFront+1, 0, inputImages_height-1) && between(zInTopLeftFront, 0, inputImages_depth-1);
bool bottomRightBackIsIn = between(xInTopLeftFront+1, 0, inputImages_width-1)
&& between(yInTopLeftFront+1, 0, inputImages_height-1) && between(zInTopLeftFront+1, 0, inputImages_depth-1);
// interpolation happens here
for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x)
{
if(topLeftFrontIsIn) inTopLeftFront = inputImages_data[inTopLeftFrontAddress + t];
if(topLeftBackIsIn) inTopLeftBack = inputImages_data[inTopLeftBackAddress + t];
if(topRightFrontIsIn) inTopRightFront = inputImages_data[inTopRightFrontAddress + t];
if(topRightBackIsIn) inTopRightBack = inputImages_data[inTopRightBackAddress + t];
if(bottomLeftFrontIsIn) inBottomLeftFront = inputImages_data[inBottomLeftFrontAddress + t];
if(bottomLeftBackIsIn) inBottomLeftBack = inputImages_data[inBottomLeftBackAddress + t];
if(bottomRightFrontIsIn) inBottomRightFront = inputImages_data[inBottomRightFrontAddress + t];
if(bottomRightBackIsIn) inBottomRightBack = inputImages_data[inBottomRightBackAddress + t];
v = xWeightTopLeftFront * yWeightTopLeftFront * zWeightTopLeftFront * inTopLeftFront
+ xWeightTopLeftFront * yWeightTopLeftFront * (1-zWeightTopLeftFront) * inTopLeftBack
+ (1 - xWeightTopLeftFront) * yWeightTopLeftFront * zWeightTopLeftFront * inTopRightFront
+ (1 - xWeightTopLeftFront) * yWeightTopLeftFront * (1-zWeightTopLeftFront) * inTopRightBack
+ xWeightTopLeftFront * (1 - yWeightTopLeftFront) * zWeightTopLeftFront * inBottomLeftFront
+ xWeightTopLeftFront * (1 - yWeightTopLeftFront) * (1-zWeightTopLeftFront) * inBottomLeftBack
+ (1 - xWeightTopLeftFront) * (1 - yWeightTopLeftFront) * zWeightTopLeftFront * inBottomRightFront
+ (1 - xWeightTopLeftFront) * (1 - yWeightTopLeftFront) * (1-zWeightTopLeftFront) * inBottomRightBack;
output_data[outAddress + t] = v;
}
}
static int cunn_BilinearSamplerBHWD_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor *)luaT_checkudata(L, 4, "torch.CudaTensor");
//printf("GPU hello\n");
dim3 blocks((output->size[3]+15)/16, output->size[2], output->size[0]*output->size[1]);
dim3 threads(48,16);
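// Launch geometry as set up above (descriptive note): threadIdx.y indexes 16 output pixels
// per block; threadIdx.x (48 = 16 pixels * 3 grid components) is reused both to stage the
// (z,y,x) grid values into shared memory and to loop over input channels; blockIdx.z folds
// batch and depth together (b = blockIdx.z / depth, zOut = blockIdx.z % depth).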
/* assume (batch, depth, height, width, channels) layout */
hipLaunchKernelGGL(( bilinearSamplingFromGrid) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state) , THCudaTensor_data(state, inputImages),
THCudaTensor_stride(state, inputImages, 0),
THCudaTensor_stride(state, inputImages, 4),
THCudaTensor_stride(state, inputImages, 1),
THCudaTensor_stride(state, inputImages, 2),
THCudaTensor_stride(state, inputImages, 3),
THCudaTensor_data(state, grids),
THCudaTensor_stride(state, grids, 0),
THCudaTensor_stride(state, grids, 4),
THCudaTensor_stride(state, grids, 1),
THCudaTensor_stride(state, grids, 2),
THCudaTensor_stride(state, grids, 3),
THCudaTensor_data(state, output),
THCudaTensor_stride(state, output, 0),
THCudaTensor_stride(state, output, 4),
THCudaTensor_stride(state, output, 1),
THCudaTensor_stride(state, output, 2),
THCudaTensor_stride(state, output, 3),
THCudaTensor_size(state, inputImages, 4),
THCudaTensor_size(state, inputImages, 1),
THCudaTensor_size(state, inputImages, 2),
THCudaTensor_size(state, inputImages, 3),
THCudaTensor_size(state, output, 1),
THCudaTensor_size(state, output, 3));
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in BilinearSampler.updateOutput: %s\n", hipGetErrorString(err));
THError("aborting");
}
//printf("GPU forward end!\n");
return 1;
}
template<bool onlyGrid> __global__ void backwardBilinearSampling(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideDepth, int inputImages_strideHeight, int inputImages_strideWidth,
float* gradInputImages_data, int gradInputImages_strideBatch, int gradInputImages_strideChannels, int gradInputImages_strideDepth, int gradInputImages_strideHeight, int gradInputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideDepth, int grids_strideHeight, int grids_strideWidth,
float* gradGrids_data, int gradGrids_strideBatch, int gradGrids_strideYX, int gradGrids_strideDepth, int gradGrids_strideHeight, int gradGrids_strideWidth,
float* gradOutput_data, int gradOutput_strideBatch, int gradOutput_strideChannels, int gradOutput_strideDepth, int gradOutput_strideHeight, int gradOutput_strideWidth,
int inputImages_channels, int inputImages_depth, int inputImages_height, int inputImages_width, int gradOutput_depth, int gradOutput_width)
{
// each (48,16) thread block handles 16 output pixels (for coalescing the grid read)
// x,y = coordinates
// z = batch index
// threads : used for features
const int xOut = blockIdx.x*blockDim.y+threadIdx.y;
const bool withinImageBounds = xOut < gradOutput_width;
const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 3 < gradOutput_width;
const int yOut = blockIdx.y;
const int width = inputImages_width;
const int height = inputImages_height;
const int depth = gradOutput_depth;
const int b = blockIdx.z/depth;
const int zOut = blockIdx.z%depth;
float yf,xf, zf;
__shared__ float gridData[48];
if (threadIdx.y==0 && withinGridBounds)
{
gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + zOut*grids_strideDepth + threadIdx.x];
}
__syncthreads();
if(withinImageBounds)
{
zf = gridData[threadIdx.y*3];
yf = gridData[threadIdx.y*3+1];
xf = gridData[threadIdx.y*3+2];
int yInTopLeftFront, xInTopLeftFront, zInTopLeftFront;
float yWeightTopLeftFront, xWeightTopLeftFront, zWeightTopLeftFront;
getTopLeftFront(zf, inputImages_depth, zInTopLeftFront, zWeightTopLeftFront);
getTopLeftFront(yf, inputImages_height, yInTopLeftFront, yWeightTopLeftFront);
getTopLeftFront(xf, inputImages_width, xInTopLeftFront, xWeightTopLeftFront);
const int inTopLeftFrontAddress = inputImages_strideBatch * b + inputImages_strideHeight * yInTopLeftFront
+ inputImages_strideWidth * xInTopLeftFront + inputImages_strideDepth * zInTopLeftFront;
const int inTopLeftBackAddress = inTopLeftFrontAddress + inputImages_strideDepth;
const int inTopRightFrontAddress = inTopLeftFrontAddress + inputImages_strideWidth;
const int inTopRightBackAddress = inTopRightFrontAddress + inputImages_strideDepth;
const int inBottomLeftFrontAddress = inTopLeftFrontAddress + inputImages_strideHeight;
const int inBottomLeftBackAddress = inBottomLeftFrontAddress + inputImages_strideDepth;
const int inBottomRightFrontAddress = inBottomLeftFrontAddress + inputImages_strideWidth;
const int inBottomRightBackAddress = inBottomRightFrontAddress + inputImages_strideDepth;
const int gradInputImagesTopLeftFrontAddress = gradInputImages_strideBatch * b + gradInputImages_strideHeight * yInTopLeftFront + gradInputImages_strideWidth * xInTopLeftFront + gradInputImages_strideDepth * zInTopLeftFront;
const int gradInputImagesTopLeftBackAddress = gradInputImagesTopLeftFrontAddress + gradInputImages_strideDepth;
const int gradInputImagesTopRightFrontAddress = gradInputImagesTopLeftFrontAddress + gradInputImages_strideWidth;
const int gradInputImagesTopRightBackAddress = gradInputImagesTopRightFrontAddress + gradInputImages_strideDepth;
const int gradInputImagesBottomLeftFrontAddress = gradInputImagesTopLeftFrontAddress + gradInputImages_strideHeight;
const int gradInputImagesBottomLeftBackAddress = gradInputImagesBottomLeftFrontAddress + gradInputImages_strideDepth;
const int gradInputImagesBottomRightFrontAddress = gradInputImagesBottomLeftFrontAddress + gradInputImages_strideWidth;
const int gradInputImagesBottomRightBackAddress = gradInputImagesBottomRightFrontAddress + gradInputImages_strideDepth;
const int gradOutputAddress = gradOutput_strideBatch * b + gradOutput_strideHeight * yOut + gradOutput_strideWidth * xOut + gradOutput_strideDepth * zOut;
float topLeftFrontDotProduct = 0;
float topLeftBackDotProduct = 0;
float topRightFrontDotProduct = 0;
float topRightBackDotProduct = 0;
float bottomLeftFrontDotProduct = 0;
float bottomLeftBackDotProduct = 0;
float bottomRightFrontDotProduct = 0;
float bottomRightBackDotProduct = 0;
bool topLeftFrontIsIn = between(xInTopLeftFront, 0, inputImages_width-1)
&& between(yInTopLeftFront, 0, inputImages_height-1) && between(zInTopLeftFront, 0, inputImages_depth-1);
bool topLeftBackIsIn = between(xInTopLeftFront, 0, inputImages_width-1)
&& between(yInTopLeftFront, 0, inputImages_height-1) && between(zInTopLeftFront+1, 0, inputImages_depth-1);
bool topRightFrontIsIn = between(xInTopLeftFront+1, 0, inputImages_width-1)
&& between(yInTopLeftFront, 0, inputImages_height-1) && between(zInTopLeftFront, 0, inputImages_depth-1);
bool topRightBackIsIn = between(xInTopLeftFront+1, 0, inputImages_width-1)
&& between(yInTopLeftFront, 0, inputImages_height-1) && between(zInTopLeftFront+1, 0, inputImages_depth-1);
bool bottomLeftFrontIsIn = between(xInTopLeftFront, 0, inputImages_width-1)
&& between(yInTopLeftFront+1, 0, inputImages_height-1) && between(zInTopLeftFront, 0, inputImages_depth-1);
bool bottomLeftBackIsIn = between(xInTopLeftFront, 0, inputImages_width-1)
&& between(yInTopLeftFront+1, 0, inputImages_height-1) && between(zInTopLeftFront+1, 0, inputImages_depth-1);
bool bottomRightFrontIsIn = between(xInTopLeftFront+1, 0, inputImages_width-1)
&& between(yInTopLeftFront+1, 0, inputImages_height-1) && between(zInTopLeftFront, 0, inputImages_depth-1);
bool bottomRightBackIsIn = between(xInTopLeftFront+1, 0, inputImages_width-1)
&& between(yInTopLeftFront+1, 0, inputImages_height-1) && between(zInTopLeftFront+1, 0, inputImages_depth-1);
/*
In that loop we accumulate
- gradients into the gradInputImages array with atomic adds
- we compute the dot product that we need for the grid gradient
*/
for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x)
{
float gradOutValue = gradOutput_data[gradOutputAddress + t];
// bool between(int value, int lowerBound, int upperBound)
if(topLeftFrontIsIn)
{
float inTopLeftFront = inputImages_data[inTopLeftFrontAddress + t];
topLeftFrontDotProduct += inTopLeftFront * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesTopLeftFrontAddress + t], xWeightTopLeftFront * yWeightTopLeftFront * zWeightTopLeftFront * gradOutValue);
}
if(topLeftBackIsIn)
{
float inTopLeftBack = inputImages_data[inTopLeftBackAddress + t];
topLeftBackDotProduct += inTopLeftBack * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesTopLeftBackAddress + t], xWeightTopLeftFront * yWeightTopLeftFront * (1-zWeightTopLeftFront) * gradOutValue);
}
if(topRightFrontIsIn)
{
float inTopRightFront = inputImages_data[inTopRightFrontAddress + t];
topRightFrontDotProduct += inTopRightFront * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesTopRightFrontAddress + t], (1 - xWeightTopLeftFront) * yWeightTopLeftFront * zWeightTopLeftFront * gradOutValue);
}
if(topRightBackIsIn)
{
float inTopRightBack = inputImages_data[inTopRightBackAddress + t];
topRightBackDotProduct += inTopRightBack * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesTopRightBackAddress + t], (1 - xWeightTopLeftFront) * yWeightTopLeftFront * (1-zWeightTopLeftFront) * gradOutValue);
}
if(bottomLeftFrontIsIn)
{
float inBottomLeftFront = inputImages_data[inBottomLeftFrontAddress + t];
bottomLeftFrontDotProduct += inBottomLeftFront * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBottomLeftFrontAddress + t], xWeightTopLeftFront * (1 - yWeightTopLeftFront) * zWeightTopLeftFront * gradOutValue);
}
if(bottomLeftBackIsIn)
{
float inBottomLeftBack = inputImages_data[inBottomLeftBackAddress + t];
bottomLeftBackDotProduct += inBottomLeftBack * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBottomLeftBackAddress + t], xWeightTopLeftFront * (1 - yWeightTopLeftFront) * (1-zWeightTopLeftFront) * gradOutValue);
}
if(bottomRightFrontIsIn)
{
float inBottomRightFront = inputImages_data[inBottomRightFrontAddress + t];
bottomRightFrontDotProduct += inBottomRightFront * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBottomRightFrontAddress + t], (1 - xWeightTopLeftFront) * (1 - yWeightTopLeftFront) * zWeightTopLeftFront * gradOutValue);
}
if(bottomRightBackIsIn)
{
float inBottomRightBack = inputImages_data[inBottomRightBackAddress + t];
bottomRightBackDotProduct += inBottomRightBack * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBottomRightBackAddress + t], (1 - xWeightTopLeftFront) * (1 - yWeightTopLeftFront) * (1-zWeightTopLeftFront) * gradOutValue);
}
}
/*
Here we reduce the dot product and compute the grid gradient before writing it.
*/
/* could do shuffles and use no shmem at all but cuda arch is 2.0 */
/*
__shared__ volatile float __shmem[16][32];
__shmem[threadIdx.y][threadIdx.x] = topLeftDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
topLeftDotProduct = __shmem[threadIdx.y][0];
__shmem[threadIdx.y][threadIdx.x] = topRightDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
topRightDotProduct = __shmem[threadIdx.y][0];
__shmem[threadIdx.y][threadIdx.x] = bottomLeftDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
bottomLeftDotProduct = __shmem[threadIdx.y][0];
__shmem[threadIdx.y][threadIdx.x] = bottomRightDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
bottomRightDotProduct = __shmem[threadIdx.y][0];
*/
__shared__ volatile float __shmem[16][48];
__shmem[threadIdx.y][threadIdx.x] = topLeftFrontDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
topLeftFrontDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = topLeftBackDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
topLeftBackDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = topRightFrontDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
topRightFrontDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = topRightBackDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
topRightBackDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = bottomLeftFrontDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
bottomLeftFrontDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = bottomLeftBackDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
bottomLeftBackDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = bottomRightFrontDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
bottomRightFrontDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = bottomRightBackDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
bottomRightBackDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
yf = topLeftFrontDotProduct * xWeightTopLeftFront * zWeightTopLeftFront * (-1)
+ topLeftBackDotProduct * xWeightTopLeftFront * (1-zWeightTopLeftFront) * (-1)
+ topRightFrontDotProduct * (1-xWeightTopLeftFront) * zWeightTopLeftFront * (-1)
+ topRightBackDotProduct * (1-xWeightTopLeftFront) * (1-zWeightTopLeftFront) *(-1)
+ bottomLeftFrontDotProduct * xWeightTopLeftFront * zWeightTopLeftFront * (1)
+ bottomLeftBackDotProduct * xWeightTopLeftFront * (1-zWeightTopLeftFront) * (1)
+ bottomRightFrontDotProduct * (1-xWeightTopLeftFront) * zWeightTopLeftFront * (1)
+ bottomRightBackDotProduct * (1-xWeightTopLeftFront) * (1-zWeightTopLeftFront) *(1);
xf = topLeftFrontDotProduct * yWeightTopLeftFront * zWeightTopLeftFront *(-1)
+ topLeftBackDotProduct * yWeightTopLeftFront * (1-zWeightTopLeftFront) *(-1)
+ topRightFrontDotProduct * yWeightTopLeftFront * zWeightTopLeftFront * 1
+ topRightBackDotProduct * yWeightTopLeftFront * (1-zWeightTopLeftFront) * 1
+ bottomLeftFrontDotProduct * (1-yWeightTopLeftFront) * zWeightTopLeftFront * (-1)
+ bottomLeftBackDotProduct * (1-yWeightTopLeftFront) * (1-zWeightTopLeftFront) * (-1)
+ bottomRightFrontDotProduct * (1-yWeightTopLeftFront) * zWeightTopLeftFront * (1)
+ bottomRightBackDotProduct * (1-yWeightTopLeftFront) *(1-zWeightTopLeftFront) * (1);
zf = topLeftFrontDotProduct * yWeightTopLeftFront * xWeightTopLeftFront * (-1)
+ topLeftBackDotProduct * yWeightTopLeftFront * xWeightTopLeftFront *(1)
+ topRightFrontDotProduct * yWeightTopLeftFront * (1-xWeightTopLeftFront) *(-1)
+ topRightBackDotProduct * yWeightTopLeftFront * (1-xWeightTopLeftFront) *(1)
+ bottomLeftFrontDotProduct * (1-yWeightTopLeftFront) * xWeightTopLeftFront * (-1)
+ bottomLeftBackDotProduct * (1-yWeightTopLeftFront) * xWeightTopLeftFront * (1)
+ bottomRightFrontDotProduct * (1-yWeightTopLeftFront) * (1-xWeightTopLeftFront) *(-1)
+ bottomRightBackDotProduct * (1-yWeightTopLeftFront) * (1-xWeightTopLeftFront) * 1;
if(threadIdx.x==0)
{
gridData[threadIdx.y*3] = zf * (inputImages_depth-1) / 2;
gridData[threadIdx.y*3+1] = yf * (inputImages_height-1) / 2;
gridData[threadIdx.y*3+2] = xf * (inputImages_width-1) / 2;
}
}// must put a big if condition in order not to hang at __syncthreads()...
__syncthreads();
if(threadIdx.y==0 && withinGridBounds)
gradGrids_data[b*gradGrids_strideBatch + yOut*gradGrids_strideHeight + xOut*gradGrids_strideWidth + zOut*gradGrids_strideDepth + threadIdx.x] = gridData[threadIdx.x];
}
static int cunn_BilinearSamplerBHWD_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *gradInputImages = (THCudaTensor *)luaT_checkudata(L, 4, "torch.CudaTensor");
THCudaTensor *gradGrids = (THCudaTensor *)luaT_checkudata(L, 5, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 6, "torch.CudaTensor");
dim3 blocks((gradOutput->size[3]+15)/16, gradOutput->size[2], gradOutput->size[0]*gradOutput->size[1]);
dim3 threads(48,16);
hipLaunchKernelGGL(( backwardBilinearSampling <false>) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state) ,
THCudaTensor_data(state, inputImages),
THCudaTensor_stride(state, inputImages, 0),
THCudaTensor_stride(state, inputImages, 4),
THCudaTensor_stride(state, inputImages, 1),
THCudaTensor_stride(state, inputImages, 2),
THCudaTensor_stride(state, inputImages, 3),
THCudaTensor_data(state, gradInputImages),
THCudaTensor_stride(state, gradInputImages, 0),
THCudaTensor_stride(state, gradInputImages, 4),
THCudaTensor_stride(state, gradInputImages, 1),
THCudaTensor_stride(state, gradInputImages, 2),
THCudaTensor_stride(state, gradInputImages, 3),
THCudaTensor_data(state, grids),
THCudaTensor_stride(state, grids, 0),
THCudaTensor_stride(state, grids, 4),
THCudaTensor_stride(state, grids, 1),
THCudaTensor_stride(state, grids, 2),
THCudaTensor_stride(state, grids, 3),
THCudaTensor_data(state, gradGrids),
THCudaTensor_stride(state, gradGrids, 0),
THCudaTensor_stride(state, gradGrids, 4),
THCudaTensor_stride(state, gradGrids, 1),
THCudaTensor_stride(state, gradGrids, 2),
THCudaTensor_stride(state, gradGrids, 3),
THCudaTensor_data(state, gradOutput),
THCudaTensor_stride(state, gradOutput, 0),
THCudaTensor_stride(state, gradOutput, 4),
THCudaTensor_stride(state, gradOutput, 1),
THCudaTensor_stride(state, gradOutput, 2),
THCudaTensor_stride(state, gradOutput, 3),
THCudaTensor_size(state, inputImages, 4),
THCudaTensor_size(state, inputImages, 1),
THCudaTensor_size(state, inputImages, 2),
THCudaTensor_size(state, inputImages, 3),
THCudaTensor_size(state, gradOutput, 1),
THCudaTensor_size(state, gradOutput, 3));
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in BilinearSampler.updateGradInput: %s\n", hipGetErrorString(err));
THError("aborting");
}
return 1;
}
// Perspective Transformation
__global__ void bilinearSamplingFromGridPerspective(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideDepth, int inputImages_strideHeight, int inputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideDepth, int grids_strideHeight, int grids_strideWidth,
float* output_data, int output_strideBatch, int output_strideChannels, int output_strideDist, int output_strideHeight, int output_strideWidth,
int inputImages_channels, int inputImages_depth, int inputImages_height, int inputImages_width, int output_dist, int output_width, float focal_length)
{
// each (32,8) block handles 8 output pixels (for coalescing the grid read)
// x,y = coordinates (xOut = blockIdx.x*blockDim.y + threadIdx.y)
// z = batch index
// threadIdx.x : used for features (coalescing is trivial)
const int xOut = blockIdx.x*blockDim.y+threadIdx.y;
const bool withinImageBounds = xOut < output_width;
const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 4 < output_width;
const int yOut = blockIdx.y;
const int width = inputImages_width;
const int height = inputImages_height;
const int dist = output_dist;
const int b = blockIdx.z/dist;
const int disOut = blockIdx.z%dist;
float zf,yf,xf, disf;
__shared__ float gridData[32];
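// 8 output pixels per block (indexed by threadIdx.y), 4 grid values per pixel (z, y, x, dis) = 32 floats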
if (threadIdx.y==0 && withinGridBounds)
{
gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + disOut*grids_strideDepth + threadIdx.x];
}
__syncthreads();
if(!withinImageBounds) return;
zf = gridData[threadIdx.y*4];
yf = gridData[threadIdx.y*4+1];
xf = gridData[threadIdx.y*4+2];
disf = gridData[threadIdx.y*4+3];
//yf = yf / disf;
//xf = xf / disf;
//zf = zf / disf - (focal_length + 0.5);
const int outAddress = output_strideBatch * b + output_strideDist * disOut + output_strideHeight * yOut + output_strideWidth * xOut;
int zInFrontTopLeft, yInFrontTopLeft, xInFrontTopLeft;
float yWeightFrontTopLeft, xWeightFrontTopLeft, zWeightFrontTopLeft;
getTopLeftFront(zf, inputImages_depth, zInFrontTopLeft, zWeightFrontTopLeft);
getTopLeftFront(yf, inputImages_height, yInFrontTopLeft, yWeightFrontTopLeft);
getTopLeftFront(xf, inputImages_width, xInFrontTopLeft, xWeightFrontTopLeft);
const int inFrontTopLeftAddress = inputImages_strideBatch * b + inputImages_strideDepth * zInFrontTopLeft + inputImages_strideHeight * yInFrontTopLeft + inputImages_strideWidth * xInFrontTopLeft;
const int inFrontTopRightAddress = inFrontTopLeftAddress + inputImages_strideWidth;
const int inFrontBottomLeftAddress = inFrontTopLeftAddress + inputImages_strideHeight;
const int inFrontBottomRightAddress = inFrontBottomLeftAddress + inputImages_strideWidth;
const int inBackTopLeftAddress = inFrontTopLeftAddress + inputImages_strideDepth;
const int inBackTopRightAddress = inBackTopLeftAddress + inputImages_strideWidth;
const int inBackBottomLeftAddress = inBackTopLeftAddress + inputImages_strideHeight;
const int inBackBottomRightAddress = inBackBottomLeftAddress + inputImages_strideWidth;
float v=0;
float inFrontTopLeft=0;
float inFrontTopRight=0;
float inFrontBottomLeft=0;
float inFrontBottomRight=0;
float inBackTopLeft=0;
float inBackTopRight=0;
float inBackBottomLeft=0;
float inBackBottomRight=0;
bool frontTopLeftIsIn = between(xInFrontTopLeft, 0, inputImages_width-1)
&& between(yInFrontTopLeft, 0, inputImages_height-1) && between(zInFrontTopLeft, 0, inputImages_depth-1);
bool backTopLeftIsIn = between(xInFrontTopLeft, 0, inputImages_width-1)
&& between(yInFrontTopLeft, 0, inputImages_height-1) && between(zInFrontTopLeft+1, 0, inputImages_depth-1);
bool frontTopRightIsIn = between(xInFrontTopLeft+1, 0, inputImages_width-1)
&& between(yInFrontTopLeft, 0, inputImages_height-1) && between(zInFrontTopLeft, 0, inputImages_depth-1);
bool backTopRightIsIn = between(xInFrontTopLeft+1, 0, inputImages_width-1)
&& between(yInFrontTopLeft, 0, inputImages_height-1) && between(zInFrontTopLeft+1, 0, inputImages_depth-1);
bool frontBottomLeftIsIn = between(xInFrontTopLeft, 0, inputImages_width-1)
&& between(yInFrontTopLeft+1, 0, inputImages_height-1) && between(zInFrontTopLeft, 0, inputImages_depth-1);
bool backBottomLeftIsIn = between(xInFrontTopLeft, 0, inputImages_width-1)
&& between(yInFrontTopLeft+1, 0, inputImages_height-1) && between(zInFrontTopLeft+1, 0, inputImages_depth-1);
bool frontBottomRightIsIn = between(xInFrontTopLeft+1, 0, inputImages_width-1)
&& between(yInFrontTopLeft+1, 0, inputImages_height-1) && between(zInFrontTopLeft, 0, inputImages_depth-1);
bool backBottomRightIsIn = between(xInFrontTopLeft+1, 0, inputImages_width-1)
&& between(yInFrontTopLeft+1, 0, inputImages_height-1) && between(zInFrontTopLeft+1, 0, inputImages_depth-1);
// interpolation happens here
for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x)
{
if(frontTopLeftIsIn) inFrontTopLeft = inputImages_data[inFrontTopLeftAddress + t];
if(frontTopRightIsIn) inFrontTopRight = inputImages_data[inFrontTopRightAddress + t];
if(frontBottomLeftIsIn) inFrontBottomLeft = inputImages_data[inFrontBottomLeftAddress + t];
if(frontBottomRightIsIn) inFrontBottomRight = inputImages_data[inFrontBottomRightAddress + t];
if(backTopLeftIsIn) inBackTopLeft = inputImages_data[inBackTopLeftAddress + t];
if(backTopRightIsIn) inBackTopRight = inputImages_data[inBackTopRightAddress + t];
if(backBottomLeftIsIn) inBackBottomLeft = inputImages_data[inBackBottomLeftAddress + t];
if(backBottomRightIsIn) inBackBottomRight = inputImages_data[inBackBottomRightAddress + t];
v = xWeightFrontTopLeft * yWeightFrontTopLeft * zWeightFrontTopLeft * inFrontTopLeft
+ (1 - xWeightFrontTopLeft) * yWeightFrontTopLeft * zWeightFrontTopLeft * inFrontTopRight
+ xWeightFrontTopLeft * (1 - yWeightFrontTopLeft) * zWeightFrontTopLeft * inFrontBottomLeft
+ (1 - xWeightFrontTopLeft) * (1 - yWeightFrontTopLeft) * zWeightFrontTopLeft * inFrontBottomRight
+ xWeightFrontTopLeft * yWeightFrontTopLeft * (1 - zWeightFrontTopLeft) * inBackTopLeft
+ (1 - xWeightFrontTopLeft) * yWeightFrontTopLeft * (1-zWeightFrontTopLeft) * inBackTopRight
+ xWeightFrontTopLeft * (1 - yWeightFrontTopLeft) * (1-zWeightFrontTopLeft) * inBackBottomLeft
+ (1 - xWeightFrontTopLeft) * (1 - yWeightFrontTopLeft) * (1-zWeightFrontTopLeft) * inBackBottomRight;
output_data[outAddress + t] = v;
}
}
static int cunn_BilinearSamplerPerspective_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor *)luaT_checkudata(L, 4, "torch.CudaTensor");
float focal_length = lua_tonumber(L, 5);
dim3 blocks((output->size[3]+7)/8, output->size[2], output->size[0]*output->size[1]);
dim3 threads(32,8);
/* assume BHWD */
hipLaunchKernelGGL(( bilinearSamplingFromGridPerspective) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state) , THCudaTensor_data(state, inputImages),
THCudaTensor_stride(state, inputImages, 0),
THCudaTensor_stride(state, inputImages, 4),
THCudaTensor_stride(state, inputImages, 1),
THCudaTensor_stride(state, inputImages, 2),
THCudaTensor_stride(state, inputImages, 3),
THCudaTensor_data(state, grids),
THCudaTensor_stride(state, grids, 0),
THCudaTensor_stride(state, grids, 4),
THCudaTensor_stride(state, grids, 1),
THCudaTensor_stride(state, grids, 2),
THCudaTensor_stride(state, grids, 3),
THCudaTensor_data(state, output),
THCudaTensor_stride(state, output, 0),
THCudaTensor_stride(state, output, 4),
THCudaTensor_stride(state, output, 1),
THCudaTensor_stride(state, output, 2),
THCudaTensor_stride(state, output, 3),
THCudaTensor_size(state, inputImages, 4),
THCudaTensor_size(state, inputImages, 1),
THCudaTensor_size(state, inputImages, 2),
THCudaTensor_size(state, inputImages, 3),
THCudaTensor_size(state, output, 1),
THCudaTensor_size(state, output, 3),
focal_length);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in BilinearSampler.updateOutput: %s\n", hipGetErrorString(err));
THError("aborting");
}
return 1;
}
template<bool onlyGrid> __global__ void backwardBilinearSamplingPerspective(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideDepth, int inputImages_strideHeight, int inputImages_strideWidth,
float* gradInputImages_data, int gradInputImages_strideBatch, int gradInputImages_strideChannels, int gradInputImages_strideDepth, int gradInputImages_strideHeight, int gradInputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideDepth, int grids_strideHeight, int grids_strideWidth,
float* gradGrids_data, int gradGrids_strideBatch, int gradGrids_strideYX, int gradGrids_strideDepth, int gradGrids_strideHeight, int gradGrids_strideWidth,
float* gradOutput_data, int gradOutput_strideBatch, int gradOutput_strideChannels, int gradOutput_strideDist, int gradOutput_strideHeight, int gradOutput_strideWidth,
int inputImages_channels, int inputImages_depth, int inputImages_height, int inputImages_width, int gradOutput_dist, int gradOutput_width, float focal_length)
{
// each (32,8) block handles 8 output pixels (for coalescing the grid read)
// x,y = coordinates
// z = batch index
// threads : used for features
const int xOut = blockIdx.x*blockDim.y+threadIdx.y;
const bool withinImageBounds = xOut < gradOutput_width;
const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 4 < gradOutput_width;
const int yOut = blockIdx.y;
const int width = inputImages_width;
const int height = inputImages_height;
const int dist = gradOutput_dist;
const int b = blockIdx.z/dist;
const int disOut = blockIdx.z%dist;
float zf,yf,xf, disf;
__shared__ float gridData[32];
if (threadIdx.y==0 && withinGridBounds)
{
gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + disOut*grids_strideDepth + threadIdx.x];
}
__syncthreads();
if(withinImageBounds)
{
zf = gridData[threadIdx.y*4];
yf = gridData[threadIdx.y*4+1];
xf = gridData[threadIdx.y*4+2];
disf = gridData[threadIdx.y*4+3];
//yf = yf / disf;
//xf = xf / disf;
//zf = zf / disf - (focal_length + 0.5);
int zInFrontTopLeft, yInFrontTopLeft, xInFrontTopLeft;
float yWeightFrontTopLeft, xWeightFrontTopLeft, zWeightFrontTopLeft;
getTopLeftFront(zf, inputImages_depth, zInFrontTopLeft, zWeightFrontTopLeft);
getTopLeftFront(yf, inputImages_height, yInFrontTopLeft, yWeightFrontTopLeft);
getTopLeftFront(xf, inputImages_width, xInFrontTopLeft, xWeightFrontTopLeft);
const int inFrontTopLeftAddress = inputImages_strideBatch * b + inputImages_strideDepth * zInFrontTopLeft + inputImages_strideHeight * yInFrontTopLeft + inputImages_strideWidth * xInFrontTopLeft;
const int inFrontTopRightAddress = inFrontTopLeftAddress + inputImages_strideWidth;
const int inFrontBottomLeftAddress = inFrontTopLeftAddress + inputImages_strideHeight;
const int inFrontBottomRightAddress = inFrontBottomLeftAddress + inputImages_strideWidth;
const int inBackTopLeftAddress = inFrontTopLeftAddress + inputImages_strideDepth;
const int inBackTopRightAddress = inBackTopLeftAddress + inputImages_strideWidth;
const int inBackBottomLeftAddress = inBackTopLeftAddress + inputImages_strideHeight;
const int inBackBottomRightAddress = inBackBottomLeftAddress + inputImages_strideWidth;
const int gradInputImagesFrontTopLeftAddress = gradInputImages_strideBatch * b + gradInputImages_strideDepth * zInFrontTopLeft + gradInputImages_strideHeight * yInFrontTopLeft + gradInputImages_strideWidth *xInFrontTopLeft;
const int gradInputImagesFrontTopRightAddress = gradInputImagesFrontTopLeftAddress + gradInputImages_strideWidth;
const int gradInputImagesFrontBottomLeftAddress = gradInputImagesFrontTopLeftAddress + gradInputImages_strideHeight;
const int gradInputImagesFrontBottomRightAddress = gradInputImagesFrontBottomLeftAddress + gradInputImages_strideWidth;
const int gradInputImagesBackTopLeftAddress = gradInputImagesFrontTopLeftAddress + gradInputImages_strideDepth;
const int gradInputImagesBackTopRightAddress = gradInputImagesBackTopLeftAddress + gradInputImages_strideWidth;
const int gradInputImagesBackBottomLeftAddress = gradInputImagesBackTopLeftAddress + gradInputImages_strideHeight;
const int gradInputImagesBackBottomRightAddress = gradInputImagesBackBottomLeftAddress + gradInputImages_strideWidth;
const int gradOutputAddress = gradOutput_strideBatch * b + gradOutput_strideDist * disOut + gradOutput_strideHeight * yOut + gradOutput_strideWidth * xOut;
float frontTopLeftDotProduct = 0;
float frontTopRightDotProduct = 0;
float frontBottomLeftDotProduct = 0;
float frontBottomRightDotProduct = 0;
float backTopLeftDotProduct = 0;
float backTopRightDotProduct = 0;
float backBottomLeftDotProduct = 0;
float backBottomRightDotProduct = 0;
bool frontTopLeftIsIn = between(xInFrontTopLeft, 0, inputImages_width-1)
&& between(yInFrontTopLeft, 0, inputImages_height-1) && between(zInFrontTopLeft, 0, inputImages_depth-1);
bool backTopLeftIsIn = between(xInFrontTopLeft, 0, inputImages_width-1)
&& between(yInFrontTopLeft, 0, inputImages_height-1) && between(zInFrontTopLeft+1, 0, inputImages_depth-1);
bool frontTopRightIsIn = between(xInFrontTopLeft+1, 0, inputImages_width-1)
&& between(yInFrontTopLeft, 0, inputImages_height-1) && between(zInFrontTopLeft, 0, inputImages_depth-1);
bool backTopRightIsIn = between(xInFrontTopLeft+1, 0, inputImages_width-1)
&& between(yInFrontTopLeft, 0, inputImages_height-1) && between(zInFrontTopLeft+1, 0, inputImages_depth-1);
bool frontBottomLeftIsIn = between(xInFrontTopLeft, 0, inputImages_width-1)
&& between(yInFrontTopLeft+1, 0, inputImages_height-1) && between(zInFrontTopLeft, 0, inputImages_depth-1);
bool backBottomLeftIsIn = between(xInFrontTopLeft, 0, inputImages_width-1)
&& between(yInFrontTopLeft+1, 0, inputImages_height-1) && between(zInFrontTopLeft+1, 0, inputImages_depth-1);
bool frontBottomRightIsIn = between(xInFrontTopLeft+1, 0, inputImages_width-1)
&& between(yInFrontTopLeft+1, 0, inputImages_height-1) && between(zInFrontTopLeft, 0, inputImages_depth-1);
bool backBottomRightIsIn = between(xInFrontTopLeft+1, 0, inputImages_width-1)
&& between(yInFrontTopLeft+1, 0, inputImages_height-1) && between(zInFrontTopLeft+1, 0, inputImages_depth-1);
/*
In that loop we accumulate
- gradients into the gradInputImages array with atomic adds
- we compute the dot product that we need for the grid gradient
*/
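/* atomic adds are needed because neighbouring output samples can map to the same input voxel,
so several threads may update the same gradInputImages element concurrently */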
for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x)
{
float gradOutValue = gradOutput_data[gradOutputAddress + t];
if(frontTopLeftIsIn)
{
float inFrontTopLeft = inputImages_data[inFrontTopLeftAddress + t];
frontTopLeftDotProduct += inFrontTopLeft * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesFrontTopLeftAddress + t], xWeightFrontTopLeft * yWeightFrontTopLeft * zWeightFrontTopLeft * gradOutValue);
}
if(frontTopRightIsIn)
{
float inFrontTopRight = inputImages_data[inFrontTopRightAddress + t];
frontTopRightDotProduct += inFrontTopRight * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesFrontTopRightAddress + t], (1 - xWeightFrontTopLeft) * yWeightFrontTopLeft * zWeightFrontTopLeft * gradOutValue);
}
if(frontBottomLeftIsIn)
{
float inFrontBottomLeft = inputImages_data[inFrontBottomLeftAddress + t];
frontBottomLeftDotProduct += inFrontBottomLeft * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesFrontBottomLeftAddress + t], xWeightFrontTopLeft * (1 - yWeightFrontTopLeft) * zWeightFrontTopLeft * gradOutValue);
}
if(frontBottomRightIsIn)
{
float inFrontBottomRight = inputImages_data[inFrontBottomRightAddress + t];
frontBottomRightDotProduct += inFrontBottomRight * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesFrontBottomRightAddress + t], (1 - xWeightFrontTopLeft) * (1 - yWeightFrontTopLeft) * zWeightFrontTopLeft * gradOutValue);
}
if(backTopLeftIsIn)
{
float inBackTopLeft = inputImages_data[inBackTopLeftAddress + t];
backTopLeftDotProduct += inBackTopLeft * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBackTopLeftAddress + t], xWeightFrontTopLeft * yWeightFrontTopLeft * (1-zWeightFrontTopLeft) * gradOutValue);
}
if(backTopRightIsIn)
{
float inBackTopRight = inputImages_data[inBackTopRightAddress + t];
backTopRightDotProduct += inBackTopRight * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBackTopRightAddress + t], (1 - xWeightFrontTopLeft) * yWeightFrontTopLeft * (1-zWeightFrontTopLeft) * gradOutValue);
}
if(backBottomLeftIsIn)
{
float inBackBottomLeft = inputImages_data[inBackBottomLeftAddress + t];
backBottomLeftDotProduct += inBackBottomLeft * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBackBottomLeftAddress + t], xWeightFrontTopLeft * (1 - yWeightFrontTopLeft) * (1-zWeightFrontTopLeft) * gradOutValue);
}
if(backBottomRightIsIn)
{
float inBackBottomRight = inputImages_data[inBackBottomRightAddress + t];
backBottomRightDotProduct += inBackBottomRight * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBackBottomRightAddress + t], (1 - xWeightFrontTopLeft) * (1 - yWeightFrontTopLeft) * (1-zWeightFrontTopLeft) * gradOutValue);
}
}
/*
Here we reduce the dot product and compute the grid gradient before writing it.
*/
/* could do shuffles and use no shmem at all but cuda arch is 2.0 */
__shared__ volatile float __shmem[8][32];
__shmem[threadIdx.y][threadIdx.x] = frontTopLeftDotProduct;
sumReduceShMemPerspective(__shmem[threadIdx.y]);
frontTopLeftDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = backTopLeftDotProduct;
sumReduceShMemPerspective(__shmem[threadIdx.y]);
backTopLeftDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = frontTopRightDotProduct;
sumReduceShMemPerspective(__shmem[threadIdx.y]);
frontTopRightDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = backTopRightDotProduct;
sumReduceShMemPerspective(__shmem[threadIdx.y]);
backTopRightDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = frontBottomLeftDotProduct;
sumReduceShMemPerspective(__shmem[threadIdx.y]);
frontBottomLeftDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = backBottomLeftDotProduct;
sumReduceShMemPerspective(__shmem[threadIdx.y]);
backBottomLeftDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = frontBottomRightDotProduct;
sumReduceShMemPerspective(__shmem[threadIdx.y]);
frontBottomRightDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = backBottomRightDotProduct;
sumReduceShMemPerspective(__shmem[threadIdx.y]);
backBottomRightDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
float dyf = frontTopLeftDotProduct * xWeightFrontTopLeft * zWeightFrontTopLeft * (-1)
+ backTopLeftDotProduct * xWeightFrontTopLeft * (1-zWeightFrontTopLeft) * (-1)
+ frontTopRightDotProduct * (1-xWeightFrontTopLeft) * zWeightFrontTopLeft * (-1)
+ backTopRightDotProduct * (1-xWeightFrontTopLeft) * (1-zWeightFrontTopLeft) *(-1)
+ frontBottomLeftDotProduct * xWeightFrontTopLeft * zWeightFrontTopLeft * (1)
+ backBottomLeftDotProduct * xWeightFrontTopLeft * (1-zWeightFrontTopLeft) * (1)
+ frontBottomRightDotProduct * (1-xWeightFrontTopLeft) * zWeightFrontTopLeft * (1)
+ backBottomRightDotProduct * (1-xWeightFrontTopLeft) * (1-zWeightFrontTopLeft) *(1);
float dxf = frontTopLeftDotProduct * yWeightFrontTopLeft * zWeightFrontTopLeft *(-1)
+ backTopLeftDotProduct * yWeightFrontTopLeft * (1-zWeightFrontTopLeft) *(-1)
+ frontTopRightDotProduct * yWeightFrontTopLeft * zWeightFrontTopLeft * 1
+ backTopRightDotProduct * yWeightFrontTopLeft * (1-zWeightFrontTopLeft) * 1
+ frontBottomLeftDotProduct * (1-yWeightFrontTopLeft) * zWeightFrontTopLeft * (-1)
+ backBottomLeftDotProduct * (1-yWeightFrontTopLeft) * (1-zWeightFrontTopLeft) * (-1)
+ frontBottomRightDotProduct * (1-yWeightFrontTopLeft) * zWeightFrontTopLeft * 1
+ backBottomRightDotProduct * (1-yWeightFrontTopLeft) *(1-zWeightFrontTopLeft) * 1;
float dzf = frontTopLeftDotProduct * yWeightFrontTopLeft * xWeightFrontTopLeft * (-1)
+ backTopLeftDotProduct * yWeightFrontTopLeft * xWeightFrontTopLeft *1
+ frontTopRightDotProduct * yWeightFrontTopLeft * (1-xWeightFrontTopLeft) *(-1)
+ backTopRightDotProduct * yWeightFrontTopLeft * (1-xWeightFrontTopLeft) *1
+ frontBottomLeftDotProduct * (1-yWeightFrontTopLeft) * xWeightFrontTopLeft * (-1)
+ backBottomLeftDotProduct * (1-yWeightFrontTopLeft) * xWeightFrontTopLeft * 1
+ frontBottomRightDotProduct * (1-yWeightFrontTopLeft) * (1-xWeightFrontTopLeft) *(-1)
+ backBottomRightDotProduct * (1-yWeightFrontTopLeft) * (1-xWeightFrontTopLeft) * 1;
if(threadIdx.x==0)
{
gridData[threadIdx.y*4] = dzf * (inputImages_depth-1) / 2;
gridData[threadIdx.y*4+1] = dyf * (inputImages_height-1) / 2;
gridData[threadIdx.y*4+2] = dxf * (inputImages_width-1) / 2;
gridData[threadIdx.y*4+3] = 0;
//-(dyf* (inputImages_height-1) / 2*yf + dxf* (inputImages_width-1) / 2*xf + dzf* (inputImages_depth-1) / 2*(zf+focal_length+0.5))/disf;
}
}// the whole gradient computation sits inside one big if so that every thread reaches the __syncthreads() below and the block does not hang
__syncthreads();
if(threadIdx.y==0 && withinGridBounds)
gradGrids_data[b*gradGrids_strideBatch + yOut*gradGrids_strideHeight + xOut*gradGrids_strideWidth + disOut*gradGrids_strideDepth + threadIdx.x] = gridData[threadIdx.x];
}
static int cunn_BilinearSamplerPerspective_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *gradInputImages = (THCudaTensor *)luaT_checkudata(L, 4, "torch.CudaTensor");
THCudaTensor *gradGrids = (THCudaTensor *)luaT_checkudata(L, 5, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 6, "torch.CudaTensor");
float focal_length = lua_tonumber(L, 7);
dim3 blocks((gradOutput->size[3]+7)/8, gradOutput->size[2], gradOutput->size[0]*gradOutput->size[1]);
dim3 threads(32,8);
hipLaunchKernelGGL(( backwardBilinearSamplingPerspective <false>) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state) ,
THCudaTensor_data(state, inputImages),
THCudaTensor_stride(state, inputImages, 0),
THCudaTensor_stride(state, inputImages, 4),
THCudaTensor_stride(state, inputImages, 1),
THCudaTensor_stride(state, inputImages, 2),
THCudaTensor_stride(state, inputImages, 3),
THCudaTensor_data(state, gradInputImages),
THCudaTensor_stride(state, gradInputImages, 0),
THCudaTensor_stride(state, gradInputImages, 4),
THCudaTensor_stride(state, gradInputImages, 1),
THCudaTensor_stride(state, gradInputImages, 2),
THCudaTensor_stride(state, gradInputImages, 3),
THCudaTensor_data(state, grids),
THCudaTensor_stride(state, grids, 0),
THCudaTensor_stride(state, grids, 4),
THCudaTensor_stride(state, grids, 1),
THCudaTensor_stride(state, grids, 2),
THCudaTensor_stride(state, grids, 3),
THCudaTensor_data(state, gradGrids),
THCudaTensor_stride(state, gradGrids, 0),
THCudaTensor_stride(state, gradGrids, 4),
THCudaTensor_stride(state, gradGrids, 1),
THCudaTensor_stride(state, gradGrids, 2),
THCudaTensor_stride(state, gradGrids, 3),
THCudaTensor_data(state, gradOutput),
THCudaTensor_stride(state, gradOutput, 0),
THCudaTensor_stride(state, gradOutput, 4),
THCudaTensor_stride(state, gradOutput, 1),
THCudaTensor_stride(state, gradOutput, 2),
THCudaTensor_stride(state, gradOutput, 3),
THCudaTensor_size(state, inputImages, 4),
THCudaTensor_size(state, inputImages, 1),
THCudaTensor_size(state, inputImages, 2),
THCudaTensor_size(state, inputImages, 3),
THCudaTensor_size(state, gradOutput, 1),
THCudaTensor_size(state, gradOutput, 3),
focal_length);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in BilinearSampler.updateGradInput: %s\n", hipGetErrorString(err));
THError("aborting");
}
return 1;
}
static const struct luaL_Reg cunn_BilinearSamplerPerspective__ [] = {
{"BilinearSamplerBHWD_updateOutput", cunn_BilinearSamplerBHWD_updateOutput},
{"BilinearSamplerBHWD_updateGradInput", cunn_BilinearSamplerBHWD_updateGradInput},
{"BilinearSamplerPerspective_updateOutput", cunn_BilinearSamplerPerspective_updateOutput},
{"BilinearSamplerPerspective_updateGradInput", cunn_BilinearSamplerPerspective_updateGradInput},
//{"BilinearSamplerPerspective_updateGradInputOnlyGrid", cunn_BilinearSamplerBHWD_updateGradInputOnlyGrid},
{NULL, NULL}
};
static void cunn_BilinearSamplerPerspective_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_BilinearSamplerPerspective__, "nn");
lua_pop(L,1);
}
| 7e12770cc0f3c2539b1ca1fc63650c92a4f488d0.cu | #include "utils.h"
// code adapted from github repo
// implemented by Yijie Guo ([email protected]) and Xinchen Yan ([email protected])
// Bilinear sampling is done in BHWD (coalescing is not obvious in BDHW)
// we assume BHWD format in inputImages
// we assume BHW(YX) format on grids
__device__ void getTopLeftFront(float x, int width, int& point, float& weight)
{
/* for interpolation :
stores in point and weight :
- the x-coordinate of the pixel on the left (or y-coordinate of the upper pixel)
- the weight for interpolating
*/
float xcoord = (x + 1) * (width - 1) / 2;
point = floor(xcoord);
weight = 1 - (xcoord - point);
}
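// e.g. width = 5 and x = 0 (the centre of the normalised [-1,1] range) gives xcoord = 2,
// so point = 2 and weight = 1: the sample falls exactly on pixel 2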
__device__ bool between(int value, int lowerBound, int upperBound)
{
return (value >= lowerBound && value <= upperBound);
}
__device__ void sumReduceShMem(volatile float s[])
{
/* works on the 48-element rows used here */
/* sums up a shared memory array of 48 elements, stores the result in s[0] */
/* whole warp can then read first element (broadcasting) */
//if(threadIdx.x<32) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+32]; }
if(threadIdx.x<16) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+16] + s[threadIdx.x+32]; }
if(threadIdx.x<8) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+8]; }
if(threadIdx.x<4) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+4]; }
if(threadIdx.x<2) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+2]; }
if(threadIdx.x<1) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+1]; }
}
__device__ void sumReduceShMemPerspective(volatile float s[])
{
/* obviously only works for 32 elements */
/* sums up a shared memory array of 32 elements, stores it in s[0] */
/* whole warp can then read first element (broadcasting) */
if(threadIdx.x<16) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+16]; }
if(threadIdx.x<8) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+8]; }
if(threadIdx.x<4) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+4]; }
if(threadIdx.x<2) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+2]; }
if(threadIdx.x<1) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+1]; }
}
// Affine Transformation
__global__ void bilinearSamplingFromGrid(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideDepth, int inputImages_strideHeight, int inputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideDepth, int grids_strideHeight, int grids_strideWidth,
float* output_data, int output_strideBatch, int output_strideChannels, int output_strideDepth, int output_strideHeight, int output_strideWidth,
int inputImages_channels, int inputImages_depth, int inputImages_height, int inputImages_width, int output_depth, int output_width)
{
// each (48,16) block handles 16 output pixels (for coalescing the grid read)
// x,y = coordinates (xOut = blockIdx.x*blockDim.y + threadIdx.y)
// z = batch index
// threadIdx.x : used for features (coalescing is trivial)
const int xOut = blockIdx.x*blockDim.y+threadIdx.y;
const bool withinImageBounds = xOut < output_width;
const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 3 < output_width;
const int yOut = blockIdx.y;
const int width = inputImages_width;
const int height = inputImages_height;
const int depth = output_depth;
const int b = blockIdx.z/depth;
const int zOut = blockIdx.z%depth;
float zf, yf,xf;
__shared__ float gridData[48];
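// 16 output pixels per block (indexed by threadIdx.y), 3 grid values per pixel (z, y, x) = 48 floats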
if (threadIdx.y==0 && withinGridBounds)
{
gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + zOut*grids_strideDepth + threadIdx.x];
}
__syncthreads();
if(!withinImageBounds) return;
zf = gridData[threadIdx.y*3];
yf = gridData[threadIdx.y*3+1];
xf = gridData[threadIdx.y*3+2];
//printf("%.3f %.3f %.3f\n", zf, yf, xf);
int yInTopLeftFront, xInTopLeftFront, zInTopLeftFront;
float yWeightTopLeftFront, xWeightTopLeftFront, zWeightTopLeftFront;
getTopLeftFront(zf, inputImages_depth, zInTopLeftFront, zWeightTopLeftFront);
getTopLeftFront(yf, inputImages_height, yInTopLeftFront, yWeightTopLeftFront);
getTopLeftFront(xf, inputImages_width, xInTopLeftFront, xWeightTopLeftFront);
//printf("GPU y[%.3f] x[%.3f] z[%.3f] WeightTopLeftFront\n",yWeightTopLeftFront, xWeightTopLeftFront, zWeightTopLeftFront);
// printf("GPU y[%d] x[%d] z[%d] InTopLeftFront\n",yInTopLeftFront, xInTopLeftFront, zInTopLeftFront);
const int outAddress = output_strideBatch * b + output_strideHeight * yOut + output_strideWidth * xOut + output_strideDepth * zOut;
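// addresses of the 8 voxels surrounding the sample: back = +1 depth step, right = +1 width step,
// bottom = +1 height step relative to the top-left-front corner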
const int inTopLeftFrontAddress = inputImages_strideBatch * b + inputImages_strideHeight * yInTopLeftFront
+ inputImages_strideWidth * xInTopLeftFront + inputImages_strideDepth * zInTopLeftFront;
const int inTopLeftBackAddress = inTopLeftFrontAddress + inputImages_strideDepth;
const int inTopRightFrontAddress = inTopLeftFrontAddress + inputImages_strideWidth;
const int inTopRightBackAddress = inTopRightFrontAddress + inputImages_strideDepth;
const int inBottomLeftFrontAddress = inTopLeftFrontAddress + inputImages_strideHeight;
const int inBottomLeftBackAddress = inBottomLeftFrontAddress + inputImages_strideDepth;
const int inBottomRightFrontAddress = inBottomLeftFrontAddress + inputImages_strideWidth;
const int inBottomRightBackAddress = inBottomRightFrontAddress + inputImages_strideDepth;
float v=0;
float inTopLeftFront=0;
float inTopLeftBack=0;
float inTopRightFront=0;
float inTopRightBack=0;
float inBottomLeftFront=0;
float inBottomLeftBack=0;
float inBottomRightFront=0;
float inBottomRightBack=0;
bool topLeftFrontIsIn = between(xInTopLeftFront, 0, inputImages_width-1)
&& between(yInTopLeftFront, 0, inputImages_height-1) && between(zInTopLeftFront, 0, inputImages_depth-1);
bool topLeftBackIsIn = between(xInTopLeftFront, 0, inputImages_width-1)
&& between(yInTopLeftFront, 0, inputImages_height-1) && between(zInTopLeftFront+1, 0, inputImages_depth-1);
bool topRightFrontIsIn = between(xInTopLeftFront+1, 0, inputImages_width-1)
&& between(yInTopLeftFront, 0, inputImages_height-1) && between(zInTopLeftFront, 0, inputImages_depth-1);
bool topRightBackIsIn = between(xInTopLeftFront+1, 0, inputImages_width-1)
&& between(yInTopLeftFront, 0, inputImages_height-1) && between(zInTopLeftFront+1, 0, inputImages_depth-1);
bool bottomLeftFrontIsIn = between(xInTopLeftFront, 0, inputImages_width-1)
&& between(yInTopLeftFront+1, 0, inputImages_height-1) && between(zInTopLeftFront, 0, inputImages_depth-1);
bool bottomLeftBackIsIn = between(xInTopLeftFront, 0, inputImages_width-1)
&& between(yInTopLeftFront+1, 0, inputImages_height-1) && between(zInTopLeftFront+1, 0, inputImages_depth-1);
bool bottomRightFrontIsIn = between(xInTopLeftFront+1, 0, inputImages_width-1)
&& between(yInTopLeftFront+1, 0, inputImages_height-1) && between(zInTopLeftFront, 0, inputImages_depth-1);
bool bottomRightBackIsIn = between(xInTopLeftFront+1, 0, inputImages_width-1)
&& between(yInTopLeftFront+1, 0, inputImages_height-1) && between(zInTopLeftFront+1, 0, inputImages_depth-1);
// interpolation happens here
for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x)
{
if(topLeftFrontIsIn) inTopLeftFront = inputImages_data[inTopLeftFrontAddress + t];
if(topLeftBackIsIn) inTopLeftBack = inputImages_data[inTopLeftBackAddress + t];
if(topRightFrontIsIn) inTopRightFront = inputImages_data[inTopRightFrontAddress + t];
if(topRightBackIsIn) inTopRightBack = inputImages_data[inTopRightBackAddress + t];
if(bottomLeftFrontIsIn) inBottomLeftFront = inputImages_data[inBottomLeftFrontAddress + t];
if(bottomLeftBackIsIn) inBottomLeftBack = inputImages_data[inBottomLeftBackAddress + t];
if(bottomRightFrontIsIn) inBottomRightFront = inputImages_data[inBottomRightFrontAddress + t];
if(bottomRightBackIsIn) inBottomRightBack = inputImages_data[inBottomRightBackAddress + t];
v = xWeightTopLeftFront * yWeightTopLeftFront * zWeightTopLeftFront * inTopLeftFront
+ xWeightTopLeftFront * yWeightTopLeftFront * (1-zWeightTopLeftFront) * inTopLeftBack
+ (1 - xWeightTopLeftFront) * yWeightTopLeftFront * zWeightTopLeftFront * inTopRightFront
+ (1 - xWeightTopLeftFront) * yWeightTopLeftFront * (1-zWeightTopLeftFront) * inTopRightBack
+ xWeightTopLeftFront * (1 - yWeightTopLeftFront) * zWeightTopLeftFront * inBottomLeftFront
+ xWeightTopLeftFront * (1 - yWeightTopLeftFront) * (1-zWeightTopLeftFront) * inBottomLeftBack
+ (1 - xWeightTopLeftFront) * (1 - yWeightTopLeftFront) * zWeightTopLeftFront * inBottomRightFront
+ (1 - xWeightTopLeftFront) * (1 - yWeightTopLeftFront) * (1-zWeightTopLeftFront) * inBottomRightBack;
output_data[outAddress + t] = v;
}
}
static int cunn_BilinearSamplerBHWD_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor *)luaT_checkudata(L, 4, "torch.CudaTensor");
//printf("GPU hello\n");
dim3 blocks((output->size[3]+15)/16, output->size[2], output->size[0]*output->size[1]);
dim3 threads(48,16);
/* assume BHWD */
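/* the 5D tensors are laid out as batch x depth x height x width x channels;
strides are passed to the kernel in the order batch, channels, depth, height, width */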
bilinearSamplingFromGrid <<< blocks, threads, 0, THCState_getCurrentStream(state) >>> (THCudaTensor_data(state, inputImages),
THCudaTensor_stride(state, inputImages, 0),
THCudaTensor_stride(state, inputImages, 4),
THCudaTensor_stride(state, inputImages, 1),
THCudaTensor_stride(state, inputImages, 2),
THCudaTensor_stride(state, inputImages, 3),
THCudaTensor_data(state, grids),
THCudaTensor_stride(state, grids, 0),
THCudaTensor_stride(state, grids, 4),
THCudaTensor_stride(state, grids, 1),
THCudaTensor_stride(state, grids, 2),
THCudaTensor_stride(state, grids, 3),
THCudaTensor_data(state, output),
THCudaTensor_stride(state, output, 0),
THCudaTensor_stride(state, output, 4),
THCudaTensor_stride(state, output, 1),
THCudaTensor_stride(state, output, 2),
THCudaTensor_stride(state, output, 3),
THCudaTensor_size(state, inputImages, 4),
THCudaTensor_size(state, inputImages, 1),
THCudaTensor_size(state, inputImages, 2),
THCudaTensor_size(state, inputImages, 3),
THCudaTensor_size(state, output, 1),
THCudaTensor_size(state, output, 3));
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
//printf("GPU forward end!\n");
return 1;
}
template<bool onlyGrid> __global__ void backwardBilinearSampling(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideDepth, int inputImages_strideHeight, int inputImages_strideWidth,
float* gradInputImages_data, int gradInputImages_strideBatch, int gradInputImages_strideChannels, int gradInputImages_strideDepth, int gradInputImages_strideHeight, int gradInputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideDepth, int grids_strideHeight, int grids_strideWidth,
float* gradGrids_data, int gradGrids_strideBatch, int gradGrids_strideYX, int gradGrids_strideDepth, int gradGrids_strideHeight, int gradGrids_strideWidth,
float* gradOutput_data, int gradOutput_strideBatch, int gradOutput_strideChannels, int gradOutput_strideDepth, int gradOutput_strideHeight, int gradOutput_strideWidth,
int inputImages_channels, int inputImages_depth, int inputImages_height, int inputImages_width, int gradOutput_depth, int gradOutput_width)
{
// each (48,16) block handles 16 output pixels (for coalescing the grid read)
// x,y = coordinates
// z = batch index
// threads : used for features
const int xOut = blockIdx.x*blockDim.y+threadIdx.y;
const bool withinImageBounds = xOut < gradOutput_width;
const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 3 < gradOutput_width;
const int yOut = blockIdx.y;
const int width = inputImages_width;
const int height = inputImages_height;
const int depth = gradOutput_depth;
const int b = blockIdx.z/depth;
const int zOut = blockIdx.z%depth;
float yf,xf, zf;
__shared__ float gridData[48];
if (threadIdx.y==0 && withinGridBounds)
{
gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + zOut*grids_strideDepth + threadIdx.x];
}
__syncthreads();
if(withinImageBounds)
{
zf = gridData[threadIdx.y*3];
yf = gridData[threadIdx.y*3+1];
xf = gridData[threadIdx.y*3+2];
int yInTopLeftFront, xInTopLeftFront, zInTopLeftFront;
float yWeightTopLeftFront, xWeightTopLeftFront, zWeightTopLeftFront;
getTopLeftFront(zf, inputImages_depth, zInTopLeftFront, zWeightTopLeftFront);
getTopLeftFront(yf, inputImages_height, yInTopLeftFront, yWeightTopLeftFront);
getTopLeftFront(xf, inputImages_width, xInTopLeftFront, xWeightTopLeftFront);
const int inTopLeftFrontAddress = inputImages_strideBatch * b + inputImages_strideHeight * yInTopLeftFront
+ inputImages_strideWidth * xInTopLeftFront + inputImages_strideDepth * zInTopLeftFront;
const int inTopLeftBackAddress = inTopLeftFrontAddress + inputImages_strideDepth;
const int inTopRightFrontAddress = inTopLeftFrontAddress + inputImages_strideWidth;
const int inTopRightBackAddress = inTopRightFrontAddress + inputImages_strideDepth;
const int inBottomLeftFrontAddress = inTopLeftFrontAddress + inputImages_strideHeight;
const int inBottomLeftBackAddress = inBottomLeftFrontAddress + inputImages_strideDepth;
const int inBottomRightFrontAddress = inBottomLeftFrontAddress + inputImages_strideWidth;
const int inBottomRightBackAddress = inBottomRightFrontAddress + inputImages_strideDepth;
const int gradInputImagesTopLeftFrontAddress = gradInputImages_strideBatch * b + gradInputImages_strideHeight * yInTopLeftFront + gradInputImages_strideWidth * xInTopLeftFront + gradInputImages_strideDepth * zInTopLeftFront;
const int gradInputImagesTopLeftBackAddress = gradInputImagesTopLeftFrontAddress + gradInputImages_strideDepth;
const int gradInputImagesTopRightFrontAddress = gradInputImagesTopLeftFrontAddress + gradInputImages_strideWidth;
const int gradInputImagesTopRightBackAddress = gradInputImagesTopRightFrontAddress + gradInputImages_strideDepth;
const int gradInputImagesBottomLeftFrontAddress = gradInputImagesTopLeftFrontAddress + gradInputImages_strideHeight;
const int gradInputImagesBottomLeftBackAddress = gradInputImagesBottomLeftFrontAddress + gradInputImages_strideDepth;
const int gradInputImagesBottomRightFrontAddress = gradInputImagesBottomLeftFrontAddress + gradInputImages_strideWidth;
const int gradInputImagesBottomRightBackAddress = gradInputImagesBottomRightFrontAddress + gradInputImages_strideDepth;
const int gradOutputAddress = gradOutput_strideBatch * b + gradOutput_strideHeight * yOut + gradOutput_strideWidth * xOut + gradOutput_strideDepth * zOut;
float topLeftFrontDotProduct = 0;
float topLeftBackDotProduct = 0;
float topRightFrontDotProduct = 0;
float topRightBackDotProduct = 0;
float bottomLeftFrontDotProduct = 0;
float bottomLeftBackDotProduct = 0;
float bottomRightFrontDotProduct = 0;
float bottomRightBackDotProduct = 0;
bool topLeftFrontIsIn = between(xInTopLeftFront, 0, inputImages_width-1)
&& between(yInTopLeftFront, 0, inputImages_height-1) && between(zInTopLeftFront, 0, inputImages_depth-1);
bool topLeftBackIsIn = between(xInTopLeftFront, 0, inputImages_width-1)
&& between(yInTopLeftFront, 0, inputImages_height-1) && between(zInTopLeftFront+1, 0, inputImages_depth-1);
bool topRightFrontIsIn = between(xInTopLeftFront+1, 0, inputImages_width-1)
&& between(yInTopLeftFront, 0, inputImages_height-1) && between(zInTopLeftFront, 0, inputImages_depth-1);
bool topRightBackIsIn = between(xInTopLeftFront+1, 0, inputImages_width-1)
&& between(yInTopLeftFront, 0, inputImages_height-1) && between(zInTopLeftFront+1, 0, inputImages_depth-1);
bool bottomLeftFrontIsIn = between(xInTopLeftFront, 0, inputImages_width-1)
&& between(yInTopLeftFront+1, 0, inputImages_height-1) && between(zInTopLeftFront, 0, inputImages_depth-1);
bool bottomLeftBackIsIn = between(xInTopLeftFront, 0, inputImages_width-1)
&& between(yInTopLeftFront+1, 0, inputImages_height-1) && between(zInTopLeftFront+1, 0, inputImages_depth-1);
bool bottomRightFrontIsIn = between(xInTopLeftFront+1, 0, inputImages_width-1)
&& between(yInTopLeftFront+1, 0, inputImages_height-1) && between(zInTopLeftFront, 0, inputImages_depth-1);
bool bottomRightBackIsIn = between(xInTopLeftFront+1, 0, inputImages_width-1)
&& between(yInTopLeftFront+1, 0, inputImages_height-1) && between(zInTopLeftFront+1, 0, inputImages_depth-1);
/*
In that loop we accumulate
- gradients into the gradInputImages array with atomic adds
- we compute the dot product that we need for the grid gradient
*/
for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x)
{
float gradOutValue = gradOutput_data[gradOutputAddress + t];
// bool between(int value, int lowerBound, int upperBound)
if(topLeftFrontIsIn)
{
float inTopLeftFront = inputImages_data[inTopLeftFrontAddress + t];
topLeftFrontDotProduct += inTopLeftFront * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesTopLeftFrontAddress + t], xWeightTopLeftFront * yWeightTopLeftFront * zWeightTopLeftFront * gradOutValue);
}
if(topLeftBackIsIn)
{
float inTopLeftBack = inputImages_data[inTopLeftBackAddress + t];
topLeftBackDotProduct += inTopLeftBack * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesTopLeftBackAddress + t], xWeightTopLeftFront * yWeightTopLeftFront * (1-zWeightTopLeftFront) * gradOutValue);
}
if(topRightFrontIsIn)
{
float inTopRightFront = inputImages_data[inTopRightFrontAddress + t];
topRightFrontDotProduct += inTopRightFront * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesTopRightFrontAddress + t], (1 - xWeightTopLeftFront) * yWeightTopLeftFront * zWeightTopLeftFront * gradOutValue);
}
if(topRightBackIsIn)
{
float inTopRightBack = inputImages_data[inTopRightBackAddress + t];
topRightBackDotProduct += inTopRightBack * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesTopRightBackAddress + t], (1 - xWeightTopLeftFront) * yWeightTopLeftFront * (1-zWeightTopLeftFront) * gradOutValue);
}
if(bottomLeftFrontIsIn)
{
float inBottomLeftFront = inputImages_data[inBottomLeftFrontAddress + t];
bottomLeftFrontDotProduct += inBottomLeftFront * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBottomLeftFrontAddress + t], xWeightTopLeftFront * (1 - yWeightTopLeftFront) * zWeightTopLeftFront * gradOutValue);
}
if(bottomLeftBackIsIn)
{
float inBottomLeftBack = inputImages_data[inBottomLeftBackAddress + t];
bottomLeftBackDotProduct += inBottomLeftBack * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBottomLeftBackAddress + t], xWeightTopLeftFront * (1 - yWeightTopLeftFront) * (1-zWeightTopLeftFront) * gradOutValue);
}
if(bottomRightFrontIsIn)
{
float inBottomRightFront = inputImages_data[inBottomRightFrontAddress + t];
bottomRightFrontDotProduct += inBottomRightFront * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBottomRightFrontAddress + t], (1 - xWeightTopLeftFront) * (1 - yWeightTopLeftFront) * zWeightTopLeftFront * gradOutValue);
}
if(bottomRightBackIsIn)
{
float inBottomRightBack = inputImages_data[inBottomRightBackAddress + t];
bottomRightBackDotProduct += inBottomRightBack * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBottomRightBackAddress + t], (1 - xWeightTopLeftFront) * (1 - yWeightTopLeftFront) * (1-zWeightTopLeftFront) * gradOutValue);
}
}
/*
Here we reduce the dot product and compute the grid gradient before writing it.
*/
/* could do shuffles and use no shmem at all but cuda arch is 2.0 */
/*
__shared__ volatile float __shmem[16][32];
__shmem[threadIdx.y][threadIdx.x] = topLeftDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
topLeftDotProduct = __shmem[threadIdx.y][0];
__shmem[threadIdx.y][threadIdx.x] = topRightDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
topRightDotProduct = __shmem[threadIdx.y][0];
__shmem[threadIdx.y][threadIdx.x] = bottomLeftDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
bottomLeftDotProduct = __shmem[threadIdx.y][0];
__shmem[threadIdx.y][threadIdx.x] = bottomRightDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
bottomRightDotProduct = __shmem[threadIdx.y][0];
*/
__shared__ volatile float __shmem[16][48];
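// one 48-wide row per output pixel handled by this block (threadIdx.y); each row is reduced into its first element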
__shmem[threadIdx.y][threadIdx.x] = topLeftFrontDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
topLeftFrontDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = topLeftBackDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
topLeftBackDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = topRightFrontDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
topRightFrontDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = topRightBackDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
topRightBackDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = bottomLeftFrontDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
bottomLeftFrontDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = bottomLeftBackDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
bottomLeftBackDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = bottomRightFrontDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
bottomRightFrontDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = bottomRightBackDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
bottomRightBackDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
yf = topLeftFrontDotProduct * xWeightTopLeftFront * zWeightTopLeftFront * (-1)
+ topLeftBackDotProduct * xWeightTopLeftFront * (1-zWeightTopLeftFront) * (-1)
+ topRightFrontDotProduct * (1-xWeightTopLeftFront) * zWeightTopLeftFront * (-1)
+ topRightBackDotProduct * (1-xWeightTopLeftFront) * (1-zWeightTopLeftFront) *(-1)
+ bottomLeftFrontDotProduct * xWeightTopLeftFront * zWeightTopLeftFront * (1)
+ bottomLeftBackDotProduct * xWeightTopLeftFront * (1-zWeightTopLeftFront) * (1)
+ bottomRightFrontDotProduct * (1-xWeightTopLeftFront) * zWeightTopLeftFront * (1)
+ bottomRightBackDotProduct * (1-xWeightTopLeftFront) * (1-zWeightTopLeftFront) *(1);
xf = topLeftFrontDotProduct * yWeightTopLeftFront * zWeightTopLeftFront *(-1)
+ topLeftBackDotProduct * yWeightTopLeftFront * (1-zWeightTopLeftFront) *(-1)
+ topRightFrontDotProduct * yWeightTopLeftFront * zWeightTopLeftFront * 1
+ topRightBackDotProduct * yWeightTopLeftFront * (1-zWeightTopLeftFront) * 1
+ bottomLeftFrontDotProduct * (1-yWeightTopLeftFront) * zWeightTopLeftFront * (-1)
+ bottomLeftBackDotProduct * (1-yWeightTopLeftFront) * (1-zWeightTopLeftFront) * (-1)
+ bottomRightFrontDotProduct * (1-yWeightTopLeftFront) * zWeightTopLeftFront * (1)
+ bottomRightBackDotProduct * (1-yWeightTopLeftFront) *(1-zWeightTopLeftFront) * (1);
zf = topLeftFrontDotProduct * yWeightTopLeftFront * xWeightTopLeftFront * (-1)
+ topLeftBackDotProduct * yWeightTopLeftFront * xWeightTopLeftFront *(1)
+ topRightFrontDotProduct * yWeightTopLeftFront * (1-xWeightTopLeftFront) *(-1)
+ topRightBackDotProduct * yWeightTopLeftFront * (1-xWeightTopLeftFront) *(1)
+ bottomLeftFrontDotProduct * (1-yWeightTopLeftFront) * xWeightTopLeftFront * (-1)
+ bottomLeftBackDotProduct * (1-yWeightTopLeftFront) * xWeightTopLeftFront * (1)
+ bottomRightFrontDotProduct * (1-yWeightTopLeftFront) * (1-xWeightTopLeftFront) *(-1)
+ bottomRightBackDotProduct * (1-yWeightTopLeftFront) * (1-xWeightTopLeftFront) * 1;
if(threadIdx.x==0)
{
gridData[threadIdx.y*3] = zf * (inputImages_depth-1) / 2;
gridData[threadIdx.y*3+1] = yf * (inputImages_height-1) / 2;
gridData[threadIdx.y*3+2] = xf * (inputImages_width-1) / 2;
}
}// the whole gradient computation sits inside one big if so that every thread reaches the __syncthreads() below and the block does not hang
__syncthreads();
if(threadIdx.y==0 && withinGridBounds)
gradGrids_data[b*gradGrids_strideBatch + yOut*gradGrids_strideHeight + xOut*gradGrids_strideWidth + zOut*gradGrids_strideDepth + threadIdx.x] = gridData[threadIdx.x];
}
static int cunn_BilinearSamplerBHWD_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *gradInputImages = (THCudaTensor *)luaT_checkudata(L, 4, "torch.CudaTensor");
THCudaTensor *gradGrids = (THCudaTensor *)luaT_checkudata(L, 5, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 6, "torch.CudaTensor");
dim3 blocks((gradOutput->size[3]+15)/16, gradOutput->size[2], gradOutput->size[0]*gradOutput->size[1]);
dim3 threads(48,16);
backwardBilinearSampling <false> <<< blocks, threads, 0, THCState_getCurrentStream(state) >>> (
THCudaTensor_data(state, inputImages),
THCudaTensor_stride(state, inputImages, 0),
THCudaTensor_stride(state, inputImages, 4),
THCudaTensor_stride(state, inputImages, 1),
THCudaTensor_stride(state, inputImages, 2),
THCudaTensor_stride(state, inputImages, 3),
THCudaTensor_data(state, gradInputImages),
THCudaTensor_stride(state, gradInputImages, 0),
THCudaTensor_stride(state, gradInputImages, 4),
THCudaTensor_stride(state, gradInputImages, 1),
THCudaTensor_stride(state, gradInputImages, 2),
THCudaTensor_stride(state, gradInputImages, 3),
THCudaTensor_data(state, grids),
THCudaTensor_stride(state, grids, 0),
THCudaTensor_stride(state, grids, 4),
THCudaTensor_stride(state, grids, 1),
THCudaTensor_stride(state, grids, 2),
THCudaTensor_stride(state, grids, 3),
THCudaTensor_data(state, gradGrids),
THCudaTensor_stride(state, gradGrids, 0),
THCudaTensor_stride(state, gradGrids, 4),
THCudaTensor_stride(state, gradGrids, 1),
THCudaTensor_stride(state, gradGrids, 2),
THCudaTensor_stride(state, gradGrids, 3),
THCudaTensor_data(state, gradOutput),
THCudaTensor_stride(state, gradOutput, 0),
THCudaTensor_stride(state, gradOutput, 4),
THCudaTensor_stride(state, gradOutput, 1),
THCudaTensor_stride(state, gradOutput, 2),
THCudaTensor_stride(state, gradOutput, 3),
THCudaTensor_size(state, inputImages, 4),
THCudaTensor_size(state, inputImages, 1),
THCudaTensor_size(state, inputImages, 2),
THCudaTensor_size(state, inputImages, 3),
THCudaTensor_size(state, gradOutput, 1),
THCudaTensor_size(state, gradOutput, 3));
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in BilinearSampler.updateGradInput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
return 1;
}
// Perspective Transformation
__global__ void bilinearSamplingFromGridPerspective(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideDepth, int inputImages_strideHeight, int inputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideDepth, int grids_strideHeight, int grids_strideWidth,
float* output_data, int output_strideBatch, int output_strideChannels, int output_strideDist, int output_strideHeight, int output_strideWidth,
int inputImages_channels, int inputImages_depth, int inputImages_height, int inputImages_width, int output_dist, int output_width, float focal_length)
{
// each (32,16) block 16 output pixels (for coalescing the grid read)
// x,y = coordinates (xOut = blockIdx.x*16+blockDim.y+threadIdx.y)
// z = batch index
// threadIdx.x : used for features (coalescing is trivial)
const int xOut = blockIdx.x*blockDim.y+threadIdx.y;
const bool withinImageBounds = xOut < output_width;
const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 4 < output_width;
const int yOut = blockIdx.y;
const int width = inputImages_width;
const int height = inputImages_height;
const int dist = output_dist;
const int b = blockIdx.z/dist;
const int disOut = blockIdx.z%dist;
float zf,yf,xf, disf;
__shared__ float gridData[32];
if (threadIdx.y==0 && withinGridBounds)
{
gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + disOut*grids_strideDepth + threadIdx.x];
}
__syncthreads();
if(!withinImageBounds) return;
zf = gridData[threadIdx.y*4];
yf = gridData[threadIdx.y*4+1];
xf = gridData[threadIdx.y*4+2];
disf = gridData[threadIdx.y*4+3];
//yf = yf / disf;
//xf = xf / disf;
//zf = zf / disf - (focal_length + 0.5);
const int outAddress = output_strideBatch * b + output_strideDist * disOut + output_strideHeight * yOut + output_strideWidth * xOut;
int zInFrontTopLeft, yInFrontTopLeft, xInFrontTopLeft;
float yWeightFrontTopLeft, xWeightFrontTopLeft, zWeightFrontTopLeft;
getTopLeftFront(zf, inputImages_depth, zInFrontTopLeft, zWeightFrontTopLeft);
getTopLeftFront(yf, inputImages_height, yInFrontTopLeft, yWeightFrontTopLeft);
getTopLeftFront(xf, inputImages_width, xInFrontTopLeft, xWeightFrontTopLeft);
const int inFrontTopLeftAddress = inputImages_strideBatch * b + inputImages_strideDepth * zInFrontTopLeft + inputImages_strideHeight * yInFrontTopLeft + inputImages_strideWidth * xInFrontTopLeft;
const int inFrontTopRightAddress = inFrontTopLeftAddress + inputImages_strideWidth;
const int inFrontBottomLeftAddress = inFrontTopLeftAddress + inputImages_strideHeight;
const int inFrontBottomRightAddress = inFrontBottomLeftAddress + inputImages_strideWidth;
const int inBackTopLeftAddress = inFrontTopLeftAddress + inputImages_strideDepth;
const int inBackTopRightAddress = inBackTopLeftAddress + inputImages_strideWidth;
const int inBackBottomLeftAddress = inBackTopLeftAddress + inputImages_strideHeight;
const int inBackBottomRightAddress = inBackBottomLeftAddress + inputImages_strideWidth;
float v=0;
float inFrontTopLeft=0;
float inFrontTopRight=0;
float inFrontBottomLeft=0;
float inFrontBottomRight=0;
float inBackTopLeft=0;
float inBackTopRight=0;
float inBackBottomLeft=0;
float inBackBottomRight=0;
bool frontTopLeftIsIn = between(xInFrontTopLeft, 0, inputImages_width-1)
&& between(yInFrontTopLeft, 0, inputImages_height-1) && between(zInFrontTopLeft, 0, inputImages_depth-1);
bool backTopLeftIsIn = between(xInFrontTopLeft, 0, inputImages_width-1)
&& between(yInFrontTopLeft, 0, inputImages_height-1) && between(zInFrontTopLeft+1, 0, inputImages_depth-1);
bool frontTopRightIsIn = between(xInFrontTopLeft+1, 0, inputImages_width-1)
&& between(yInFrontTopLeft, 0, inputImages_height-1) && between(zInFrontTopLeft, 0, inputImages_depth-1);
bool backTopRightIsIn = between(xInFrontTopLeft+1, 0, inputImages_width-1)
&& between(yInFrontTopLeft, 0, inputImages_height-1) && between(zInFrontTopLeft+1, 0, inputImages_depth-1);
bool frontBottomLeftIsIn = between(xInFrontTopLeft, 0, inputImages_width-1)
&& between(yInFrontTopLeft+1, 0, inputImages_height-1) && between(zInFrontTopLeft, 0, inputImages_depth-1);
bool backBottomLeftIsIn = between(xInFrontTopLeft, 0, inputImages_width-1)
&& between(yInFrontTopLeft+1, 0, inputImages_height-1) && between(zInFrontTopLeft+1, 0, inputImages_depth-1);
bool frontBottomRightIsIn = between(xInFrontTopLeft+1, 0, inputImages_width-1)
&& between(yInFrontTopLeft+1, 0, inputImages_height-1) && between(zInFrontTopLeft, 0, inputImages_depth-1);
bool backBottomRightIsIn = between(xInFrontTopLeft+1, 0, inputImages_width-1)
&& between(yInFrontTopLeft+1, 0, inputImages_height-1) && between(zInFrontTopLeft+1, 0, inputImages_depth-1);
// interpolation happens here
for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x)
{
if(frontTopLeftIsIn) inFrontTopLeft = inputImages_data[inFrontTopLeftAddress + t];
if(frontTopRightIsIn) inFrontTopRight = inputImages_data[inFrontTopRightAddress + t];
if(frontBottomLeftIsIn) inFrontBottomLeft = inputImages_data[inFrontBottomLeftAddress + t];
if(frontBottomRightIsIn) inFrontBottomRight = inputImages_data[inFrontBottomRightAddress + t];
if(backTopLeftIsIn) inBackTopLeft = inputImages_data[inBackTopLeftAddress + t];
if(backTopRightIsIn) inBackTopRight = inputImages_data[inBackTopRightAddress + t];
if(backBottomLeftIsIn) inBackBottomLeft = inputImages_data[inBackBottomLeftAddress + t];
if(backBottomRightIsIn) inBackBottomRight = inputImages_data[inBackBottomRightAddress + t];
v = xWeightFrontTopLeft * yWeightFrontTopLeft * zWeightFrontTopLeft * inFrontTopLeft
+ (1 - xWeightFrontTopLeft) * yWeightFrontTopLeft * zWeightFrontTopLeft * inFrontTopRight
+ xWeightFrontTopLeft * (1 - yWeightFrontTopLeft) * zWeightFrontTopLeft * inFrontBottomLeft
+ (1 - xWeightFrontTopLeft) * (1 - yWeightFrontTopLeft) * zWeightFrontTopLeft * inFrontBottomRight
+ xWeightFrontTopLeft * yWeightFrontTopLeft * (1 - zWeightFrontTopLeft) * inBackTopLeft
+ (1 - xWeightFrontTopLeft) * yWeightFrontTopLeft * (1-zWeightFrontTopLeft) * inBackTopRight
+ xWeightFrontTopLeft * (1 - yWeightFrontTopLeft) * (1-zWeightFrontTopLeft) * inBackBottomLeft
+ (1 - xWeightFrontTopLeft) * (1 - yWeightFrontTopLeft) * (1-zWeightFrontTopLeft) * inBackBottomRight;
output_data[outAddress + t] = v;
}
}
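// Editorial worked example for the interpolation above: the eight corner
// coefficients are products of {w, 1-w} taken over the three axes, so they
// always sum to (x + (1-x)) * (y + (1-y)) * (z + (1-z)) = 1. For instance,
// with xWeightFrontTopLeft=0.25, yWeightFrontTopLeft=0.5 and
// zWeightFrontTopLeft=0.75 the products are
// 0.09375, 0.28125, 0.09375, 0.28125, 0.03125, 0.09375, 0.03125, 0.09375,
// which add up to exactly 1, so v is a convex combination of the 8 corners.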
static int cunn_BilinearSamplerPerspective_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor *)luaT_checkudata(L, 4, "torch.CudaTensor");
float focal_length = lua_tonumber(L, 5);
dim3 blocks((output->size[3]+7)/8, output->size[2], output->size[0]*output->size[1]);
dim3 threads(32,8);
/* assume BHWD */
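/* Editorial note, derived from the stride/size argument order below rather
than from the original comment: dim 0 = batch, dim 1 = depth, dim 2 = height,
dim 3 = width, dim 4 = channels (stride 0 -> strideBatch, 4 -> strideChannels,
1 -> strideDepth, 2 -> strideHeight, 3 -> strideWidth). */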
bilinearSamplingFromGridPerspective <<< blocks, threads, 0, THCState_getCurrentStream(state) >>> (THCudaTensor_data(state, inputImages),
THCudaTensor_stride(state, inputImages, 0),
THCudaTensor_stride(state, inputImages, 4),
THCudaTensor_stride(state, inputImages, 1),
THCudaTensor_stride(state, inputImages, 2),
THCudaTensor_stride(state, inputImages, 3),
THCudaTensor_data(state, grids),
THCudaTensor_stride(state, grids, 0),
THCudaTensor_stride(state, grids, 4),
THCudaTensor_stride(state, grids, 1),
THCudaTensor_stride(state, grids, 2),
THCudaTensor_stride(state, grids, 3),
THCudaTensor_data(state, output),
THCudaTensor_stride(state, output, 0),
THCudaTensor_stride(state, output, 4),
THCudaTensor_stride(state, output, 1),
THCudaTensor_stride(state, output, 2),
THCudaTensor_stride(state, output, 3),
THCudaTensor_size(state, inputImages, 4),
THCudaTensor_size(state, inputImages, 1),
THCudaTensor_size(state, inputImages, 2),
THCudaTensor_size(state, inputImages, 3),
THCudaTensor_size(state, output, 1),
THCudaTensor_size(state, output, 3),
focal_length);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
return 1;
}
template<bool onlyGrid> __global__ void backwardBilinearSamplingPerspective(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideDepth, int inputImages_strideHeight, int inputImages_strideWidth,
float* gradInputImages_data, int gradInputImages_strideBatch, int gradInputImages_strideChannels, int gradInputImages_strideDepth, int gradInputImages_strideHeight, int gradInputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideDepth, int grids_strideHeight, int grids_strideWidth,
float* gradGrids_data, int gradGrids_strideBatch, int gradGrids_strideYX, int gradGrids_strideDepth, int gradGrids_strideHeight, int gradGrids_strideWidth,
float* gradOutput_data, int gradOutput_strideBatch, int gradOutput_strideChannels, int gradOutput_strideDist, int gradOutput_strideHeight, int gradOutput_strideWidth,
int inputImages_channels, int inputImages_depth, int inputImages_height, int inputImages_width, int gradOutput_dist, int gradOutput_width, float focal_length)
{
// each (32,8) block computes 8 output pixels (for coalescing the grid read)
// x,y = coordinates
// z = batch index
// threadIdx.x : used for features
const int xOut = blockIdx.x*blockDim.y+threadIdx.y;
const bool withinImageBounds = xOut < gradOutput_width;
const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 4 < gradOutput_width;
const int yOut = blockIdx.y;
const int width = inputImages_width;
const int height = inputImages_height;
const int dist = gradOutput_dist;
const int b = blockIdx.z/dist;
const int disOut = blockIdx.z%dist;
float zf,yf,xf, disf;
__shared__ float gridData[32];
if (threadIdx.y==0 && withinGridBounds)
{
gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + disOut*grids_strideDepth + threadIdx.x];
}
__syncthreads();
if(withinImageBounds)
{
zf = gridData[threadIdx.y*4];
yf = gridData[threadIdx.y*4+1];
xf = gridData[threadIdx.y*4+2];
disf = gridData[threadIdx.y*4+3];
//yf = yf / disf;
//xf = xf / disf;
//zf = zf / disf - (focal_length + 0.5);
int zInFrontTopLeft, yInFrontTopLeft, xInFrontTopLeft;
float yWeightFrontTopLeft, xWeightFrontTopLeft, zWeightFrontTopLeft;
getTopLeftFront(zf, inputImages_depth, zInFrontTopLeft, zWeightFrontTopLeft);
getTopLeftFront(yf, inputImages_height, yInFrontTopLeft, yWeightFrontTopLeft);
getTopLeftFront(xf, inputImages_width, xInFrontTopLeft, xWeightFrontTopLeft);
const int inFrontTopLeftAddress = inputImages_strideBatch * b + inputImages_strideDepth * zInFrontTopLeft + inputImages_strideHeight * yInFrontTopLeft + inputImages_strideWidth * xInFrontTopLeft;
const int inFrontTopRightAddress = inFrontTopLeftAddress + inputImages_strideWidth;
const int inFrontBottomLeftAddress = inFrontTopLeftAddress + inputImages_strideHeight;
const int inFrontBottomRightAddress = inFrontBottomLeftAddress + inputImages_strideWidth;
const int inBackTopLeftAddress = inFrontTopLeftAddress + inputImages_strideDepth;
const int inBackTopRightAddress = inBackTopLeftAddress + inputImages_strideWidth;
const int inBackBottomLeftAddress = inBackTopLeftAddress + inputImages_strideHeight;
const int inBackBottomRightAddress = inBackBottomLeftAddress + inputImages_strideWidth;
const int gradInputImagesFrontTopLeftAddress = gradInputImages_strideBatch * b + gradInputImages_strideDepth * zInFrontTopLeft + gradInputImages_strideHeight * yInFrontTopLeft + gradInputImages_strideWidth *xInFrontTopLeft;
const int gradInputImagesFrontTopRightAddress = gradInputImagesFrontTopLeftAddress + gradInputImages_strideWidth;
const int gradInputImagesFrontBottomLeftAddress = gradInputImagesFrontTopLeftAddress + gradInputImages_strideHeight;
const int gradInputImagesFrontBottomRightAddress = gradInputImagesFrontBottomLeftAddress + gradInputImages_strideWidth;
const int gradInputImagesBackTopLeftAddress = gradInputImagesFrontTopLeftAddress + gradInputImages_strideDepth;
const int gradInputImagesBackTopRightAddress = gradInputImagesBackTopLeftAddress + gradInputImages_strideWidth;
const int gradInputImagesBackBottomLeftAddress = gradInputImagesBackTopLeftAddress + gradInputImages_strideHeight;
const int gradInputImagesBackBottomRightAddress = gradInputImagesBackBottomLeftAddress + gradInputImages_strideWidth;
const int gradOutputAddress = gradOutput_strideBatch * b + gradOutput_strideDist * disOut + gradOutput_strideHeight * yOut + gradOutput_strideWidth * xOut;
float frontTopLeftDotProduct = 0;
float frontTopRightDotProduct = 0;
float frontBottomLeftDotProduct = 0;
float frontBottomRightDotProduct = 0;
float backTopLeftDotProduct = 0;
float backTopRightDotProduct = 0;
float backBottomLeftDotProduct = 0;
float backBottomRightDotProduct = 0;
bool frontTopLeftIsIn = between(xInFrontTopLeft, 0, inputImages_width-1)
&& between(yInFrontTopLeft, 0, inputImages_height-1) && between(zInFrontTopLeft, 0, inputImages_depth-1);
bool backTopLeftIsIn = between(xInFrontTopLeft, 0, inputImages_width-1)
&& between(yInFrontTopLeft, 0, inputImages_height-1) && between(zInFrontTopLeft+1, 0, inputImages_depth-1);
bool frontTopRightIsIn = between(xInFrontTopLeft+1, 0, inputImages_width-1)
&& between(yInFrontTopLeft, 0, inputImages_height-1) && between(zInFrontTopLeft, 0, inputImages_depth-1);
bool backTopRightIsIn = between(xInFrontTopLeft+1, 0, inputImages_width-1)
&& between(yInFrontTopLeft, 0, inputImages_height-1) && between(zInFrontTopLeft+1, 0, inputImages_depth-1);
bool frontBottomLeftIsIn = between(xInFrontTopLeft, 0, inputImages_width-1)
&& between(yInFrontTopLeft+1, 0, inputImages_height-1) && between(zInFrontTopLeft, 0, inputImages_depth-1);
bool backBottomLeftIsIn = between(xInFrontTopLeft, 0, inputImages_width-1)
&& between(yInFrontTopLeft+1, 0, inputImages_height-1) && between(zInFrontTopLeft+1, 0, inputImages_depth-1);
bool frontBottomRightIsIn = between(xInFrontTopLeft+1, 0, inputImages_width-1)
&& between(yInFrontTopLeft+1, 0, inputImages_height-1) && between(zInFrontTopLeft, 0, inputImages_depth-1);
bool backBottomRightIsIn = between(xInFrontTopLeft+1, 0, inputImages_width-1)
&& between(yInFrontTopLeft+1, 0, inputImages_height-1) && between(zInFrontTopLeft+1, 0, inputImages_depth-1);
/*
In this loop we:
- accumulate gradients into the gradInputImages array with atomic adds
- compute the dot products that we need for the grid gradient
*/
for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x)
{
float gradOutValue = gradOutput_data[gradOutputAddress + t];
if(frontTopLeftIsIn)
{
float inFrontTopLeft = inputImages_data[inFrontTopLeftAddress + t];
frontTopLeftDotProduct += inFrontTopLeft * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesFrontTopLeftAddress + t], xWeightFrontTopLeft * yWeightFrontTopLeft * zWeightFrontTopLeft * gradOutValue);
}
if(frontTopRightIsIn)
{
float inFrontTopRight = inputImages_data[inFrontTopRightAddress + t];
frontTopRightDotProduct += inFrontTopRight * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesFrontTopRightAddress + t], (1 - xWeightFrontTopLeft) * yWeightFrontTopLeft * zWeightFrontTopLeft * gradOutValue);
}
if(frontBottomLeftIsIn)
{
float inFrontBottomLeft = inputImages_data[inFrontBottomLeftAddress + t];
frontBottomLeftDotProduct += inFrontBottomLeft * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesFrontBottomLeftAddress + t], xWeightFrontTopLeft * (1 - yWeightFrontTopLeft) * zWeightFrontTopLeft * gradOutValue);
}
if(frontBottomRightIsIn)
{
float inFrontBottomRight = inputImages_data[inFrontBottomRightAddress + t];
frontBottomRightDotProduct += inFrontBottomRight * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesFrontBottomRightAddress + t], (1 - xWeightFrontTopLeft) * (1 - yWeightFrontTopLeft) * zWeightFrontTopLeft * gradOutValue);
}
if(backTopLeftIsIn)
{
float inBackTopLeft = inputImages_data[inBackTopLeftAddress + t];
backTopLeftDotProduct += inBackTopLeft * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBackTopLeftAddress + t], xWeightFrontTopLeft * yWeightFrontTopLeft * (1-zWeightFrontTopLeft) * gradOutValue);
}
if(backTopRightIsIn)
{
float inBackTopRight = inputImages_data[inBackTopRightAddress + t];
backTopRightDotProduct += inBackTopRight * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBackTopRightAddress + t], (1 - xWeightFrontTopLeft) * yWeightFrontTopLeft * (1-zWeightFrontTopLeft) * gradOutValue);
}
if(backBottomLeftIsIn)
{
float inBackBottomLeft = inputImages_data[inBackBottomLeftAddress + t];
backBottomLeftDotProduct += inBackBottomLeft * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBackBottomLeftAddress + t], xWeightFrontTopLeft * (1 - yWeightFrontTopLeft) * (1-zWeightFrontTopLeft) * gradOutValue);
}
if(backBottomRightIsIn)
{
float inBackBottomRight = inputImages_data[inBackBottomRightAddress + t];
backBottomRightDotProduct += inBackBottomRight * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBackBottomRightAddress + t], (1 - xWeightFrontTopLeft) * (1 - yWeightFrontTopLeft) * (1-zWeightFrontTopLeft) * gradOutValue);
}
}
/*
Here we reduce the dot product and compute the grid gradient before writing it.
*/
/* could do shuffles and use no shmem at all but cuda arch is 2.0 */
__shared__ volatile float __shmem[8][32];
__shmem[threadIdx.y][threadIdx.x] = frontTopLeftDotProduct;
sumReduceShMemPerspective(__shmem[threadIdx.y]);
frontTopLeftDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = backTopLeftDotProduct;
sumReduceShMemPerspective(__shmem[threadIdx.y]);
backTopLeftDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = frontTopRightDotProduct;
sumReduceShMemPerspective(__shmem[threadIdx.y]);
frontTopRightDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = backTopRightDotProduct;
sumReduceShMemPerspective(__shmem[threadIdx.y]);
backTopRightDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = frontBottomLeftDotProduct;
sumReduceShMemPerspective(__shmem[threadIdx.y]);
frontBottomLeftDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = backBottomLeftDotProduct;
sumReduceShMemPerspective(__shmem[threadIdx.y]);
backBottomLeftDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = frontBottomRightDotProduct;
sumReduceShMemPerspective(__shmem[threadIdx.y]);
frontBottomRightDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
__shmem[threadIdx.y][threadIdx.x] = backBottomRightDotProduct;
sumReduceShMemPerspective(__shmem[threadIdx.y]);
backBottomRightDotProduct = __shmem[threadIdx.y][0];
//__syncthreads();
float dyf = frontTopLeftDotProduct * xWeightFrontTopLeft * zWeightFrontTopLeft * (-1)
+ backTopLeftDotProduct * xWeightFrontTopLeft * (1-zWeightFrontTopLeft) * (-1)
+ frontTopRightDotProduct * (1-xWeightFrontTopLeft) * zWeightFrontTopLeft * (-1)
+ backTopRightDotProduct * (1-xWeightFrontTopLeft) * (1-zWeightFrontTopLeft) *(-1)
+ frontBottomLeftDotProduct * xWeightFrontTopLeft * zWeightFrontTopLeft * (1)
+ backBottomLeftDotProduct * xWeightFrontTopLeft * (1-zWeightFrontTopLeft) * (1)
+ frontBottomRightDotProduct * (1-xWeightFrontTopLeft) * zWeightFrontTopLeft * (1)
+ backBottomRightDotProduct * (1-xWeightFrontTopLeft) * (1-zWeightFrontTopLeft) *(1);
float dxf = frontTopLeftDotProduct * yWeightFrontTopLeft * zWeightFrontTopLeft *(-1)
+ backTopLeftDotProduct * yWeightFrontTopLeft * (1-zWeightFrontTopLeft) *(-1)
+ frontTopRightDotProduct * yWeightFrontTopLeft * zWeightFrontTopLeft * 1
+ backTopRightDotProduct * yWeightFrontTopLeft * (1-zWeightFrontTopLeft) * 1
+ frontBottomLeftDotProduct * (1-yWeightFrontTopLeft) * zWeightFrontTopLeft * (-1)
+ backBottomLeftDotProduct * (1-yWeightFrontTopLeft) * (1-zWeightFrontTopLeft) * (-1)
+ frontBottomRightDotProduct * (1-yWeightFrontTopLeft) * zWeightFrontTopLeft * 1
+ backBottomRightDotProduct * (1-yWeightFrontTopLeft) *(1-zWeightFrontTopLeft) * 1;
float dzf = frontTopLeftDotProduct * yWeightFrontTopLeft * xWeightFrontTopLeft * (-1)
+ backTopLeftDotProduct * yWeightFrontTopLeft * xWeightFrontTopLeft *1
+ frontTopRightDotProduct * yWeightFrontTopLeft * (1-xWeightFrontTopLeft) *(-1)
+ backTopRightDotProduct * yWeightFrontTopLeft * (1-xWeightFrontTopLeft) *1
+ frontBottomLeftDotProduct * (1-yWeightFrontTopLeft) * xWeightFrontTopLeft * (-1)
+ backBottomLeftDotProduct * (1-yWeightFrontTopLeft) * xWeightFrontTopLeft * 1
+ frontBottomRightDotProduct * (1-yWeightFrontTopLeft) * (1-xWeightFrontTopLeft) *(-1)
+ backBottomRightDotProduct * (1-yWeightFrontTopLeft) * (1-xWeightFrontTopLeft) * 1;
if(threadIdx.x==0)
{
gridData[threadIdx.y*4] = dzf * (inputImages_depth-1) / 2;
gridData[threadIdx.y*4+1] = dyf * (inputImages_height-1) / 2;
gridData[threadIdx.y*4+2] = dxf * (inputImages_width-1) / 2;
gridData[threadIdx.y*4+3] = 0;
//-(dyf* (inputImages_height-1) / 2*yf + dxf* (inputImages_width-1) / 2*xf + dzf* (inputImages_depth-1) / 2*(zf+focal_length+0.5))/disf;
}
}// must put a big if condition in order not to hang at __syncthreads()...
__syncthreads();
if(threadIdx.y==0 && withinGridBounds)
gradGrids_data[b*gradGrids_strideBatch + yOut*gradGrids_strideHeight + xOut*gradGrids_strideWidth + disOut*gradGrids_strideDepth + threadIdx.x] = gridData[threadIdx.x];
}
static int cunn_BilinearSamplerPerspective_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *gradInputImages = (THCudaTensor *)luaT_checkudata(L, 4, "torch.CudaTensor");
THCudaTensor *gradGrids = (THCudaTensor *)luaT_checkudata(L, 5, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 6, "torch.CudaTensor");
float focal_length = lua_tonumber(L, 7);
dim3 blocks((gradOutput->size[3]+7)/8, gradOutput->size[2], gradOutput->size[0]*gradOutput->size[1]);
dim3 threads(32,8);
backwardBilinearSamplingPerspective <false> <<< blocks, threads, 0, THCState_getCurrentStream(state) >>> (
THCudaTensor_data(state, inputImages),
THCudaTensor_stride(state, inputImages, 0),
THCudaTensor_stride(state, inputImages, 4),
THCudaTensor_stride(state, inputImages, 1),
THCudaTensor_stride(state, inputImages, 2),
THCudaTensor_stride(state, inputImages, 3),
THCudaTensor_data(state, gradInputImages),
THCudaTensor_stride(state, gradInputImages, 0),
THCudaTensor_stride(state, gradInputImages, 4),
THCudaTensor_stride(state, gradInputImages, 1),
THCudaTensor_stride(state, gradInputImages, 2),
THCudaTensor_stride(state, gradInputImages, 3),
THCudaTensor_data(state, grids),
THCudaTensor_stride(state, grids, 0),
THCudaTensor_stride(state, grids, 4),
THCudaTensor_stride(state, grids, 1),
THCudaTensor_stride(state, grids, 2),
THCudaTensor_stride(state, grids, 3),
THCudaTensor_data(state, gradGrids),
THCudaTensor_stride(state, gradGrids, 0),
THCudaTensor_stride(state, gradGrids, 4),
THCudaTensor_stride(state, gradGrids, 1),
THCudaTensor_stride(state, gradGrids, 2),
THCudaTensor_stride(state, gradGrids, 3),
THCudaTensor_data(state, gradOutput),
THCudaTensor_stride(state, gradOutput, 0),
THCudaTensor_stride(state, gradOutput, 4),
THCudaTensor_stride(state, gradOutput, 1),
THCudaTensor_stride(state, gradOutput, 2),
THCudaTensor_stride(state, gradOutput, 3),
THCudaTensor_size(state, inputImages, 4),
THCudaTensor_size(state, inputImages, 1),
THCudaTensor_size(state, inputImages, 2),
THCudaTensor_size(state, inputImages, 3),
THCudaTensor_size(state, gradOutput, 1),
THCudaTensor_size(state, gradOutput, 3),
focal_length);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in BilinearSampler.updateGradInput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
return 1;
}
static const struct luaL_Reg cunn_BilinearSamplerPerspective__ [] = {
{"BilinearSamplerBHWD_updateOutput", cunn_BilinearSamplerBHWD_updateOutput},
{"BilinearSamplerBHWD_updateGradInput", cunn_BilinearSamplerBHWD_updateGradInput},
{"BilinearSamplerPerspective_updateOutput", cunn_BilinearSamplerPerspective_updateOutput},
{"BilinearSamplerPerspective_updateGradInput", cunn_BilinearSamplerPerspective_updateGradInput},
//{"BilinearSamplerPerspective_updateGradInputOnlyGrid", cunn_BilinearSamplerBHWD_updateGradInputOnlyGrid},
{NULL, NULL}
};
static void cunn_BilinearSamplerPerspective_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_BilinearSamplerPerspective__, "nn");
lua_pop(L,1);
}
|
2b02fe4d947aa60e5088670652c8da043ea30c5b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
// Written by Angelos Katharopoulos <[email protected]>,
// Written by Apoorv Vyas <[email protected]>
//
#include <torch/extension.h>
typedef torch::PackedTensorAccessor32<int32_t, 1, torch::RestrictPtrTraits> int_accessor_1d;
typedef torch::PackedTensorAccessor32<int32_t, 2, torch::RestrictPtrTraits> int_accessor_2d;
typedef torch::PackedTensorAccessor32<int32_t, 3, torch::RestrictPtrTraits> int_accessor_3d;
typedef torch::PackedTensorAccessor32<float, 3, torch::RestrictPtrTraits> float_accessor_3d;
typedef torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits> float_accessor_4d;
#define EPB 32
#define QPB 16
#define QPT 16
// We assume the queries have been rearranged such
// that the first contiguous block belongs to the first cluster,
// the next to the second, and so forth.
// The idea is that each thread now aggregates multiple
// queries and then does one atomic write.
// If the cluster id remains the same we keep accumulating; otherwise we
// do an atomic write for that cluster and move on.
// Threads in the y dimension index the embedding dimension.
__global__ void clustered_aggregate_kernel(
const float_accessor_4d x,
const int_accessor_3d g,
const float_accessor_3d f,
float_accessor_4d y,
const int_accessor_1d lengths,
int Le
) {
int N = x.size(0);
int H = x.size(1);
int L = x.size(2);
int E = x.size(3);
int C = y.size(2);
int e_idx = threadIdx.y;
int full_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int hl = H*Le;
int n = full_idx / hl;
int h = (full_idx % hl) / Le;
int lb = full_idx - n*hl - h*Le;
int l = lb*QPT;
if ((n >= N)) {
return;
}
int l_max = lengths[n];
if (l >= l_max) {
return;
}
int l_end = (l + QPT) < l_max ? (l + QPT):l_max;
int k_cur = g[n][h][l];
int k_prev = k_cur;
for (int e=threadIdx.y; e<E; e+=EPB) {
float res = 0.0;
k_cur = g[n][h][l];
k_prev = k_cur;
float f_cur = f[n][h][k_cur];
for (int i=l; i<l_end; i++) {
k_cur = g[n][h][i];
if (k_cur == k_prev) {
res += (f_cur * x[n][h][i][e]);
}
else {
atomicAdd(&y[n][h][k_prev][e], res);
f_cur = f[n][h][k_cur];
k_prev = k_cur;
res = (f_cur * x[n][h][i][e]);
}
}
atomicAdd(&y[n][h][k_cur][e], res);
}
}
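// Editorial worked example of the index decomposition above (illustrative
// values): with H=8 and Le=63, hl = 504. For full_idx = 1000:
//   n  = 1000 / 504          = 1
//   h  = (1000 % 504) / 63   = 496 / 63 = 7
//   lb = 1000 - 1*504 - 7*63 = 55
//   l  = lb * QPT            = 55 * 16 = 880
// so this thread aggregates queries 880..895 of batch 1, head 7
// (clipped to lengths[1] via l_end).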
/**
* Aggregate the passed vectors X based on group indices in G multiplied by
* factors F.
*/
void clustered_aggregate(
const torch::Tensor X,
const torch::Tensor G,
const torch::Tensor F,
const torch::Tensor lengths,
torch::Tensor Y
) {
// Make sure that we are using the correct GPU device
torch::DeviceGuard _guard(X.device());
int N = X.size(0);
int H = X.size(1);
int L = X.size(2);
int E = X.size(3);
int C = Y.size(2);
// Each thread works on QPT queries
// Le = (L + QPT - 1) / QPT
// There are QPB threads per block in x direction
// blocks = ((N * H * Le) + QPB - 1) // QPB;
int Le = (L + QPT - 1) / QPT;
int blocks = ((N*H*Le) + QPB - 1) / QPB;
dim3 dimBlock(QPB, EPB);
hipLaunchKernelGGL(( clustered_aggregate_kernel), dim3(blocks), dim3(dimBlock), 0, 0,
X.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
G.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>(),
F.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
Y.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
lengths.packed_accessor32<int, 1, torch::RestrictPtrTraits>(),
Le
);
}
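// Editorial launch-configuration example (illustrative numbers): with
// N=16, H=8, L=1000 and the QPT/QPB/EPB values defined above,
//   Le       = (1000 + 15) / 16    = 63
//   blocks   = (16*8*63 + 15) / 16 = 504
//   dimBlock = (16, 32)            = 512 threads per block,
// i.e. each block handles 16 chunks of QPT=16 queries, with 32 threads
// striding over the embedding dimension E.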
// Each block works on a group of clusters.
// The number of clusters in the group is
// decided by the size of shared memory.
// With E the dimension of each vector to be broadcast, we require
// #clusters * E < 192 * 64,
// since 192 * 64 is the total number of floats we can hold in
// shared memory.
// The idea is the following:
// originally we needed (N*H*L) / threads blocks.
// To calculate the required number of blocks now
// we additionally need the counts of clusters.
// For group 1, we need N*H*(sum of counts in g1) / threads blocks.
// For group 2, we need N*H*(sum of counts in g2) / threads blocks.
// Note that in practice we will not need an excessive number of groups:
// for E=64, a single group can hold 192 clusters.
__global__ void clustered_broadcast_kernel(
const float_accessor_4d y,
const int_accessor_3d sorted_g,
const float_accessor_3d f,
float_accessor_4d x,
int_accessor_2d indx_maps,
const int G
) {
int N = x.size(0);
int H = x.size(1);
int L = x.size(2);
int E = x.size(3);
int C = y.size(2);
extern __shared__ float shared_mem[];
int n = indx_maps[blockIdx.x][0];
int h = indx_maps[blockIdx.x][1];
int g = indx_maps[blockIdx.x][2];
int l = indx_maps[blockIdx.x][3] + threadIdx.x;
int n_queries = indx_maps[blockIdx.x][4];
// Load the values to broadcast and factors into shared memory
int clusters_to_load = C / G;
int cluster_offset = g * clusters_to_load;
float* shared_values = shared_mem;
float* shared_factors = shared_mem + clusters_to_load*E;
for (int c=threadIdx.x; c<clusters_to_load; c+=blockDim.x) {
for (int e=0; e<E; e++) {
shared_values[e*clusters_to_load + c] = y[n][h][c+cluster_offset][e];
}
shared_factors[c] = f[n][h][c+cluster_offset];
}
__syncthreads();
// Bail if out of bounds
if (threadIdx.x >= n_queries) {
return;
}
int k = sorted_g[n][h][l];
// if ((k < 0) || (k >= C)) {
// return;
// }
k -= cluster_offset;
// Copy-broadcast from y into x
float factor = shared_factors[k];
for (int e=0; e<E; e++) {
x[n][h][l][e] = (shared_values[e*clusters_to_load + k] * factor);
}
}
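// Editorial note on the shared-memory layout above: the first
// clusters_to_load*E floats hold the cluster vectors (shared_values, indexed
// as [e*clusters_to_load + c]), followed by clusters_to_load factors
// (shared_factors). For example, with E=64, C=128 and G=2 groups:
//   clusters_to_load = 128 / 2 = 64
//   bytes            = (64 + 1) * 64 * sizeof(float) = 16640
// which matches the shared_mem value computed in clustered_broadcast below.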
__global__
void create_maps(
const int_accessor_3d group_counts,
const int_accessor_3d block_counts,
const int threads,
int_accessor_2d indx_maps
) {
if (threadIdx.x == 0) {
int N = group_counts.size(0);
int H = group_counts.size(1);
int G = group_counts.size(2);
int indx = 0;
for (int n=0; n<N; n++){
for (int h=0; h<H; h++) {
int acc_g_count = 0;
for (int g=0; g<G; g++) {
int q_id = acc_g_count;
int q_end_id = 0;
int g_count = group_counts[n][h][g];
int blocks = block_counts[n][h][g];
for (int b=0; b<blocks; b++) {
indx_maps[indx][0] = n;
indx_maps[indx][1] = h;
indx_maps[indx][2] = g;
indx_maps[indx][3] = q_id;
q_end_id += threads;
indx_maps[indx][4] = (q_end_id < g_count) ? threads:g_count - (b*threads);
q_id += threads;
indx++;
}
acc_g_count += g_count;
}
}
}
}
}
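// Editorial worked example for create_maps (illustrative numbers): for one
// (n,h) pair with G=2 groups, group_counts = {5, 3}, block_counts = {2, 1}
// and threads = 4, the rows written to indx_maps are
//   {n, h, 0, 0, 4}   // group 0, queries 0..3
//   {n, h, 0, 4, 1}   // group 0, query  4
//   {n, h, 1, 5, 3}   // group 1, queries 5..7
// i.e. column 3 is the first query assigned to the block and column 4 is
// how many queries that block actually processes.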
/**
* Broadcast the aggregated results from tensor Y back to tensor X based on
* group indices G multiplied by factors F.
*/
void clustered_broadcast(
const torch::Tensor Y,
const torch::Tensor G,
const torch::Tensor F,
torch::Tensor X,
const torch::Tensor block_counts,
const torch::Tensor group_counts,
const int threads,
const int n_groups,
const int blocks,
torch::Tensor indx_maps
) {
// Make sure that we are using the correct GPU device
torch::DeviceGuard _guard(Y.device());
int N = X.size(0);
int H = X.size(1);
int L = X.size(2);
int E = X.size(3);
int C = Y.size(2);
hipLaunchKernelGGL(( create_maps), dim3(1), dim3(1), 0, 0,
group_counts.packed_accessor32<int, 3, torch::RestrictPtrTraits>(),
block_counts.packed_accessor32<int, 3, torch::RestrictPtrTraits>(),
threads,
indx_maps.packed_accessor32<int, 2, torch::RestrictPtrTraits>()
);
const int clusters_to_load = C / n_groups;
const int shared_mem = (E+1) * clusters_to_load * sizeof(float);
hipLaunchKernelGGL(( clustered_broadcast_kernel), dim3(blocks), dim3(threads), shared_mem, 0,
Y.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
G.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>(),
F.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
X.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
indx_maps.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
n_groups
);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def(
"clustered_aggregate",
&clustered_aggregate,
"Aggregate the vectors of X based on the "
"indices in G multiplied by F."
);
m.def(
"clustered_broadcast",
&clustered_broadcast,
"Broadcast the vectors of Y based on the indices "
"in G multiplied by F back to X."
);
}
| 2b02fe4d947aa60e5088670652c8da043ea30c5b.cu | //
// Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
// Written by Angelos Katharopoulos <[email protected]>,
// Written by Apoorv Vyas <[email protected]>
//
#include <torch/extension.h>
typedef torch::PackedTensorAccessor32<int32_t, 1, torch::RestrictPtrTraits> int_accessor_1d;
typedef torch::PackedTensorAccessor32<int32_t, 2, torch::RestrictPtrTraits> int_accessor_2d;
typedef torch::PackedTensorAccessor32<int32_t, 3, torch::RestrictPtrTraits> int_accessor_3d;
typedef torch::PackedTensorAccessor32<float, 3, torch::RestrictPtrTraits> float_accessor_3d;
typedef torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits> float_accessor_4d;
#define EPB 32
#define QPB 16
#define QPT 16
// We assume the queries have been rearranged such
// that the first contiguous block belongs to the first cluster,
// the next to the second, and so forth.
// The idea is that each thread now aggregates multiple
// queries and then does one atomic write.
// If the cluster id remains the same we keep accumulating; otherwise we
// do an atomic write for that cluster and move on.
// Threads in the y dimension index the embedding dimension.
__global__ void clustered_aggregate_kernel(
const float_accessor_4d x,
const int_accessor_3d g,
const float_accessor_3d f,
float_accessor_4d y,
const int_accessor_1d lengths,
int Le
) {
int N = x.size(0);
int H = x.size(1);
int L = x.size(2);
int E = x.size(3);
int C = y.size(2);
int e_idx = threadIdx.y;
int full_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int hl = H*Le;
int n = full_idx / hl;
int h = (full_idx % hl) / Le;
int lb = full_idx - n*hl - h*Le;
int l = lb*QPT;
if ((n >= N)) {
return;
}
int l_max = lengths[n];
if (l >= l_max) {
return;
}
int l_end = (l + QPT) < l_max ? (l + QPT):l_max;
int k_cur = g[n][h][l];
int k_prev = k_cur;
for (int e=threadIdx.y; e<E; e+=EPB) {
float res = 0.0;
k_cur = g[n][h][l];
k_prev = k_cur;
float f_cur = f[n][h][k_cur];
for (int i=l; i<l_end; i++) {
k_cur = g[n][h][i];
if (k_cur == k_prev) {
res += (f_cur * x[n][h][i][e]);
}
else {
atomicAdd(&y[n][h][k_prev][e], res);
f_cur = f[n][h][k_cur];
k_prev = k_cur;
res = (f_cur * x[n][h][i][e]);
}
}
atomicAdd(&y[n][h][k_cur][e], res);
}
}
/**
* Aggregate the passed vectors X based on group indices in G multiplied by
* factors F.
*/
void clustered_aggregate(
const torch::Tensor X,
const torch::Tensor G,
const torch::Tensor F,
const torch::Tensor lengths,
torch::Tensor Y
) {
// Make sure that we are using the correct GPU device
torch::DeviceGuard _guard(X.device());
int N = X.size(0);
int H = X.size(1);
int L = X.size(2);
int E = X.size(3);
int C = Y.size(2);
// Each thread works on QPT queries
// Le = (L + QPT - 1) / QPT
// There are QPB threads per block in x direction
// blocks = ((N * H * Le) + QPB - 1) // QPB;
int Le = (L + QPT - 1) / QPT;
int blocks = ((N*H*Le) + QPB - 1) / QPB;
dim3 dimBlock(QPB, EPB);
clustered_aggregate_kernel<<<blocks, dimBlock>>>(
X.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
G.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>(),
F.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
Y.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
lengths.packed_accessor32<int, 1, torch::RestrictPtrTraits>(),
Le
);
}
// Each block works on a group of clusters.
// The number of clusters in the group is
// decided by the size of shared memory.
// With E the dimension of each vector to be broadcast, we require
// #clusters * E < 192 * 64,
// since 192 * 64 is the total number of floats we can hold in
// shared memory.
// The idea is the following:
// originally we needed (N*H*L) / threads blocks.
// To calculate the required number of blocks now
// we additionally need the counts of clusters.
// For group 1, we need N*H*(sum of counts in g1) / threads blocks.
// For group 2, we need N*H*(sum of counts in g2) / threads blocks.
// Note that in practice we will not need an excessive number of groups:
// for E=64, a single group can hold 192 clusters.
__global__ void clustered_broadcast_kernel(
const float_accessor_4d y,
const int_accessor_3d sorted_g,
const float_accessor_3d f,
float_accessor_4d x,
int_accessor_2d indx_maps,
const int G
) {
int N = x.size(0);
int H = x.size(1);
int L = x.size(2);
int E = x.size(3);
int C = y.size(2);
extern __shared__ float shared_mem[];
int n = indx_maps[blockIdx.x][0];
int h = indx_maps[blockIdx.x][1];
int g = indx_maps[blockIdx.x][2];
int l = indx_maps[blockIdx.x][3] + threadIdx.x;
int n_queries = indx_maps[blockIdx.x][4];
// Load the values to broadcast and factors into shared memory
int clusters_to_load = C / G;
int cluster_offset = g * clusters_to_load;
float* shared_values = shared_mem;
float* shared_factors = shared_mem + clusters_to_load*E;
for (int c=threadIdx.x; c<clusters_to_load; c+=blockDim.x) {
for (int e=0; e<E; e++) {
shared_values[e*clusters_to_load + c] = y[n][h][c+cluster_offset][e];
}
shared_factors[c] = f[n][h][c+cluster_offset];
}
__syncthreads();
// Bail if out of bounds
if (threadIdx.x >= n_queries) {
return;
}
int k = sorted_g[n][h][l];
// if ((k < 0) || (k >= C)) {
// return;
// }
k -= cluster_offset;
// Copy-broadcast from y into x
float factor = shared_factors[k];
for (int e=0; e<E; e++) {
x[n][h][l][e] = (shared_values[e*clusters_to_load + k] * factor);
}
}
__global__
void create_maps(
const int_accessor_3d group_counts,
const int_accessor_3d block_counts,
const int threads,
int_accessor_2d indx_maps
) {
if (threadIdx.x == 0) {
int N = group_counts.size(0);
int H = group_counts.size(1);
int G = group_counts.size(2);
int indx = 0;
for (int n=0; n<N; n++){
for (int h=0; h<H; h++) {
int acc_g_count = 0;
for (int g=0; g<G; g++) {
int q_id = acc_g_count;
int q_end_id = 0;
int g_count = group_counts[n][h][g];
int blocks = block_counts[n][h][g];
for (int b=0; b<blocks; b++) {
indx_maps[indx][0] = n;
indx_maps[indx][1] = h;
indx_maps[indx][2] = g;
indx_maps[indx][3] = q_id;
q_end_id += threads;
indx_maps[indx][4] = (q_end_id < g_count) ? threads:g_count - (b*threads);
q_id += threads;
indx++;
}
acc_g_count += g_count;
}
}
}
}
}
/**
* Broadcast the aggregated results from tensor Y back to tensor X based on
* group indices G multiplied by factors F.
*/
void clustered_broadcast(
const torch::Tensor Y,
const torch::Tensor G,
const torch::Tensor F,
torch::Tensor X,
const torch::Tensor block_counts,
const torch::Tensor group_counts,
const int threads,
const int n_groups,
const int blocks,
torch::Tensor indx_maps
) {
// Make sure that we are using the correct GPU device
torch::DeviceGuard _guard(Y.device());
int N = X.size(0);
int H = X.size(1);
int L = X.size(2);
int E = X.size(3);
int C = Y.size(2);
create_maps<<<1, 1>>>(
group_counts.packed_accessor32<int, 3, torch::RestrictPtrTraits>(),
block_counts.packed_accessor32<int, 3, torch::RestrictPtrTraits>(),
threads,
indx_maps.packed_accessor32<int, 2, torch::RestrictPtrTraits>()
);
const int clusters_to_load = C / n_groups;
const int shared_mem = (E+1) * clusters_to_load * sizeof(float);
clustered_broadcast_kernel<<<blocks, threads, shared_mem>>>(
Y.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
G.packed_accessor32<int32_t, 3, torch::RestrictPtrTraits>(),
F.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
X.packed_accessor32<float, 4, torch::RestrictPtrTraits>(),
indx_maps.packed_accessor32<int, 2, torch::RestrictPtrTraits>(),
n_groups
);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def(
"clustered_aggregate",
&clustered_aggregate,
"Aggregate the vectors of X based on the "
"indices in G multiplied by F."
);
m.def(
"clustered_broadcast",
&clustered_broadcast,
"Broadcast the vectors of Y based on the indices "
"in G multiplied by F back to X."
);
}
|
eabf30271b066629665d04da96d0783c7502221e.hip | // !!! This is a file automatically generated by hipify!!!
#include <fstream>
#include <iostream>
#include <vector>
#include <hip/hip_runtime.h>
#include <mpi.h>
#include "util.h"
#include "CudaStream.h"
#include "CudaEvent.h"
// 2D diffusion example with MPI
// the grid has a fixed width of nx=128
// the user specifies the height, ny, as a power of two
// note that nx and ny have 2 added to them to account for halos
//
// the domain decomposition is in the vertical direction
// ny is the height of the local sub-domain
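// editorial sizing example (illustrative): with the default pow=8 and 4 MPI
// ranks the global grid is 128 x 256, each rank owns 128 x 64 interior
// points, and after adding the halo the local buffer is
// nx = 128 + 2 = 130, ny = 64 + 2 = 66, i.e. 130*66 = 8580 doubles per rank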
void write_to_file(int nx, int ny, double* data, int mpi_size, int mpi_rank);
template <typename T>
void fill_gpu(T *v, T value, int n);
__global__
void diffusion(double *x0, double *x1, int nx, int ny, double dt) {
// TODO : copy stencil implemented in diffusion2d.cu
}
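// The reference stencil is meant to be copied from diffusion2d.cu, which is
// not part of this file. The kernel below is only an editorial sketch of the
// usual explicit 5-point update on interior points (halo excluded); its name
// and the exact update are assumptions, not the reference solution.
__global__
void diffusion_interior_sketch(double *x0, double *x1, int nx, int ny, double dt) {
int i = threadIdx.x + blockDim.x*blockIdx.x + 1; // skip west halo column
int j = threadIdx.y + blockDim.y*blockIdx.y + 1; // skip south halo row
if(i < nx-1 && j < ny-1) {
int pos = i + j*nx;
x1[pos] = x0[pos] + dt * (-4.0*x0[pos]
+ x0[pos-1] + x0[pos+1]
+ x0[pos-nx] + x0[pos+nx]);
}
}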
int main(int argc, char** argv) {
// set up parameters
// first argument is the y dimension = 2^arg
size_t pow = read_arg(argc, argv, 1, 8);
// second argument is the number of time steps
size_t nsteps = read_arg(argc, argv, 2, 100);
// set domain size
size_t nx = 128;
size_t ny = 1 << pow;
double dt = 0.1;
// initialize MPI
int mpi_rank, mpi_size;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
// calculate global domain sizes
if(ny%mpi_size) {
std::cout << "error : global domain dimension " << ny
<< "must be divisible by number of MPI ranks " << mpi_size
<< std::endl;
exit(1);
}
else if(mpi_rank==0) {
std::cout << "\n## " << mpi_size << " MPI ranks" << std::endl;
std::cout << "## " << nx << "x" << ny
<< " : " << nx << "x" << ny/mpi_size << " per rank"
<< " for " << nsteps << " time steps"
<< " (" << nx*ny << " grid points)"
<< std::endl;
}
ny /= mpi_size;
// adjust dimensions for halo
nx += 2;
ny += 2;
// allocate memory on device and host
// note : allocate enough memory for the halo around the boundary
auto buffer_size = nx*ny;
double *x_host = malloc_host_pinned<double>(buffer_size);
double *x0 = malloc_device<double>(buffer_size);
double *x1 = malloc_device<double>(buffer_size);
// set initial conditions of 0 everywhere
fill_gpu(x0, 0., buffer_size);
fill_gpu(x1, 0., buffer_size);
// set boundary conditions of 1 on south border
if(mpi_rank==0) { // south boundary
fill_gpu(x0, 1., nx);
fill_gpu(x1, 1., nx);
}
if(mpi_rank==mpi_size-1) { // north boundary
fill_gpu(x0+nx*(ny-1), 1., nx);
fill_gpu(x1+nx*(ny-1), 1., nx);
}
CudaStream stream;
CudaStream copy_stream(true);
auto start_event = stream.enqueue_event();
// time stepping loop
for(auto step=0; step<nsteps; ++step) {
// TODO perform halo exchange
// x0(:, 0) <- south
// x0(:, 1) -> south
// x0(:, ny-1) <- north
// x0(:, ny-2) -> north
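// one possible exchange, shown only as an editorial sketch; it assumes a
// CUDA-aware MPI build so that device pointers can be passed directly,
// otherwise the boundary rows must be staged through host buffers first:
// if(mpi_rank > 0) // southern neighbour
// MPI_Sendrecv(x0+nx, nx, MPI_DOUBLE, mpi_rank-1, 0,
// x0, nx, MPI_DOUBLE, mpi_rank-1, 0,
// MPI_COMM_WORLD, MPI_STATUS_IGNORE);
// if(mpi_rank < mpi_size-1) // northern neighbour
// MPI_Sendrecv(x0+nx*(ny-2), nx, MPI_DOUBLE, mpi_rank+1, 0,
// x0+nx*(ny-1), nx, MPI_DOUBLE, mpi_rank+1, 0,
// MPI_COMM_WORLD, MPI_STATUS_IGNORE);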
// TODO copy in the kernel launch from diffusion2d.cu
std::swap(x0, x1);
}
auto stop_event = stream.enqueue_event();
stop_event.wait();
copy_to_host<double>(x0, x_host, buffer_size);
double time = stop_event.time_since(start_event);
if(mpi_rank==0) {
std::cout << "## " << time << "s, "
<< nsteps*(nx-2)*(ny-2)*mpi_size / time << " points/second"
<< std::endl << std::endl;
std::cout << "writing to output.bin/bov" << std::endl;
}
write_to_file(nx, ny, x_host, mpi_size, mpi_rank);
MPI_Finalize();
return 0;
}
template <typename T>
__global__
void fill(T *v, T value, int n) {
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if(tid<n) {
v[tid] = value;
}
}
template <typename T>
void fill_gpu(T *v, T value, int n) {
auto block_dim = 192ul;
auto grid_dim = n/block_dim + (n%block_dim ? 1 : 0);
hipLaunchKernelGGL(( fill<T>), dim3(grid_dim), dim3(block_dim), 0, 0, v, value, n);
}
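// editorial example of the launch arithmetic above: filling the
// 130*66 = 8580 element buffer from the sizing example with block_dim = 192
// gives grid_dim = 8580/192 + 1 = 44 + 1 = 45 blocks; the tid < n guard in
// fill() keeps the last, partially used block in bounds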
void write_to_file(int nx, int ny, double* data, int mpi_size, int mpi_rank) {
// collect the global solution to the root rank
auto block_size = nx*(ny-2); // discard first and last rows
std::vector<double> data_global(mpi_size*block_size);
MPI_Gather(data+nx, block_size, MPI_DOUBLE,
&data_global[0], block_size, MPI_DOUBLE,
0, MPI_COMM_WORLD);
if(mpi_rank==0) {
FILE* output = fopen("output.bin", "w");
fwrite(&data_global[0], sizeof(double), mpi_size* nx * (ny-2), output);
fclose(output);
std::ofstream fid("output.bov");
fid << "TIME: 0.0" << std::endl;
fid << "DATA_FILE: output.bin" << std::endl;
fid << "DATA_SIZE: " << nx << ", " << mpi_size*(ny-2) << ", 1" << std::endl;;
fid << "DATA_FORMAT: DOUBLE" << std::endl;
fid << "VARIABLE: phi" << std::endl;
fid << "DATA_ENDIAN: LITTLE" << std::endl;
fid << "CENTERING: nodal" << std::endl;
fid << "BRICK_SIZE: 1.0 1.0 1.0" << std::endl;
}
}
| eabf30271b066629665d04da96d0783c7502221e.cu | #include <fstream>
#include <iostream>
#include <vector>
#include <cuda.h>
#include <mpi.h>
#include "util.h"
#include "CudaStream.h"
#include "CudaEvent.h"
// 2D diffusion example with MPI
// the grid has a fixed width of nx=128
// the user specifies the height, ny, as a power of two
// note that nx and ny have 2 added to them to account for halos
//
// the domain decomposition is in the vertical direction
// ny is the height of the local sub-domain
void write_to_file(int nx, int ny, double* data, int mpi_size, int mpi_rank);
template <typename T>
void fill_gpu(T *v, T value, int n);
__global__
void diffusion(double *x0, double *x1, int nx, int ny, double dt) {
// TODO : copy stencil implemented in diffusion2d.cu
}
int main(int argc, char** argv) {
// set up parameters
// first argument is the y dimension = 2^arg
size_t pow = read_arg(argc, argv, 1, 8);
// second argument is the number of time steps
size_t nsteps = read_arg(argc, argv, 2, 100);
// set domain size
size_t nx = 128;
size_t ny = 1 << pow;
double dt = 0.1;
// initialize MPI
int mpi_rank, mpi_size;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
// calculate global domain sizes
if(ny%mpi_size) {
std::cout << "error : global domain dimension " << ny
<< "must be divisible by number of MPI ranks " << mpi_size
<< std::endl;
exit(1);
}
else if(mpi_rank==0) {
std::cout << "\n## " << mpi_size << " MPI ranks" << std::endl;
std::cout << "## " << nx << "x" << ny
<< " : " << nx << "x" << ny/mpi_size << " per rank"
<< " for " << nsteps << " time steps"
<< " (" << nx*ny << " grid points)"
<< std::endl;
}
ny /= mpi_size;
// adjust dimensions for halo
nx += 2;
ny += 2;
// allocate memory on device and host
// note : allocate enough memory for the halo around the boundary
auto buffer_size = nx*ny;
double *x_host = malloc_host_pinned<double>(buffer_size);
double *x0 = malloc_device<double>(buffer_size);
double *x1 = malloc_device<double>(buffer_size);
// set initial conditions of 0 everywhere
fill_gpu(x0, 0., buffer_size);
fill_gpu(x1, 0., buffer_size);
// set boundary conditions of 1 on south border
if(mpi_rank==0) { // south boundary
fill_gpu(x0, 1., nx);
fill_gpu(x1, 1., nx);
}
if(mpi_rank==mpi_size-1) { // north boundary
fill_gpu(x0+nx*(ny-1), 1., nx);
fill_gpu(x1+nx*(ny-1), 1., nx);
}
CudaStream stream;
CudaStream copy_stream(true);
auto start_event = stream.enqueue_event();
// time stepping loop
for(auto step=0; step<nsteps; ++step) {
// TODO perform halo exchange
// x0(:, 0) <- south
// x0(:, 1) -> south
// x0(:, ny-1) <- north
// x0(:, ny-2) -> north
// TODO copy in the kernel launch from diffusion2d.cu
std::swap(x0, x1);
}
auto stop_event = stream.enqueue_event();
stop_event.wait();
copy_to_host<double>(x0, x_host, buffer_size);
double time = stop_event.time_since(start_event);
if(mpi_rank==0) {
std::cout << "## " << time << "s, "
<< nsteps*(nx-2)*(ny-2)*mpi_size / time << " points/second"
<< std::endl << std::endl;
std::cout << "writing to output.bin/bov" << std::endl;
}
write_to_file(nx, ny, x_host, mpi_size, mpi_rank);
MPI_Finalize();
return 0;
}
template <typename T>
__global__
void fill(T *v, T value, int n) {
int tid = threadIdx.x + blockDim.x*blockIdx.x;
if(tid<n) {
v[tid] = value;
}
}
template <typename T>
void fill_gpu(T *v, T value, int n) {
auto block_dim = 192ul;
auto grid_dim = n/block_dim + (n%block_dim ? 1 : 0);
fill<T><<<grid_dim, block_dim>>>(v, value, n);
}
void write_to_file(int nx, int ny, double* data, int mpi_size, int mpi_rank) {
// collect the global solution to the root rank
auto block_size = nx*(ny-2); // discard first and last rows
std::vector<double> data_global(mpi_size*block_size);
MPI_Gather(data+nx, block_size, MPI_DOUBLE,
&data_global[0], block_size, MPI_DOUBLE,
0, MPI_COMM_WORLD);
if(mpi_rank==0) {
FILE* output = fopen("output.bin", "w");
fwrite(&data_global[0], sizeof(double), mpi_size* nx * (ny-2), output);
fclose(output);
std::ofstream fid("output.bov");
fid << "TIME: 0.0" << std::endl;
fid << "DATA_FILE: output.bin" << std::endl;
fid << "DATA_SIZE: " << nx << ", " << mpi_size*(ny-2) << ", 1" << std::endl;;
fid << "DATA_FORMAT: DOUBLE" << std::endl;
fid << "VARIABLE: phi" << std::endl;
fid << "DATA_ENDIAN: LITTLE" << std::endl;
fid << "CENTERING: nodal" << std::endl;
fid << "BRICK_SIZE: 1.0 1.0 1.0" << std::endl;
}
}
|
b2812509e67d72a884dce5a12b1d6ab3668e595b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
MIT License
Copyright 2020 Jee W. Choi, Marat Dukhan, and Xing Liu
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so, subject
to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
// libraries
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cutil_inline.h>
#include "compute_kernel.h"
// GPU kernel error checking function
void gpu_check_error__srcpos (FILE* fp, const char* filename, size_t line)
{
hipError_t C_E = hipGetLastError ();
if (C_E) {
fprintf (fp, "*** [%s:%lu] CUDA ERROR %d: %s ***\n", filename, line, C_E,
hipGetErrorString (C_E));
fflush (fp);
exit (-1); /* abort program */
}
}
float runTest(int num_threads, int block_size, int num_flops)
{
int num_blocks = (num_threads + block_size - 1) / block_size;
TYPE* h_out;
TYPE* d_out;
// timer
hipEvent_t start, stop;
float total_time_taken;
if(num_blocks <= 65535) {
// allocate memory
h_out = (TYPE*) malloc (num_threads * sizeof (TYPE));
cutilSafeCall (hipMalloc ((void**) &d_out, num_threads * sizeof (TYPE)));
dim3 grid (num_blocks);
dim3 threads (block_size);
// start timer
cutilSafeCall (hipEventCreate (&start));
cutilSafeCall (hipEventCreate (&stop));
cutilSafeCall (hipEventRecord (start, 0));
// execute kernel
switch (num_flops) {
case 1:
hipLaunchKernelGGL(( compute_kernel_1) , dim3(grid), dim3(threads), 0, 0, num_threads, num_flops, d_out);
break;
case 2:
hipLaunchKernelGGL(( compute_kernel_2) , dim3(grid), dim3(threads), 0, 0, num_threads, num_flops, d_out);
break;
case 4:
hipLaunchKernelGGL(( compute_kernel_4) , dim3(grid), dim3(threads), 0, 0, num_threads, num_flops, d_out);
break;
case 8:
hipLaunchKernelGGL(( compute_kernel_8) , dim3(grid), dim3(threads), 0, 0, num_threads, num_flops, d_out);
break;
case 16:
hipLaunchKernelGGL(( compute_kernel_16) , dim3(grid), dim3(threads), 0, 0, num_threads, num_flops, d_out);
break;
case 32:
hipLaunchKernelGGL(( compute_kernel_32) , dim3(grid), dim3(threads), 0, 0, num_threads, num_flops, d_out);
break;
case 64:
hipLaunchKernelGGL(( compute_kernel_64) , dim3(grid), dim3(threads), 0, 0, num_threads, num_flops, d_out);
break;
case 128:
hipLaunchKernelGGL(( compute_kernel_128) , dim3(grid), dim3(threads), 0, 0, num_threads, num_flops, d_out);
break;
case 256:
hipLaunchKernelGGL(( compute_kernel_256) , dim3(grid), dim3(threads), 0, 0, num_threads, num_flops, d_out);
break;
case 512:
hipLaunchKernelGGL(( compute_kernel_512) , dim3(grid), dim3(threads), 0, 0, num_threads, num_flops, d_out);
break;
case 1024:
hipLaunchKernelGGL(( compute_kernel_1024) , dim3(grid), dim3(threads), 0, 0, num_threads, num_flops, d_out);
break;
case 2048:
hipLaunchKernelGGL(( compute_kernel_2048) , dim3(grid), dim3(threads), 0, 0, num_threads, num_flops, d_out);
break;
case 4096:
hipLaunchKernelGGL(( compute_kernel_4096) , dim3(grid), dim3(threads), 0, 0, num_threads, num_flops, d_out);
break;
default:
fprintf(stderr, "Invalid num_flops: %d\n", num_flops);
total_time_taken = -1.0f;
break;
}
// end timer
cutilSafeCall (hipEventRecord (stop, 0));
// cutilSafeCall (cutilDeviceSynchronize ());
cutilSafeCall (hipDeviceSynchronize ());
cutilSafeCall (hipEventElapsedTime (&total_time_taken, start, stop));
gpu_check_error__srcpos (stderr, __FILE__, __LINE__);
free (h_out);
cutilSafeCall (hipFree (d_out));
fprintf(stderr,
"Execution configuration: %d blocks %d threads %d flops: %f (ms)\n",
num_blocks, block_size, num_flops, total_time_taken);
} else {
total_time_taken = -1.0f;
}
return total_time_taken;
}
void autotune(int* num_threads, int* block_size, int* flops_per_thread)
{
int i, j, k, l;
// structures for storing execution configurations
int *threads;
int *tb;
int *num_flops;
float* total_time_taken;
float* perf;
float best_perf;
// allocate memory for storing kernel execution configuration
threads = (int*) malloc (5 * 3 * 13 * sizeof (int));
tb = (int*) malloc (5 * 3 * 13 * sizeof (int));
num_flops = (int*) malloc (5 * 3 * 13 * sizeof (int));
total_time_taken = (float*) malloc (5 * 3 * 13 * sizeof (float));
perf = (float*) malloc (5 * 3 * 13 * sizeof (float));
// autotune for best execution configuration
l = 0;
// vary # of threads
for(i = 1048576; i <= 16777216; i *= 2) {
// vary thread block size
for(j = 128; j <= 512; j *= 2) {
for(k = 1; k <= 4096; k *= 2) {
threads[l] = i;
tb[l] = j;
num_flops[l] = k;
// wrapper function for the test kernels
total_time_taken[l] = runTest (i, j, k);
if(total_time_taken[l] > 0) {
perf[l] = ((((2.0 * i / 1e6) * k)) / total_time_taken[l]);
} else {
perf[l] = 0.0;
}
l++;
}
}
}
// find best performing configuration
best_perf = perf[0];
// j keeps track of the index to the best configuration
j = 0;
for(int i = 1; i < l; i++) {
if(perf[i] > best_perf) {
best_perf = perf[i];
j = i;
}
}
*num_threads = threads[j];
*block_size = tb[j];
*flops_per_thread = num_flops[j];
free (threads);
free (tb);
free (num_flops);
free (total_time_taken);
free (perf);
}
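// Editorial note on the performance formula used in autotune(): each of the
// k iterations per thread is counted as 2 floating point operations (the
// multiply and the add in tmp + tmp*CONST, cf. validateResults), so
// perf = (2 * threads * flops / 1e6) / time_ms is MFLOP per millisecond,
// i.e. GFLOP/s. Example: 2^20 threads and 512 flops/thread in 10 ms give
// 2*1048576*512/1e6 = 1073.74 MFLOP and hence 107.37 GFLOP/s.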
void writeConfig(char* file_name, int num_threads, int block_size,
int flops_per_thread)
{
FILE* fp = fopen(file_name, "w");
fprintf(fp, "%d\n", num_threads);
fprintf(fp, "%d\n", block_size);
fprintf(fp, "%d\n", flops_per_thread);
fclose (fp);
}
void readConfig(FILE *fp, int* num_threads, int* block_size,
int* flops_per_thread)
{
char line[20];
if(fgets(line, 20, fp) != NULL) {
*num_threads = atoi (line);
} else {
fprintf(stderr, "Can't find number of threads in the config file\n");
exit(0);
}
if(fgets(line, 20, fp) != NULL) {
*block_size = atoi (line);
} else {
fprintf(stderr, "Can't find block size in the config file\n");
exit(0);
}
if(fgets(line, 20, fp) != NULL) {
*flops_per_thread = atoi (line);
} else {
fprintf(stderr, "Can't find flops per thread in the config file\n");
exit(0);
}
}
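// Editorial example of the plain-text config file parsed above and written
// by writeConfig() -- three lines (illustrative values):
//   16777216      (number of threads)
//   256           (thread block size)
//   512           (flops per thread)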
int validateResults(TYPE* out, int nThreads, int bSize, int nFlops)
{
int i, j, k;
int index;
TYPE tmp;
fprintf(stderr, "generating random numbers from 0 to %d\n", RAND_MAX);
// test 1000 samples
k = 0;
for(i = 0; i < 1000; i++) {
index = rand () % nThreads;
tmp = 1.0 * (index % bSize);
for(j = 0; j < nFlops; j++) {
tmp = tmp + tmp * CONST;
}
if(fabs (tmp - out[index]) > 1e-5) {
k++;
// fprintf(stderr, "%d ==> %f %f\n", index, tmp, out[index]);
} else {
// fprintf(stderr, "%d <== %f %f\n", index, tmp, out[index]);
}
}
return k;
}
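// Editorial note: the reference value follows the recurrence
// tmp <- tmp + tmp*CONST, so after nFlops iterations
// tmp = (index % bSize) * (1 + CONST)^nFlops, which is the value the
// device results in out[] are checked against.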
int main(int argc, char** argv)
{
int i;
// timer
hipEvent_t start, stop;
float total_time_taken;
// execution parameters
int nThreads;
int bSize;
int nFlops;
int num_blocks;
// memory data structures
TYPE* h_in;
TYPE* d_in;
TYPE* d_out;
TYPE* h_out;
// file
FILE* fp;
if(argc != 2) {
fprintf(stderr, "usage: %s <file name>\n", argv[0]);
exit (0);
}
fp = fopen (argv[1], "r");
if(fp == NULL) {
fprintf(stderr, "File %s does not exist, autotuning...\n", argv[1]);
autotune (&nThreads, &bSize, &nFlops);
writeConfig (argv[1], nThreads, bSize, nFlops);
} else {
fprintf(stderr, "Reading parameters...\n");
readConfig (fp, &nThreads, &bSize, &nFlops);
fclose (fp);
}
fprintf(stderr,
"Best performance at %d threads %d block size %d flops/thread\n",
nThreads, bSize, nFlops);
// find the best GPU in the system
hipSetDevice(cutGetMaxGflopsDeviceId ());
num_blocks = (nThreads + bSize - 1) / bSize;
// allocate memory
h_in = (TYPE*) malloc (nThreads * sizeof (TYPE));
h_out = (TYPE*) malloc (nThreads * sizeof (TYPE));
cutilSafeCall (hipMalloc ((void**) &d_in, nThreads * sizeof (TYPE)));
cutilSafeCall (hipMalloc ((void**) &d_out, nThreads * sizeof (TYPE)));
// initialize memory
for(i = 0; i < nThreads; i++) {
h_in[i] = drand48 ();
}
cutilSafeCall (hipMemcpy (d_in, h_in, nThreads * sizeof (TYPE),
hipMemcpyHostToDevice));
dim3 grid (num_blocks);
dim3 threads (bSize);
// start timer
cutilSafeCall (hipEventCreate (&start));
cutilSafeCall (hipEventCreate (&stop));
cutilSafeCall (hipEventRecord (start, 0));
for(int iter = 0; iter < 1; iter++) {
switch (nFlops) {
case 1:
hipLaunchKernelGGL(( compute_kernel_1) , dim3(grid), dim3(threads), 0, 0, nThreads, nFlops, d_out);
break;
case 2:
hipLaunchKernelGGL(( compute_kernel_2) , dim3(grid), dim3(threads), 0, 0, nThreads, nFlops, d_out);
break;
case 4:
hipLaunchKernelGGL(( compute_kernel_4) , dim3(grid), dim3(threads), 0, 0, nThreads, nFlops, d_out);
break;
case 8:
hipLaunchKernelGGL(( compute_kernel_8) , dim3(grid), dim3(threads), 0, 0, nThreads, nFlops, d_out);
break;
case 16:
hipLaunchKernelGGL(( compute_kernel_16) , dim3(grid), dim3(threads), 0, 0, nThreads, nFlops, d_out);
break;
case 32:
hipLaunchKernelGGL(( compute_kernel_32) , dim3(grid), dim3(threads), 0, 0, nThreads, nFlops, d_out);
break;
case 64:
hipLaunchKernelGGL(( compute_kernel_64) , dim3(grid), dim3(threads), 0, 0, nThreads, nFlops, d_out);
break;
case 128:
hipLaunchKernelGGL(( compute_kernel_128) , dim3(grid), dim3(threads), 0, 0, nThreads, nFlops, d_out);
break;
case 256:
hipLaunchKernelGGL(( compute_kernel_256) , dim3(grid), dim3(threads), 0, 0, nThreads, nFlops, d_out);
break;
case 512:
hipLaunchKernelGGL(( compute_kernel_512) , dim3(grid), dim3(threads), 0, 0, nThreads, nFlops, d_out);
break;
case 1024:
hipLaunchKernelGGL(( compute_kernel_1024) , dim3(grid), dim3(threads), 0, 0, nThreads, nFlops, d_out);
break;
case 2048:
hipLaunchKernelGGL(( compute_kernel_2048) , dim3(grid), dim3(threads), 0, 0, nThreads, nFlops, d_out);
break;
case 4096:
hipLaunchKernelGGL(( compute_kernel_4096) , dim3(grid), dim3(threads), 0, 0, nThreads, nFlops, d_out);
break;
default:
fprintf(stderr, "Invalid nFlops: %d\n", nFlops);
total_time_taken = -1.0f;
break;
}
}
// end timer
cutilSafeCall (hipEventRecord (stop, 0));
// cutilSafeCall (cutilDeviceSynchronize ());
cutilSafeCall (hipDeviceSynchronize ());
cutilSafeCall (hipEventElapsedTime (&total_time_taken, start, stop));
cutilSafeCall (hipDeviceSynchronize ());
gpu_check_error__srcpos (stderr, __FILE__, __LINE__);
// copy results back
cutilSafeCall (hipMemcpy (h_out, d_out, nThreads * sizeof (TYPE),
hipMemcpyDeviceToHost));
fprintf(stderr, "Results validated: %d\n", validateResults (h_out, nThreads,
bSize, nFlops));
fprintf(stderr, "Time taken to execute %f gflops: %f (ms)\n", (((2.0 *
nThreads / 1e6) * nFlops) / 1e3), total_time_taken);
fprintf(stderr, "Effective performance: %f (GFlops/s)\n",
(((2.0 * nThreads / 1e6) * nFlops)) / total_time_taken);
free (h_out);
cutilSafeCall (hipFree (d_out));
return 0;
}
| b2812509e67d72a884dce5a12b1d6ab3668e595b.cu | /*
MIT License
Copyright 2020 Jee W. Choi, Marat Dukhan, and Xing Liu
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so, subject
to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
// libraries
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cutil_inline.h>
#include "compute_kernel.h"
// GPU kernel error checking function
void gpu_check_error__srcpos (FILE* fp, const char* filename, size_t line)
{
cudaError_t C_E = cudaGetLastError ();
if (C_E) {
fprintf (fp, "*** [%s:%lu] CUDA ERROR %d: %s ***\n", filename, line, C_E,
cudaGetErrorString (C_E));
fflush (fp);
exit (-1); /* abort program */
}
}
float runTest(int num_threads, int block_size, int num_flops)
{
int num_blocks = (num_threads + block_size - 1) / block_size;
TYPE* h_out;
TYPE* d_out;
// timer
cudaEvent_t start, stop;
float total_time_taken;
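// early CUDA GPUs limit a grid dimension to 65535 blocks, so skip configurations beyond that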
if(num_blocks <= 65535) {
// allocate memory
h_out = (TYPE*) malloc (num_threads * sizeof (TYPE));
cutilSafeCall (cudaMalloc ((void**) &d_out, num_threads * sizeof (TYPE)));
dim3 grid (num_blocks);
dim3 threads (block_size);
// start timer
cutilSafeCall (cudaEventCreate (&start));
cutilSafeCall (cudaEventCreate (&stop));
cutilSafeCall (cudaEventRecord (start, 0));
// execute kernel
switch (num_flops) {
case 1:
compute_kernel_1 <<<grid, threads>>> (num_threads, num_flops, d_out);
break;
case 2:
compute_kernel_2 <<<grid, threads>>> (num_threads, num_flops, d_out);
break;
case 4:
compute_kernel_4 <<<grid, threads>>> (num_threads, num_flops, d_out);
break;
case 8:
compute_kernel_8 <<<grid, threads>>> (num_threads, num_flops, d_out);
break;
case 16:
compute_kernel_16 <<<grid, threads>>> (num_threads, num_flops, d_out);
break;
case 32:
compute_kernel_32 <<<grid, threads>>> (num_threads, num_flops, d_out);
break;
case 64:
compute_kernel_64 <<<grid, threads>>> (num_threads, num_flops, d_out);
break;
case 128:
compute_kernel_128 <<<grid, threads>>> (num_threads, num_flops, d_out);
break;
case 256:
compute_kernel_256 <<<grid, threads>>> (num_threads, num_flops, d_out);
break;
case 512:
compute_kernel_512 <<<grid, threads>>> (num_threads, num_flops, d_out);
break;
case 1024:
compute_kernel_1024 <<<grid, threads>>> (num_threads, num_flops, d_out);
break;
case 2048:
compute_kernel_2048 <<<grid, threads>>> (num_threads, num_flops, d_out);
break;
case 4096:
compute_kernel_4096 <<<grid, threads>>> (num_threads, num_flops, d_out);
break;
default:
fprintf(stderr, "Invalid num_flops: %d\n", num_flops);
total_time_taken = -1.0f;
break;
}
// end timer
cutilSafeCall (cudaEventRecord (stop, 0));
// cutilSafeCall (cutilDeviceSynchronize ());
cutilSafeCall (cudaThreadSynchronize ());
cutilSafeCall (cudaEventElapsedTime (&total_time_taken, start, stop));
gpu_check_error__srcpos (stderr, __FILE__, __LINE__);
free (h_out);
cutilSafeCall (cudaFree (d_out));
fprintf(stderr,
"Execution configuration: %d blocks %d threads %d flops: %f (ms)\n",
num_blocks, block_size, num_flops, total_time_taken);
} else {
total_time_taken = -1.0f;
}
return total_time_taken;
}
void autotune(int* num_threads, int* block_size, int* flops_per_thread)
{
int i, j, k, l;
// structures for storing execution configurations
int *threads;
int *tb;
int *num_flops;
float* total_time_taken;
float* perf;
float best_perf;
// allocate memory for storing kernel execution configuration
threads = (int*) malloc (5 * 3 * 13 * sizeof (int));
tb = (int*) malloc (5 * 3 * 13 * sizeof (int));
num_flops = (int*) malloc (5 * 3 * 13 * sizeof (int));
total_time_taken = (float*) malloc (5 * 3 * 13 * sizeof (float));
perf = (float*) malloc (5 * 3 * 13 * sizeof (float));
// autotune for best execution configuration
l = 0;
// vary # of threads
for(i = 1048576; i <= 16777216; i *= 2) {
// vary thread block size
for(j = 128; j <= 512; j *= 2) {
for(k = 1; k <= 4096; k *= 2) {
threads[l] = i;
tb[l] = j;
num_flops[l] = k;
// wrapper function for the test kernels
total_time_taken[l] = runTest (i, j, k);
if(total_time_taken[l] > 0) {
perf[l] = ((((2.0 * i / 1e6) * k)) / total_time_taken[l]);
} else {
perf[l] = 0.0;
}
l++;
}
}
}
// find best performing configuration
best_perf = perf[0];
// j keeps track of the index to the best configuration
j = 0;
for(int i = 1; i < l; i++) {
if(perf[i] > best_perf) {
best_perf = perf[i];
j = i;
}
}
*num_threads = threads[j];
*block_size = tb[j];
*flops_per_thread = num_flops[j];
free (threads);
free (tb);
free (num_flops);
free (total_time_taken);
free (perf);
}
void writeConfig(char* file_name, int num_threads, int block_size,
int flops_per_thread)
{
FILE* fp = fopen(file_name, "w");
fprintf(fp, "%d\n", num_threads);
fprintf(fp, "%d\n", block_size);
fprintf(fp, "%d\n", flops_per_thread);
fclose (fp);
}
void readConfig(FILE *fp, int* num_threads, int* block_size,
int* flops_per_thread)
{
char line[20];
if(fgets(line, 20, fp) != NULL) {
*num_threads = atoi (line);
} else {
fprintf(stderr, "Can't find number of threads in the config file\n");
exit(0);
}
if(fgets(line, 20, fp) != NULL) {
*block_size = atoi (line);
} else {
fprintf(stderr, "Can't find block size in the config file\n");
exit(0);
}
if(fgets(line, 20, fp) != NULL) {
*flops_per_thread = atoi (line);
} else {
fprintf(stderr, "Can't find flops per thread in the config file\n");
exit(0);
}
}
int validateResults(TYPE* out, int nThreads, int bSize, int nFlops)
{
int i, j, k;
int index;
TYPE tmp;
fprintf(stderr, "generating random numbers from 0 to %d\n", RAND_MAX);
// test 1000 samples
k = 0;
for(i = 0; i < 1000; i++) {
index = rand () % nThreads;
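// recompute the expected result on the host: seed with the thread's index within its block, then apply nFlops multiply-add steps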
tmp = 1.0 * (index % bSize);
for(j = 0; j < nFlops; j++) {
tmp = tmp + tmp * CONST;
}
if(fabs (tmp - out[index]) > 1e-5) {
k++;
// fprintf(stderr, "%d ==> %f %f\n", index, tmp, out[index]);
} else {
// fprintf(stderr, "%d <== %f %f\n", index, tmp, out[index]);
}
}
return k;
}
int main(int argc, char** argv)
{
int i;
// timer
cudaEvent_t start, stop;
float total_time_taken;
// execution parameters
int nThreads;
int bSize;
int nFlops;
int num_blocks;
// memory data structures
TYPE* h_in;
TYPE* d_in;
TYPE* d_out;
TYPE* h_out;
// file
FILE* fp;
if(argc != 2) {
fprintf(stderr, "usage: %s <file name>\n", argv[0]);
exit (0);
}
fp = fopen (argv[1], "r");
if(fp == NULL) {
fprintf(stderr, "File %s does not exist, autotuning...\n", argv[1]);
autotune (&nThreads, &bSize, &nFlops);
writeConfig (argv[1], nThreads, bSize, nFlops);
} else {
fprintf(stderr, "Reading parameters...\n");
readConfig (fp, &nThreads, &bSize, &nFlops);
fclose (fp);
}
fprintf(stderr,
"Best performance at %d threads %d block size %d flops/thread\n",
nThreads, bSize, nFlops);
// find the best GPU in the system
cudaSetDevice(cutGetMaxGflopsDeviceId ());
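// ceiling division: enough blocks so every one of nThreads elements is covered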
num_blocks = (nThreads + bSize - 1) / bSize;
// allocate memory
h_in = (TYPE*) malloc (nThreads * sizeof (TYPE));
h_out = (TYPE*) malloc (nThreads * sizeof (TYPE));
cutilSafeCall (cudaMalloc ((void**) &d_in, nThreads * sizeof (TYPE)));
cutilSafeCall (cudaMalloc ((void**) &d_out, nThreads * sizeof (TYPE)));
// initialize memory
for(i = 0; i < nThreads; i++) {
h_in[i] = drand48 ();
}
cutilSafeCall (cudaMemcpy (d_in, h_in, nThreads * sizeof (TYPE),
cudaMemcpyHostToDevice));
dim3 grid (num_blocks);
dim3 threads (bSize);
// start timer
cutilSafeCall (cudaEventCreate (&start));
cutilSafeCall (cudaEventCreate (&stop));
cutilSafeCall (cudaEventRecord (start, 0));
for(int iter = 0; iter < 1; iter++) {
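// dispatch to the kernel specialized for this flop count; validateResults() expects each thread to apply nFlops multiply-add steps (tmp = tmp + tmp * CONST)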
switch (nFlops) {
case 1:
compute_kernel_1 <<<grid, threads>>> (nThreads, nFlops, d_out);
break;
case 2:
compute_kernel_2 <<<grid, threads>>> (nThreads, nFlops, d_out);
break;
case 4:
compute_kernel_4 <<<grid, threads>>> (nThreads, nFlops, d_out);
break;
case 8:
compute_kernel_8 <<<grid, threads>>> (nThreads, nFlops, d_out);
break;
case 16:
compute_kernel_16 <<<grid, threads>>> (nThreads, nFlops, d_out);
break;
case 32:
compute_kernel_32 <<<grid, threads>>> (nThreads, nFlops, d_out);
break;
case 64:
compute_kernel_64 <<<grid, threads>>> (nThreads, nFlops, d_out);
break;
case 128:
compute_kernel_128 <<<grid, threads>>> (nThreads, nFlops, d_out);
break;
case 256:
compute_kernel_256 <<<grid, threads>>> (nThreads, nFlops, d_out);
break;
case 512:
compute_kernel_512 <<<grid, threads>>> (nThreads, nFlops, d_out);
break;
case 1024:
compute_kernel_1024 <<<grid, threads>>> (nThreads, nFlops, d_out);
break;
case 2048:
compute_kernel_2048 <<<grid, threads>>> (nThreads, nFlops, d_out);
break;
case 4096:
compute_kernel_4096 <<<grid, threads>>> (nThreads, nFlops, d_out);
break;
default:
fprintf(stderr, "Invalid nFlops: %d\n", nFlops);
total_time_taken = -1.0f;
break;
}
}
// end timer
cutilSafeCall (cudaEventRecord (stop, 0));
// cutilSafeCall (cutilDeviceSynchronize ());
cutilSafeCall (cudaThreadSynchronize ());
cutilSafeCall (cudaEventElapsedTime (&total_time_taken, start, stop));
cutilSafeCall (cudaThreadSynchronize ());
gpu_check_error__srcpos (stderr, __FILE__, __LINE__);
// copy results back
cutilSafeCall (cudaMemcpy (h_out, d_out, nThreads * sizeof (TYPE),
cudaMemcpyDeviceToHost));
fprintf(stderr, "Results validated: %d\n", validateResults (h_out, nThreads,
bSize, nFlops));
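// each step is one multiply plus one add, so total work is 2 * nThreads * nFlops floating-point operations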
fprintf(stderr, "Time taken to execute %f gflops: %f (ms)\n", (((2.0 *
nThreads / 1e6) * nFlops) / 1e3), total_time_taken);
fprintf(stderr, "Effective performance: %f (GFlops/s)\n",
(((2.0 * nThreads / 1e6) * nFlops)) / total_time_taken);
free (h_in);
free (h_out);
cutilSafeCall (cudaFree (d_in));
cutilSafeCall (cudaFree (d_out));
return 0;
}
|
a1a69430d75f233527c3ddcd00dc7535b0424481.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#include "matrix.h"
#include "nv_wavenet.cuh"
#include "nv_wavenet_util.cuh"
#include "nv_wavenet_reference.h"
#include <assert.h>
#include <stdio.h>
#include <vector>
Matrix* createMatrix(int r, int c) {
float mean = 0.0;
float scale = 0.5 / r;
Matrix* m = new Matrix(r,c,false);
m->randomize(mean,scale);
return m;
}
template <typename T_weight, typename T_data, int R, int S, int A>
void runTest(int num_layers, int max_dilation, int batch_size, int num_iterations, int samples_per_iteration, int impl, bool inputsFromDevice=false, bool weightsFromDevice=false) {
float mean = 0.0;
float scale = 0.5 / R;
// Just encode one-hot vector as an integer
std::vector<int> yInPrev(batch_size);
std::vector<int> yInCur(batch_size);
for (int b=0; b<batch_size; b++) {
yInPrev[b] = rand() % A;
yInCur[b] = rand() % A;
}
std::vector<int> yOut(batch_size);
Matrix outputSelectors(batch_size,samples_per_iteration);
outputSelectors.randomize(0.5,1.0);
Matrix embeddingsPrev(R,A,false);
Matrix embeddingsCur(R,A,false);
embeddingsPrev.randomize(mean,scale);
embeddingsCur.randomize(mean,scale);
std::vector<Matrix*> Wprev(num_layers);
std::vector<Matrix*> Wcur(num_layers);
std::vector<Matrix*> Bh(num_layers);
std::vector<Matrix*> Wres(num_layers);
std::vector<Matrix*> Bres(num_layers);
std::vector<Matrix*> Wskip(num_layers);
std::vector<Matrix*> Bskip(num_layers);
std::vector<Matrix*> skipOut(num_layers+1);
// Retain results for dilated inputs
std::vector<std::vector<Matrix*>> Xt(samples_per_iteration);
for (int sample=0; sample<samples_per_iteration; sample++) {
Xt[sample].resize(num_layers+1);
}
for (int l=0; l<num_layers; l++) {
// Weights
Wprev[l] = createMatrix(2*R,R);
Wcur[l] = createMatrix(2*R,R);
Bh[l] = createMatrix(2*R,1);
Wres[l] = createMatrix(R,R);
Bres[l] = createMatrix(R,1);
Wskip[l] = createMatrix(S,R);
Bskip[l] = createMatrix(S,1);
// Activations
skipOut[l] = createMatrix(S,batch_size);
}
for (int sample=0; sample<samples_per_iteration; sample++) {
for (int layer=0; layer<num_layers+1; layer++) {
Xt[sample][layer] = createMatrix(R, batch_size);
}
}
Matrix WskipOut(A,S,false);
WskipOut.randomize(mean,scale);
Matrix BskipOut(A,1,false);
BskipOut.randomize(mean, scale);
Matrix Wout(A,A,false);
Wout.randomize(mean,scale);
Matrix Bout(A,1,false);
Bout.randomize(mean,scale);
Matrix skipOutFinal(A,batch_size,false);
Matrix out(A,batch_size,false);
Matrix p(A,batch_size,false);
Matrix zero(S,batch_size,false);
for (int row = 0; row < S; row++) {
for (int col = 0; col < batch_size; col++) {
zero.set(row,col,0.f);
}
}
nvWavenetReference ref(num_layers, batch_size, samples_per_iteration, R, S, A, max_dilation);
nvWavenetInfer<T_weight,T_data,R,S,A>* infer = new nvWavenetInfer<T_weight,T_data,R,S,A>(num_layers, max_dilation, batch_size, samples_per_iteration, impl);
ref.setEmbeddings(embeddingsPrev.data(), embeddingsCur.data());
for (int l=0; l<num_layers; l++) {
ref.setLayerWeights(l, Wprev[l]->data(), Wcur[l]->data(), Bh[l]->data(), Wres[l]->data(), Bres[l]->data(), Wskip[l]->data(), Bskip[l]->data());
}
ref.setOutWeights(WskipOut.data(), BskipOut.data(), Wout.data(), Bout.data());
if (weightsFromDevice) {
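// stage weights in temporary device buffers so the setters are exercised with device pointers; the temporaries are freed once consumed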
float* d_embeddingsPrev;
float* d_embeddingsCur;
gpuErrChk(hipMalloc(&d_embeddingsPrev, R*A*sizeof(float)));
gpuErrChk(hipMemcpy(d_embeddingsPrev, embeddingsPrev.data(), R*A*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc(&d_embeddingsCur, R*A*sizeof(float)));
gpuErrChk(hipMemcpy(d_embeddingsCur, embeddingsCur.data(), R*A*sizeof(float), hipMemcpyHostToDevice));
infer->setEmbeddings(d_embeddingsPrev, d_embeddingsCur);
gpuErrChk(hipFree(d_embeddingsPrev));
gpuErrChk(hipFree(d_embeddingsCur));
float* d_Wprev;
float* d_Wcur;
float* d_Bh;
float* d_Wres;
float* d_Bres;
float* d_Wskip;
float* d_Bskip;
for (int l=0; l<num_layers; l++) {
gpuErrChk(hipMalloc(&d_Wprev, 2*R*R*sizeof(float)));
gpuErrChk(hipMemcpy(d_Wprev, Wprev[l]->data(), 2*R*R*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc(&d_Wcur, 2*R*R*sizeof(float)));
gpuErrChk(hipMemcpy(d_Wcur, Wcur[l]->data(), 2*R*R*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc(&d_Bh, 2*R*sizeof(float)));
gpuErrChk(hipMemcpy(d_Bh, Bh[l]->data(), 2*R*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc(&d_Wres, R*R*sizeof(float)));
gpuErrChk(hipMemcpy(d_Wres, Wres[l]->data(), R*R*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc(&d_Bres, R*sizeof(float)));
gpuErrChk(hipMemcpy(d_Bres, Bres[l]->data(), R*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc(&d_Wskip, S*R*sizeof(float)));
gpuErrChk(hipMemcpy(d_Wskip, Wskip[l]->data(), S*R*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc(&d_Bskip, S*sizeof(float)));
gpuErrChk(hipMemcpy(d_Bskip, Bskip[l]->data(), S*sizeof(float), hipMemcpyHostToDevice));
infer->setLayerWeights(l, d_Wprev, d_Wcur, d_Bh, d_Wres, d_Bres, d_Wskip, d_Bskip);
gpuErrChk(hipFree(d_Wprev));
gpuErrChk(hipFree(d_Wcur));
gpuErrChk(hipFree(d_Bh));
gpuErrChk(hipFree(d_Wres));
gpuErrChk(hipFree(d_Bres));
gpuErrChk(hipFree(d_Wskip));
gpuErrChk(hipFree(d_Bskip));
}
float* d_WskipOut;
float* d_BskipOut;
float* d_Wout;
float* d_Bout;
gpuErrChk(hipMalloc(&d_WskipOut, A*S*sizeof(float)));
gpuErrChk(hipMemcpy(d_WskipOut, WskipOut.data(), A*S*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc(&d_BskipOut, A*sizeof(float)));
gpuErrChk(hipMemcpy(d_BskipOut, BskipOut.data(), A*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc(&d_Wout, A*A*sizeof(float)));
gpuErrChk(hipMemcpy(d_Wout, Wout.data(), A*A*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMalloc(&d_Bout, A*sizeof(float)));
gpuErrChk(hipMemcpy(d_Bout, Bout.data(), A*sizeof(float), hipMemcpyHostToDevice));
infer->setOutWeights(d_WskipOut, d_BskipOut, d_Wout, d_Bout);
gpuErrChk(hipFree(d_WskipOut));
gpuErrChk(hipFree(d_BskipOut));
gpuErrChk(hipFree(d_Wout));
gpuErrChk(hipFree(d_Bout));
}
else {
infer->setEmbeddings(embeddingsPrev.data(), embeddingsCur.data());
for (int l=0; l<num_layers; l++) {
infer->setLayerWeights(l, Wprev[l]->data(), Wcur[l]->data(), Bh[l]->data(), Wres[l]->data(), Bres[l]->data(), Wskip[l]->data(), Bskip[l]->data());
}
infer->setOutWeights(WskipOut.data(), BskipOut.data(), Wout.data(), Bout.data());
}
Matrix zeroMatrix(R,batch_size,false);
for (int row=0; row<R; row++) {
for (int col=0; col<batch_size; col++) {
zeroMatrix.set(row,col,0.f);
}
}
Matrix Lh(2*R,samples_per_iteration*num_layers*batch_size);
assert(Lh.data());
Lh.randomize(mean,scale);
ref.setInputs(Lh.data(), outputSelectors.data());
if (inputsFromDevice) {
float* d_Lh;
gpuErrChk(hipMalloc(&d_Lh, 2*R*samples_per_iteration*num_layers*batch_size*sizeof(float)));
float* d_outputSelectors;
gpuErrChk(hipMalloc(&d_outputSelectors,samples_per_iteration*batch_size*sizeof(float)));
gpuErrChk(hipMemcpy(d_Lh, Lh.data(), 2*R*samples_per_iteration*num_layers*batch_size*sizeof(float), hipMemcpyHostToDevice));
gpuErrChk(hipMemcpy(d_outputSelectors, outputSelectors.data(), samples_per_iteration*batch_size*sizeof(float), hipMemcpyHostToDevice));
infer->setInputs(d_Lh, d_outputSelectors);
gpuErrChk(hipFree(d_Lh));
gpuErrChk(hipFree(d_outputSelectors));
}
else {
infer->setInputs(Lh.data(), outputSelectors.data());
}
for (int i=0; i<num_iterations; i++) {
printf("Iteration: %d\n", i);
// Run reference implementation
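// each thread block handles the largest of 4, 2 or 1 batch entries that divides batch_size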
int batch_size_per_block = ((batch_size % 4) == 0) ? 4 : ((batch_size % 2) == 0) ? 2 : 1;
int* refYout = (int*)malloc(samples_per_iteration*batch_size*sizeof(int));
int* mcYout = (int*)malloc(samples_per_iteration*batch_size*sizeof(int));
ref.run(samples_per_iteration, batch_size, refYout);
assert(infer->run(samples_per_iteration, batch_size, mcYout, batch_size_per_block, true));
gpuErrChk(hipDeviceSynchronize());
// Check results
for (int l=0; l<num_layers; l++) {
printf("Checking layer %d\n", l);
Matrix refXout(R,batch_size);
Matrix refSkipOut(S, batch_size);
ref.getXtOut(l, refXout.data());
ref.getSkipOut(l, refSkipOut.data());
Matrix mcXout(R,batch_size,false);
Matrix mcSkipOut(S,batch_size,false);
infer->getXtOut(l, mcXout.data());
infer->getSkipOut(l, mcSkipOut.data());
matrix_compare("Xout", refXout, mcXout, 1.e-3);
matrix_compare("skipOut", refSkipOut, mcSkipOut, 1.e-2, true);
}
Matrix refSkipOutFinal(A,batch_size);
ref.getZs(refSkipOutFinal.data());
Matrix mcSkipOutFinal(A,batch_size,false);
infer->getZs(mcSkipOutFinal.data());
matrix_compare("Zs", refSkipOutFinal, mcSkipOutFinal, 1.e-4, true);
Matrix refOut(A,batch_size);
ref.getZa(refOut.data());
Matrix mcOut(A,batch_size,false);
infer->getZa(mcOut.data());
matrix_compare("Za", refOut, mcOut, 1.e-4);
Matrix refP(A,batch_size);
ref.getP(refP.data());
Matrix mcP(A,batch_size,false);
infer->getP(mcP.data());
matrix_compare("p",refP,mcP,1.e-3);
printf("Comparing yOut\n");
for (int i=0; i<samples_per_iteration*batch_size; i++) {
assert(refYout[i] == mcYout[i]);
}
printf("SUCCESS!\n");
free(refYout);
free(mcYout);
}
// Clean up
delete infer;
for (int l=0; l<num_layers; l++) {
delete Wprev[l];
delete Wcur[l];
delete Bh[l];
delete Wres[l];
delete Bres[l];
delete Wskip[l];
delete Bskip[l];
for (int sample=0; sample<samples_per_iteration;sample++) {
delete Xt[sample][l];
}
delete skipOut[l];
}
}
int main(int argc, char* argv[]) {
int num_layers = 20;
int batch_size = 16;
if (argc > 1) num_layers = atoi(argv[1]);
if (argc > 2) batch_size = atoi(argv[2]);
// How many samples to generate each time we invoke the kernel
const int SAMPLES_PER_ITERATION = 8;
const int MAX_DILATION = SAMPLES_PER_ITERATION;
srand(3);
printf("Testing R=32, S=128\n");
printf(" Testing Single-Block\n");
runTest<float,float,32,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 1);
printf(" Testing Dual-Block\n");
runTest<float,float,32,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 2);
printf(" Testing Persistent\n");
runTest<float,float,32,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3);
printf("Testing R=64, S=128\n");
printf(" Testing Single-Block\n");
runTest<float,float,64,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 1, true, false);
printf(" Testing Dual-Block\n");
runTest<float,float,64,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 2, false, true);
printf(" Testing Persistent\n");
runTest<float,float,64,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3, true, true);
printf("Testing R=64, S=256\n");
printf(" Testing Single-Block\n");
runTest<float,float,64,256, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 1);
printf(" Testing Dual-Block\n");
runTest<float,float,64,256, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 2);
printf(" Testing Persistent\n");
runTest<float,float,64,256, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3);
}
| a1a69430d75f233527c3ddcd00dc7535b0424481.cu | /******************************************************************************
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#include "matrix.h"
#include "nv_wavenet.cuh"
#include "nv_wavenet_util.cuh"
#include "nv_wavenet_reference.h"
#include <assert.h>
#include <stdio.h>
#include <vector>
Matrix* createMatrix(int r, int c) {
float mean = 0.0;
float scale = 0.5 / r;
Matrix* m = new Matrix(r,c,false);
m->randomize(mean,scale);
return m;
}
template <typename T_weight, typename T_data, int R, int S, int A>
void runTest(int num_layers, int max_dilation, int batch_size, int num_iterations, int samples_per_iteration, int impl, bool inputsFromDevice=false, bool weightsFromDevice=false) {
float mean = 0.0;
float scale = 0.5 / R;
// Just encode one-hot vector as an integer
std::vector<int> yInPrev(batch_size);
std::vector<int> yInCur(batch_size);
for (int b=0; b<batch_size; b++) {
yInPrev[b] = rand() % A;
yInCur[b] = rand() % A;
}
std::vector<int> yOut(batch_size);
Matrix outputSelectors(batch_size,samples_per_iteration);
outputSelectors.randomize(0.5,1.0);
Matrix embeddingsPrev(R,A,false);
Matrix embeddingsCur(R,A,false);
embeddingsPrev.randomize(mean,scale);
embeddingsCur.randomize(mean,scale);
std::vector<Matrix*> Wprev(num_layers);
std::vector<Matrix*> Wcur(num_layers);
std::vector<Matrix*> Bh(num_layers);
std::vector<Matrix*> Wres(num_layers);
std::vector<Matrix*> Bres(num_layers);
std::vector<Matrix*> Wskip(num_layers);
std::vector<Matrix*> Bskip(num_layers);
std::vector<Matrix*> skipOut(num_layers+1);
// Retain results for dilated inputs
std::vector<std::vector<Matrix*>> Xt(samples_per_iteration);
for (int sample=0; sample<samples_per_iteration; sample++) {
Xt[sample].resize(num_layers+1);
}
for (int l=0; l<num_layers; l++) {
// Weights
Wprev[l] = createMatrix(2*R,R);
Wcur[l] = createMatrix(2*R,R);
Bh[l] = createMatrix(2*R,1);
Wres[l] = createMatrix(R,R);
Bres[l] = createMatrix(R,1);
Wskip[l] = createMatrix(S,R);
Bskip[l] = createMatrix(S,1);
// Activations
skipOut[l] = createMatrix(S,batch_size);
}
for (int sample=0; sample<samples_per_iteration; sample++) {
for (int layer=0; layer<num_layers+1; layer++) {
Xt[sample][layer] = createMatrix(R, batch_size);
}
}
Matrix WskipOut(A,S,false);
WskipOut.randomize(mean,scale);
Matrix BskipOut(A,1,false);
BskipOut.randomize(mean, scale);
Matrix Wout(A,A,false);
Wout.randomize(mean,scale);
Matrix Bout(A,1,false);
Bout.randomize(mean,scale);
Matrix skipOutFinal(A,batch_size,false);
Matrix out(A,batch_size,false);
Matrix p(A,batch_size,false);
Matrix zero(S,batch_size,false);
for (int row = 0; row < S; row++) {
for (int col = 0; col < batch_size; col++) {
zero.set(row,col,0.f);
}
}
nvWavenetReference ref(num_layers, batch_size, samples_per_iteration, R, S, A, max_dilation);
nvWavenetInfer<T_weight,T_data,R,S,A>* infer = new nvWavenetInfer<T_weight,T_data,R,S,A>(num_layers, max_dilation, batch_size, samples_per_iteration, impl);
ref.setEmbeddings(embeddingsPrev.data(), embeddingsCur.data());
for (int l=0; l<num_layers; l++) {
ref.setLayerWeights(l, Wprev[l]->data(), Wcur[l]->data(), Bh[l]->data(), Wres[l]->data(), Bres[l]->data(), Wskip[l]->data(), Bskip[l]->data());
}
ref.setOutWeights(WskipOut.data(), BskipOut.data(), Wout.data(), Bout.data());
if (weightsFromDevice) {
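// stage weights in temporary device buffers so the setters are exercised with device pointers; the temporaries are freed once consumed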
float* d_embeddingsPrev;
float* d_embeddingsCur;
gpuErrChk(cudaMalloc(&d_embeddingsPrev, R*A*sizeof(float)));
gpuErrChk(cudaMemcpy(d_embeddingsPrev, embeddingsPrev.data(), R*A*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc(&d_embeddingsCur, R*A*sizeof(float)));
gpuErrChk(cudaMemcpy(d_embeddingsCur, embeddingsCur.data(), R*A*sizeof(float), cudaMemcpyHostToDevice));
infer->setEmbeddings(d_embeddingsPrev, d_embeddingsCur);
gpuErrChk(cudaFree(d_embeddingsPrev));
gpuErrChk(cudaFree(d_embeddingsCur));
float* d_Wprev;
float* d_Wcur;
float* d_Bh;
float* d_Wres;
float* d_Bres;
float* d_Wskip;
float* d_Bskip;
for (int l=0; l<num_layers; l++) {
gpuErrChk(cudaMalloc(&d_Wprev, 2*R*R*sizeof(float)));
gpuErrChk(cudaMemcpy(d_Wprev, Wprev[l]->data(), 2*R*R*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc(&d_Wcur, 2*R*R*sizeof(float)));
gpuErrChk(cudaMemcpy(d_Wcur, Wcur[l]->data(), 2*R*R*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc(&d_Bh, 2*R*sizeof(float)));
gpuErrChk(cudaMemcpy(d_Bh, Bh[l]->data(), 2*R*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc(&d_Wres, R*R*sizeof(float)));
gpuErrChk(cudaMemcpy(d_Wres, Wres[l]->data(), R*R*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc(&d_Bres, R*sizeof(float)));
gpuErrChk(cudaMemcpy(d_Bres, Bres[l]->data(), R*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc(&d_Wskip, S*R*sizeof(float)));
gpuErrChk(cudaMemcpy(d_Wskip, Wskip[l]->data(), S*R*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc(&d_Bskip, S*sizeof(float)));
gpuErrChk(cudaMemcpy(d_Bskip, Bskip[l]->data(), S*sizeof(float), cudaMemcpyHostToDevice));
infer->setLayerWeights(l, d_Wprev, d_Wcur, d_Bh, d_Wres, d_Bres, d_Wskip, d_Bskip);
gpuErrChk(cudaFree(d_Wprev));
gpuErrChk(cudaFree(d_Wcur));
gpuErrChk(cudaFree(d_Bh));
gpuErrChk(cudaFree(d_Wres));
gpuErrChk(cudaFree(d_Bres));
gpuErrChk(cudaFree(d_Wskip));
gpuErrChk(cudaFree(d_Bskip));
}
float* d_WskipOut;
float* d_BskipOut;
float* d_Wout;
float* d_Bout;
gpuErrChk(cudaMalloc(&d_WskipOut, A*S*sizeof(float)));
gpuErrChk(cudaMemcpy(d_WskipOut, WskipOut.data(), A*S*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc(&d_BskipOut, A*sizeof(float)));
gpuErrChk(cudaMemcpy(d_BskipOut, BskipOut.data(), A*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc(&d_Wout, A*A*sizeof(float)));
gpuErrChk(cudaMemcpy(d_Wout, Wout.data(), A*A*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMalloc(&d_Bout, A*sizeof(float)));
gpuErrChk(cudaMemcpy(d_Bout, Bout.data(), A*sizeof(float), cudaMemcpyHostToDevice));
infer->setOutWeights(d_WskipOut, d_BskipOut, d_Wout, d_Bout);
gpuErrChk(cudaFree(d_WskipOut));
gpuErrChk(cudaFree(d_BskipOut));
gpuErrChk(cudaFree(d_Wout));
gpuErrChk(cudaFree(d_Bout));
}
else {
infer->setEmbeddings(embeddingsPrev.data(), embeddingsCur.data());
for (int l=0; l<num_layers; l++) {
infer->setLayerWeights(l, Wprev[l]->data(), Wcur[l]->data(), Bh[l]->data(), Wres[l]->data(), Bres[l]->data(), Wskip[l]->data(), Bskip[l]->data());
}
infer->setOutWeights(WskipOut.data(), BskipOut.data(), Wout.data(), Bout.data());
}
Matrix zeroMatrix(R,batch_size,false);
for (int row=0; row<R; row++) {
for (int col=0; col<batch_size; col++) {
zeroMatrix.set(row,col,0.f);
}
}
Matrix Lh(2*R,samples_per_iteration*num_layers*batch_size);
assert(Lh.data());
Lh.randomize(mean,scale);
ref.setInputs(Lh.data(), outputSelectors.data());
if (inputsFromDevice) {
float* d_Lh;
gpuErrChk(cudaMalloc(&d_Lh, 2*R*samples_per_iteration*num_layers*batch_size*sizeof(float)));
float* d_outputSelectors;
gpuErrChk(cudaMalloc(&d_outputSelectors,samples_per_iteration*batch_size*sizeof(float)));
gpuErrChk(cudaMemcpy(d_Lh, Lh.data(), 2*R*samples_per_iteration*num_layers*batch_size*sizeof(float), cudaMemcpyHostToDevice));
gpuErrChk(cudaMemcpy(d_outputSelectors, outputSelectors.data(), samples_per_iteration*batch_size*sizeof(float), cudaMemcpyHostToDevice));
infer->setInputs(d_Lh, d_outputSelectors);
gpuErrChk(cudaFree(d_Lh));
gpuErrChk(cudaFree(d_outputSelectors));
}
else {
infer->setInputs(Lh.data(), outputSelectors.data());
}
for (int i=0; i<num_iterations; i++) {
printf("Iteration: %d\n", i);
// Run reference implementation
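// each thread block handles the largest of 4, 2 or 1 batch entries that divides batch_size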
int batch_size_per_block = ((batch_size % 4) == 0) ? 4 : ((batch_size % 2) == 0) ? 2 : 1;
int* refYout = (int*)malloc(samples_per_iteration*batch_size*sizeof(int));
int* mcYout = (int*)malloc(samples_per_iteration*batch_size*sizeof(int));
ref.run(samples_per_iteration, batch_size, refYout);
assert(infer->run(samples_per_iteration, batch_size, mcYout, batch_size_per_block, true));
gpuErrChk(cudaDeviceSynchronize());
// Check results
for (int l=0; l<num_layers; l++) {
printf("Checking layer %d\n", l);
Matrix refXout(R,batch_size);
Matrix refSkipOut(S, batch_size);
ref.getXtOut(l, refXout.data());
ref.getSkipOut(l, refSkipOut.data());
Matrix mcXout(R,batch_size,false);
Matrix mcSkipOut(S,batch_size,false);
infer->getXtOut(l, mcXout.data());
infer->getSkipOut(l, mcSkipOut.data());
matrix_compare("Xout", refXout, mcXout, 1.e-3);
matrix_compare("skipOut", refSkipOut, mcSkipOut, 1.e-2, true);
}
Matrix refSkipOutFinal(A,batch_size);
ref.getZs(refSkipOutFinal.data());
Matrix mcSkipOutFinal(A,batch_size,false);
infer->getZs(mcSkipOutFinal.data());
matrix_compare("Zs", refSkipOutFinal, mcSkipOutFinal, 1.e-4, true);
Matrix refOut(A,batch_size);
ref.getZa(refOut.data());
Matrix mcOut(A,batch_size,false);
infer->getZa(mcOut.data());
matrix_compare("Za", refOut, mcOut, 1.e-4);
Matrix refP(A,batch_size);
ref.getP(refP.data());
Matrix mcP(A,batch_size,false);
infer->getP(mcP.data());
matrix_compare("p",refP,mcP,1.e-3);
printf("Comparing yOut\n");
for (int i=0; i<samples_per_iteration*batch_size; i++) {
assert(refYout[i] == mcYout[i]);
}
printf("SUCCESS!\n");
free(refYout);
free(mcYout);
}
// Clean up
delete infer;
for (int l=0; l<num_layers; l++) {
delete Wprev[l];
delete Wcur[l];
delete Bh[l];
delete Wres[l];
delete Bres[l];
delete Wskip[l];
delete Bskip[l];
for (int sample=0; sample<samples_per_iteration;sample++) {
delete Xt[sample][l];
}
delete skipOut[l];
}
}
int main(int argc, char* argv[]) {
int num_layers = 20;
int batch_size = 16;
if (argc > 1) num_layers = atoi(argv[1]);
if (argc > 2) batch_size = atoi(argv[2]);
// How many samples to generate each time we invoke the kernel
const int SAMPLES_PER_ITERATION = 8;
const int MAX_DILATION = SAMPLES_PER_ITERATION;
srand(3);
printf("Testing R=32, S=128\n");
printf(" Testing Single-Block\n");
runTest<float,float,32,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 1);
printf(" Testing Dual-Block\n");
runTest<float,float,32,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 2);
printf(" Testing Persistent\n");
runTest<float,float,32,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3);
printf("Testing R=64, S=128\n");
printf(" Testing Single-Block\n");
runTest<float,float,64,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 1, true, false);
printf(" Testing Dual-Block\n");
runTest<float,float,64,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 2, false, true);
printf(" Testing Persistent\n");
runTest<float,float,64,128, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3, true, true);
printf("Testing R=64, S=256\n");
printf(" Testing Single-Block\n");
runTest<float,float,64,256, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 1);
printf(" Testing Dual-Block\n");
runTest<float,float,64,256, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 2);
printf(" Testing Persistent\n");
runTest<float,float,64,256, 256>(num_layers, MAX_DILATION, batch_size, 2, SAMPLES_PER_ITERATION, 3);
}
|
ca587fe94f8b144417e0b6129960f97cf9769d42.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "propagateCarries.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *d_matrix = NULL;
hipMalloc(&d_matrix, XSIZE*YSIZE);
int numCols = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( propagateCarries), dim3(gridBlock),dim3(threadBlock), 0, 0, d_matrix,numCols);
hipDeviceSynchronize();
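// warm-up launches so the timed loop excludes one-time startup costs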
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( propagateCarries), dim3(gridBlock),dim3(threadBlock), 0, 0, d_matrix,numCols);
}
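// time 1000 back-to-back launches of the kernel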
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( propagateCarries), dim3(gridBlock),dim3(threadBlock), 0, 0, d_matrix,numCols);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
hipFree(d_matrix);
}
}} | ca587fe94f8b144417e0b6129960f97cf9769d42.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "propagateCarries.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *d_matrix = NULL;
cudaMalloc(&d_matrix, XSIZE*YSIZE);
int numCols = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
propagateCarries<<<gridBlock,threadBlock>>>(d_matrix,numCols);
cudaDeviceSynchronize();
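// warm-up launches so the timed loop excludes one-time startup costs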
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
propagateCarries<<<gridBlock,threadBlock>>>(d_matrix,numCols);
}
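// time 1000 back-to-back launches of the kernel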
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
propagateCarries<<<gridBlock,threadBlock>>>(d_matrix,numCols);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
cudaFree(d_matrix);
}
}} |
4ae9206208f225c197994e53c1756a57e5e492a9.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// kernel for one pass of the naive (Hillis-Steele) inclusive scan: each element at index k >= 2^(d-1) adds in the element 2^(d-1) positions to its left
__global__ void kernNaiveScan(int n, int d, int* odata, int* idata)
{
int k = blockIdx.x*blockDim.x+threadIdx.x;
if(k>=n)
return;
int start = powf(2.0,1.0*(d-1));
if(k>=start)
odata[k] = idata[k-start]+idata[k];
else
odata[k] = idata[k];
}
__global__ void kernInclu2Exclu(int n, int* odata, int* idata)
{
int k = blockIdx.x*blockDim.x+threadIdx.x;
if(k>=n)
return;
if(k==0)
odata[0] = 0;
else
odata[k] = idata[k-1];
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
int* dev_idata;
int* dev_odata;
int* tmp;
int blockSize = 16;
hipMalloc((void**)&dev_idata, n*sizeof(int));
hipMalloc((void**)&dev_odata, n*sizeof(int));
hipMemcpy(dev_idata, idata, n*sizeof(int), hipMemcpyHostToDevice);
//checkCUDAErrorWithLine("copy idata failed!");
hipMemcpy(dev_odata, odata, n*sizeof(int), hipMemcpyHostToDevice);
//checkCUDAErrorWithLine("copy odata failed!");
dim3 fullBlocksPerGrid((n+blockSize-1)/blockSize);
timer().startGpuTimer();
// run ilog2ceil(n) passes, doubling the stride each pass (O(n log n) additions in total)
for(int d=1;d<=ilog2ceil(n);d++)
{
hipLaunchKernelGGL(( kernNaiveScan), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, n,d,dev_odata,dev_idata);
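// ping-pong the buffers: this pass's output becomes the next pass's input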
tmp = dev_odata;
dev_odata = dev_idata;
dev_idata = tmp;
}
//from inclusive to exclusive
hipLaunchKernelGGL(( kernInclu2Exclu), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, n,dev_odata,dev_idata);
timer().endGpuTimer();
hipMemcpy(odata, dev_odata, n*sizeof(int), hipMemcpyDeviceToHost);
//checkCUDAErrorWithLine("get odata failed!");
hipFree(dev_idata); hipFree(dev_odata);
}
}
}
| 4ae9206208f225c197994e53c1756a57e5e492a9.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// kernel for one pass of the naive (Hillis-Steele) inclusive scan: each element at index k >= 2^(d-1) adds in the element 2^(d-1) positions to its left
__global__ void kernNaiveScan(int n, int d, int* odata, int* idata)
{
int k = blockIdx.x*blockDim.x+threadIdx.x;
if(k>=n)
return;
int start = powf(2.0,1.0*(d-1));
if(k>=start)
odata[k] = idata[k-start]+idata[k];
else
odata[k] = idata[k];
}
__global__ void kernInclu2Exclu(int n, int* odata, int* idata)
{
int k = blockIdx.x*blockDim.x+threadIdx.x;
if(k>=n)
return;
if(k==0)
odata[0] = 0;
else
odata[k] = idata[k-1];
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
int* dev_idata;
int* dev_odata;
int* tmp;
int blockSize = 16;
cudaMalloc((void**)&dev_idata, n*sizeof(int));
cudaMalloc((void**)&dev_odata, n*sizeof(int));
cudaMemcpy(dev_idata, idata, n*sizeof(int), cudaMemcpyHostToDevice);
//checkCUDAErrorWithLine("copy idata failed!");
cudaMemcpy(dev_odata, odata, n*sizeof(int), cudaMemcpyHostToDevice);
//checkCUDAErrorWithLine("copy odata failed!");
dim3 fullBlocksPerGrid((n+blockSize-1)/blockSize);
timer().startGpuTimer();
// run ilog2ceil(n) passes, doubling the stride each pass (O(n log n) additions in total)
for(int d=1;d<=ilog2ceil(n);d++)
{
kernNaiveScan<<<fullBlocksPerGrid,blockSize>>>(n,d,dev_odata,dev_idata);
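// ping-pong the buffers: this pass's output becomes the next pass's input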
tmp = dev_odata;
dev_odata = dev_idata;
dev_idata = tmp;
}
//from inclusive to exclusive
kernInclu2Exclu<<<fullBlocksPerGrid,blockSize>>>(n,dev_odata,dev_idata);
timer().endGpuTimer();
cudaMemcpy(odata, dev_odata, n*sizeof(int), cudaMemcpyDeviceToHost);
//checkCUDAErrorWithLine("get odata failed!");
cudaFree(dev_idata); cudaFree(dev_odata);
}
}
}
|
8223fd70feac4c7acdc697849e66c82d4a9abdd4.hip | // !!! This is a file automatically generated by hipify!!!
/*
!=====================================================================
!
! S p e c f e m 3 D V e r s i o n 2 . 1
! ---------------------------------------
!
! Main authors: Dimitri Komatitsch and Jeroen Tromp
! Princeton University, USA and CNRS / INRIA / University of Pau
! (c) Princeton University / California Institute of Technology and CNRS / INRIA / University of Pau
! July 2012
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 2 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
!=====================================================================
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
//#include <rocblas.h>
#include "config.h"
#include "mesh_constants_cuda.h"
//#define CUBLAS_ERROR(s,n) if (s != HIPBLAS_STATUS_SUCCESS) { \
//fprintf (stderr, "CUBLAS Memory Write Error @ %d\n",n); \
//exit(EXIT_FAILURE); }
/* ----------------------------------------------------------------------------------------------- */
// elastic wavefield
/* ----------------------------------------------------------------------------------------------- */
__global__ void UpdateDispVeloc_kernel(realw* displ,
realw* veloc,
realw* accel,
int size,
realw deltat,
realw deltatsqover2,
realw deltatover2) {
// two dimensional array of blocks on grid where each block has one dimensional array of threads
//int tid = threadIdx.x;
//int bx = blockIdx.y*gridDim.x+blockIdx.x;
//int id = tid + bx*blockDim.x;
int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
// because of block and grid sizing problems, there is a small
// amount of buffer at the end of the calculation
if(id < size) {
displ[id] = displ[id] + deltat*veloc[id] + deltatsqover2*accel[id];
veloc[id] = veloc[id] + deltatover2*accel[id];
accel[id] = 0.0f; // can do this using memset...not sure if faster,probably not
}
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(it_update_displacement_cuda,
IT_UPDATE_DISPLACMENT_CUDA)(long* Mesh_pointer,
realw* deltat_F,
realw* deltatsqover2_F,
realw* deltatover2_F,
realw* b_deltat_F,
realw* b_deltatsqover2_F,
realw* b_deltatover2_F) {
TRACE("\tit_update_displacement_cuda");
Mesh* mp = (Mesh*)(*Mesh_pointer); // get Mesh from fortran integer wrapper
realw deltat = *deltat_F;
realw deltatsqover2 = *deltatsqover2_F;
realw deltatover2 = *deltatover2_F;
int size = NDIM * mp->NGLOB_AB;
int blocksize = BLOCKSIZE_KERNEL1;
int size_padded = ((int)ceil(((double)size)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
// debug
//realw max_d,max_v,max_a;
//max_d = get_device_array_maximum_value(mp->d_displ, size);
//max_v = get_device_array_maximum_value(mp->d_veloc, size);
//max_a = get_device_array_maximum_value(mp->d_accel, size);
//printf("rank %d - max displ: %f veloc: %f accel: %f\n",mp->myrank,max_d,max_v,max_a);
//launch kernel
hipLaunchKernelGGL(( UpdateDispVeloc_kernel), dim3(grid),dim3(threads),0,mp->compute_stream, mp->d_displ,mp->d_veloc,mp->d_accel,
size,deltat,deltatsqover2,deltatover2);
// kernel for backward fields
if(mp->simulation_type == 3) {
realw b_deltat = *b_deltat_F;
realw b_deltatsqover2 = *b_deltatsqover2_F;
realw b_deltatover2 = *b_deltatover2_F;
hipLaunchKernelGGL(( UpdateDispVeloc_kernel), dim3(grid),dim3(threads),0,mp->compute_stream, mp->d_b_displ,mp->d_b_veloc,mp->d_b_accel,
size,b_deltat,b_deltatsqover2,b_deltatover2);
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("it_update_displacement_cuda");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
// acoustic wavefield
// KERNEL 1
/* ----------------------------------------------------------------------------------------------- */
__global__ void UpdatePotential_kernel(realw* potential_acoustic,
realw* potential_dot_acoustic,
realw* potential_dot_dot_acoustic,
int size,
realw deltat,
realw deltatsqover2,
realw deltatover2) {
int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
// because of block and grid sizing problems, there is a small
// amount of buffer at the end of the calculation
if(id < size) {
potential_acoustic[id] = potential_acoustic[id]
+ deltat*potential_dot_acoustic[id]
+ deltatsqover2*potential_dot_dot_acoustic[id];
potential_dot_acoustic[id] = potential_dot_acoustic[id]
+ deltatover2*potential_dot_dot_acoustic[id];
potential_dot_dot_acoustic[id] = 0.0f;
}
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(it_update_displacement_ac_cuda,
it_update_displacement_ac_cuda)(long* Mesh_pointer,
realw* deltat_F,
realw* deltatsqover2_F,
realw* deltatover2_F,
realw* b_deltat_F,
realw* b_deltatsqover2_F,
realw* b_deltatover2_F) {
TRACE("\tit_update_displacement_ac_cuda");
Mesh* mp = (Mesh*)(*Mesh_pointer); // get Mesh from fortran integer wrapper
int size = mp->NGLOB_AB;
int blocksize = BLOCKSIZE_KERNEL1;
int size_padded = ((int)ceil(((double)size)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
//launch kernel
// forward wavefields
realw deltat = *deltat_F;
realw deltatsqover2 = *deltatsqover2_F;
realw deltatover2 = *deltatover2_F;
hipLaunchKernelGGL(( UpdatePotential_kernel), dim3(grid),dim3(threads),0,mp->compute_stream, mp->d_potential_acoustic,
mp->d_potential_dot_acoustic,
mp->d_potential_dot_dot_acoustic,
size,deltat,deltatsqover2,deltatover2);
// backward/reconstructed wavefields
if(mp->simulation_type == 3) {
realw b_deltat = *b_deltat_F;
realw b_deltatsqover2 = *b_deltatsqover2_F;
realw b_deltatover2 = *b_deltatover2_F;
hipLaunchKernelGGL(( UpdatePotential_kernel), dim3(grid),dim3(threads),0,mp->compute_stream, mp->d_b_potential_acoustic,
mp->d_b_potential_dot_acoustic,
mp->d_b_potential_dot_dot_acoustic,
size,b_deltat,b_deltatsqover2,b_deltatover2);
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
//printf("checking updatedispl_kernel launch...with %dx%d blocks\n",num_blocks_x,num_blocks_y);
exit_on_cuda_error("it_update_displacement_ac_cuda");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
// elastic domains
// KERNEL 3
/* ----------------------------------------------------------------------------------------------- */
__global__ void kernel_3_cuda_device(realw* veloc,
realw* accel,
int size,
realw deltatover2,
realw* rmassx,
realw* rmassy,
realw* rmassz) {
int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
// because of block and grid sizing problems, there is a small
// amount of buffer at the end of the calculation
if(id < size) {
accel[3*id] = accel[3*id]*rmassx[id];
accel[3*id+1] = accel[3*id+1]*rmassy[id];
accel[3*id+2] = accel[3*id+2]*rmassz[id];
veloc[3*id] = veloc[3*id] + deltatover2*accel[3*id];
veloc[3*id+1] = veloc[3*id+1] + deltatover2*accel[3*id+1];
veloc[3*id+2] = veloc[3*id+2] + deltatover2*accel[3*id+2];
}
}
/* ----------------------------------------------------------------------------------------------- */
__global__ void kernel_3_accel_cuda_device(realw* accel,
int size,
realw* rmassx,
realw* rmassy,
realw* rmassz) {
int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
// because of block and grid sizing problems, there is a small
// amount of buffer at the end of the calculation
if(id < size) {
accel[3*id] = accel[3*id]*rmassx[id];
accel[3*id+1] = accel[3*id+1]*rmassy[id];
accel[3*id+2] = accel[3*id+2]*rmassz[id];
}
}
/* ----------------------------------------------------------------------------------------------- */
__global__ void kernel_3_veloc_cuda_device(realw* veloc,
realw* accel,
int size,
realw deltatover2) {
int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
// because of block and grid sizing problems, there is a small
// amount of buffer at the end of the calculation
if(id < size) {
veloc[3*id] = veloc[3*id] + deltatover2*accel[3*id];
veloc[3*id+1] = veloc[3*id+1] + deltatover2*accel[3*id+1];
veloc[3*id+2] = veloc[3*id+2] + deltatover2*accel[3*id+2];
}
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(kernel_3_a_cuda,
KERNEL_3_A_CUDA)(long* Mesh_pointer,
realw* deltatover2_F,
realw* b_deltatover2_F,
int* APPROXIMATE_OCEAN_LOAD) {
TRACE("\tkernel_3_a_cuda");
Mesh* mp = (Mesh*)(*Mesh_pointer); // get Mesh from fortran integer wrapper
int size = mp->NGLOB_AB;
int blocksize = BLOCKSIZE_KERNEL3;
int size_padded = ((int)ceil(((double)size)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
// check whether we can update accel and veloc, or only accel at this point
if( *APPROXIMATE_OCEAN_LOAD == 0 ){
realw deltatover2 = *deltatover2_F;
// updates both, accel and veloc
hipLaunchKernelGGL(( kernel_3_cuda_device), dim3(grid), dim3(threads),0,mp->compute_stream, mp->d_veloc,
mp->d_accel,
size, deltatover2,
mp->d_rmassx,mp->d_rmassy,mp->d_rmassz);
if(mp->simulation_type == 3) {
realw b_deltatover2 = *b_deltatover2_F;
hipLaunchKernelGGL(( kernel_3_cuda_device), dim3(grid), dim3(threads),0,mp->compute_stream, mp->d_b_veloc,
mp->d_b_accel,
size, b_deltatover2,
mp->d_rmassx,mp->d_rmassy,mp->d_rmassz);
}
}else{
// updates only accel
hipLaunchKernelGGL(( kernel_3_accel_cuda_device), dim3(grid), dim3(threads),0,mp->compute_stream, mp->d_accel,
size,
mp->d_rmassx,
mp->d_rmassy,
mp->d_rmassz);
if(mp->simulation_type == 3) {
hipLaunchKernelGGL(( kernel_3_accel_cuda_device), dim3(grid), dim3(threads),0,mp->compute_stream, mp->d_b_accel,
size,
mp->d_rmassx,
mp->d_rmassy,
mp->d_rmassz);
}
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
//printf("checking updatedispl_kernel launch...with %dx%d blocks\n",num_blocks_x,num_blocks_y);
exit_on_cuda_error("after kernel 3 a");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(kernel_3_b_cuda,
KERNEL_3_B_CUDA)(long* Mesh_pointer,
realw* deltatover2_F,
realw* b_deltatover2_F) {
TRACE("\tkernel_3_b_cuda");
Mesh* mp = (Mesh*)(*Mesh_pointer); // get Mesh from fortran integer wrapper
int size = mp->NGLOB_AB;
int blocksize = BLOCKSIZE_KERNEL3;
int size_padded = ((int)ceil(((double)size)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
realw deltatover2 = *deltatover2_F;
// updates only veloc at this point
hipLaunchKernelGGL(( kernel_3_veloc_cuda_device), dim3(grid), dim3(threads),0,mp->compute_stream, mp->d_veloc,
mp->d_accel,
size,deltatover2);
if(mp->simulation_type == 3) {
realw b_deltatover2 = *b_deltatover2_F;
hipLaunchKernelGGL(( kernel_3_veloc_cuda_device), dim3(grid), dim3(threads),0,mp->compute_stream, mp->d_b_veloc,
mp->d_b_accel,
size,b_deltatover2);
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
//printf("checking updatedispl_kernel launch...with %dx%d blocks\n",num_blocks_x,num_blocks_y);
exit_on_cuda_error("after kernel 3 b");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
// acoustic domains
// KERNEL 3
/* ----------------------------------------------------------------------------------------------- */
__global__ void kernel_3_a_acoustic_cuda_device(realw* potential_dot_dot_acoustic,
int size,
realw* rmass_acoustic) {
int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
// because of block and grid sizing problems, there is a small
// amount of buffer at the end of the calculation
if(id < size) {
// multiplies pressure with the inverse of the mass matrix
potential_dot_dot_acoustic[id] = potential_dot_dot_acoustic[id]*rmass_acoustic[id];
}
}
/* ----------------------------------------------------------------------------------------------- */
__global__ void kernel_3_b_acoustic_cuda_device(realw* potential_dot_acoustic,
realw* potential_dot_dot_acoustic,
int size,
realw deltatover2,
realw* rmass_acoustic) {
int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
// because of block and grid sizing problems, there is a small
// amount of buffer at the end of the calculation
if(id < size) {
// Newmark time scheme: corrector term
potential_dot_acoustic[id] = potential_dot_acoustic[id] + deltatover2*potential_dot_dot_acoustic[id];
}
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(kernel_3_a_acoustic_cuda,
KERNEL_3_ACOUSTIC_CUDA)(long* Mesh_pointer ) {
TRACE("kernel_3_a_acoustic_cuda");
Mesh* mp = (Mesh*)(*Mesh_pointer); // get Mesh from fortran integer wrapper
int size = mp->NGLOB_AB;
int blocksize = BLOCKSIZE_KERNEL3;
int size_padded = ((int)ceil(((double)size)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
hipLaunchKernelGGL(( kernel_3_a_acoustic_cuda_device), dim3(grid), dim3(threads), 0, 0, mp->d_potential_dot_dot_acoustic,
size,
mp->d_rmass_acoustic);
if(mp->simulation_type == 3) {
hipLaunchKernelGGL(( kernel_3_a_acoustic_cuda_device), dim3(grid), dim3(threads), 0, 0, mp->d_b_potential_dot_dot_acoustic,
size,
mp->d_rmass_acoustic);
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
//printf("checking updatedispl_kernel launch...with %dx%d blocks\n",num_blocks_x,num_blocks_y);
exit_on_cuda_error("after kernel 3 a");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(kernel_3_b_acoustic_cuda,
KERNEL_3_ACOUSTIC_CUDA)(long* Mesh_pointer,
realw* deltatover2_F,
realw* b_deltatover2_F) {
TRACE("kernel_3_b_acoustic_cuda");
Mesh* mp = (Mesh*)(*Mesh_pointer); // get Mesh from fortran integer wrapper
int size = mp->NGLOB_AB;
int blocksize = BLOCKSIZE_KERNEL3;
int size_padded = ((int)ceil(((double)size)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
realw deltatover2 = *deltatover2_F;
hipLaunchKernelGGL(( kernel_3_b_acoustic_cuda_device), dim3(grid), dim3(threads), 0, 0, mp->d_potential_dot_acoustic,
mp->d_potential_dot_dot_acoustic,
size, deltatover2,
mp->d_rmass_acoustic);
if(mp->simulation_type == 3) {
realw b_deltatover2 = *b_deltatover2_F;
hipLaunchKernelGGL(( kernel_3_b_acoustic_cuda_device), dim3(grid), dim3(threads), 0, 0, mp->d_b_potential_dot_acoustic,
mp->d_b_potential_dot_dot_acoustic,
size, b_deltatover2,
mp->d_rmass_acoustic);
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
//printf("checking updatedispl_kernel launch...with %dx%d blocks\n",num_blocks_x,num_blocks_y);
exit_on_cuda_error("after kernel 3 b");
#endif
}
| 8223fd70feac4c7acdc697849e66c82d4a9abdd4.cu | /*
!=====================================================================
!
! S p e c f e m 3 D V e r s i o n 2 . 1
! ---------------------------------------
!
! Main authors: Dimitri Komatitsch and Jeroen Tromp
! Princeton University, USA and CNRS / INRIA / University of Pau
! (c) Princeton University / California Institute of Technology and CNRS / INRIA / University of Pau
! July 2012
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 2 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
!=====================================================================
*/
#include <stdio.h>
#include <cuda.h>
//#include <cublas.h>
#include "config.h"
#include "mesh_constants_cuda.h"
//#define CUBLAS_ERROR(s,n) if (s != CUBLAS_STATUS_SUCCESS) { \
//fprintf (stderr, "CUBLAS Memory Write Error @ %d\n",n); \
//exit(EXIT_FAILURE); }
/* ----------------------------------------------------------------------------------------------- */
// elastic wavefield
/* ----------------------------------------------------------------------------------------------- */
__global__ void UpdateDispVeloc_kernel(realw* displ,
realw* veloc,
realw* accel,
int size,
realw deltat,
realw deltatsqover2,
realw deltatover2) {
  // two-dimensional grid of blocks, each block holding a one-dimensional array of threads
//int tid = threadIdx.x;
//int bx = blockIdx.y*gridDim.x+blockIdx.x;
//int id = tid + bx*blockDim.x;
int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
// because of block and grid sizing problems, there is a small
// amount of buffer at the end of the calculation
if(id < size) {
displ[id] = displ[id] + deltat*veloc[id] + deltatsqover2*accel[id];
veloc[id] = veloc[id] + deltatover2*accel[id];
accel[id] = 0.0f; // can do this using memset...not sure if faster,probably not
}
}
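// Added note (a reading of the kernel above, not original SPECFEM documentation):
// the update implements the Newmark predictor step of the explicit scheme,
//   displ_{n+1} = displ_n + dt * veloc_n + dt^2/2 * accel_n
//   veloc*      = veloc_n + dt/2 * accel_n
//   accel       = 0   (re-accumulated by the force kernels)
// kernel_3 further below applies the corrector: accel is scaled by the inverse
// mass matrix (rmassx/y/z) and veloc receives the remaining dt/2 * accel term.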
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(it_update_displacement_cuda,
IT_UPDATE_DISPLACMENT_CUDA)(long* Mesh_pointer,
realw* deltat_F,
realw* deltatsqover2_F,
realw* deltatover2_F,
realw* b_deltat_F,
realw* b_deltatsqover2_F,
realw* b_deltatover2_F) {
TRACE("\tit_update_displacement_cuda");
Mesh* mp = (Mesh*)(*Mesh_pointer); // get Mesh from fortran integer wrapper
realw deltat = *deltat_F;
realw deltatsqover2 = *deltatsqover2_F;
realw deltatover2 = *deltatover2_F;
int size = NDIM * mp->NGLOB_AB;
int blocksize = BLOCKSIZE_KERNEL1;
int size_padded = ((int)ceil(((double)size)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
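  // Added note: get_blocks_xy() (defined elsewhere in this code base) factors the
  // 1-D block count into an x/y pair, and the kernels flatten blockIdx.x/blockIdx.y
  // back into a single index; presumably this is done to stay below the
  // per-dimension grid size limit (65535 blocks in x on older CUDA architectures).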
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
// debug
//realw max_d,max_v,max_a;
//max_d = get_device_array_maximum_value(mp->d_displ, size);
//max_v = get_device_array_maximum_value(mp->d_veloc, size);
//max_a = get_device_array_maximum_value(mp->d_accel, size);
//printf("rank %d - max displ: %f veloc: %f accel: %f\n",mp->myrank,max_d,max_v,max_a);
//launch kernel
UpdateDispVeloc_kernel<<<grid,threads,0,mp->compute_stream>>>(mp->d_displ,mp->d_veloc,mp->d_accel,
size,deltat,deltatsqover2,deltatover2);
// kernel for backward fields
if(mp->simulation_type == 3) {
realw b_deltat = *b_deltat_F;
realw b_deltatsqover2 = *b_deltatsqover2_F;
realw b_deltatover2 = *b_deltatover2_F;
UpdateDispVeloc_kernel<<<grid,threads,0,mp->compute_stream>>>(mp->d_b_displ,mp->d_b_veloc,mp->d_b_accel,
size,b_deltat,b_deltatsqover2,b_deltatover2);
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("it_update_displacement_cuda");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
// acoustic wavefield
// KERNEL 1
/* ----------------------------------------------------------------------------------------------- */
__global__ void UpdatePotential_kernel(realw* potential_acoustic,
realw* potential_dot_acoustic,
realw* potential_dot_dot_acoustic,
int size,
realw deltat,
realw deltatsqover2,
realw deltatover2) {
int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
// because of block and grid sizing problems, there is a small
// amount of buffer at the end of the calculation
if(id < size) {
potential_acoustic[id] = potential_acoustic[id]
+ deltat*potential_dot_acoustic[id]
+ deltatsqover2*potential_dot_dot_acoustic[id];
potential_dot_acoustic[id] = potential_dot_acoustic[id]
+ deltatover2*potential_dot_dot_acoustic[id];
potential_dot_dot_acoustic[id] = 0.0f;
}
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(it_update_displacement_ac_cuda,
it_update_displacement_ac_cuda)(long* Mesh_pointer,
realw* deltat_F,
realw* deltatsqover2_F,
realw* deltatover2_F,
realw* b_deltat_F,
realw* b_deltatsqover2_F,
realw* b_deltatover2_F) {
TRACE("\tit_update_displacement_ac_cuda");
Mesh* mp = (Mesh*)(*Mesh_pointer); // get Mesh from fortran integer wrapper
int size = mp->NGLOB_AB;
int blocksize = BLOCKSIZE_KERNEL1;
int size_padded = ((int)ceil(((double)size)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
//launch kernel
// forward wavefields
realw deltat = *deltat_F;
realw deltatsqover2 = *deltatsqover2_F;
realw deltatover2 = *deltatover2_F;
UpdatePotential_kernel<<<grid,threads,0,mp->compute_stream>>>(mp->d_potential_acoustic,
mp->d_potential_dot_acoustic,
mp->d_potential_dot_dot_acoustic,
size,deltat,deltatsqover2,deltatover2);
// backward/reconstructed wavefields
if(mp->simulation_type == 3) {
realw b_deltat = *b_deltat_F;
realw b_deltatsqover2 = *b_deltatsqover2_F;
realw b_deltatover2 = *b_deltatover2_F;
UpdatePotential_kernel<<<grid,threads,0,mp->compute_stream>>>(mp->d_b_potential_acoustic,
mp->d_b_potential_dot_acoustic,
mp->d_b_potential_dot_dot_acoustic,
size,b_deltat,b_deltatsqover2,b_deltatover2);
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
//printf("checking updatedispl_kernel launch...with %dx%d blocks\n",num_blocks_x,num_blocks_y);
exit_on_cuda_error("it_update_displacement_ac_cuda");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
// elastic domains
// KERNEL 3
/* ----------------------------------------------------------------------------------------------- */
__global__ void kernel_3_cuda_device(realw* veloc,
realw* accel,
int size,
realw deltatover2,
realw* rmassx,
realw* rmassy,
realw* rmassz) {
int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
// because of block and grid sizing problems, there is a small
// amount of buffer at the end of the calculation
if(id < size) {
accel[3*id] = accel[3*id]*rmassx[id];
accel[3*id+1] = accel[3*id+1]*rmassy[id];
accel[3*id+2] = accel[3*id+2]*rmassz[id];
veloc[3*id] = veloc[3*id] + deltatover2*accel[3*id];
veloc[3*id+1] = veloc[3*id+1] + deltatover2*accel[3*id+1];
veloc[3*id+2] = veloc[3*id+2] + deltatover2*accel[3*id+2];
}
}
/* ----------------------------------------------------------------------------------------------- */
__global__ void kernel_3_accel_cuda_device(realw* accel,
int size,
realw* rmassx,
realw* rmassy,
realw* rmassz) {
int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
// because of block and grid sizing problems, there is a small
// amount of buffer at the end of the calculation
if(id < size) {
accel[3*id] = accel[3*id]*rmassx[id];
accel[3*id+1] = accel[3*id+1]*rmassy[id];
accel[3*id+2] = accel[3*id+2]*rmassz[id];
}
}
/* ----------------------------------------------------------------------------------------------- */
__global__ void kernel_3_veloc_cuda_device(realw* veloc,
realw* accel,
int size,
realw deltatover2) {
int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
// because of block and grid sizing problems, there is a small
// amount of buffer at the end of the calculation
if(id < size) {
veloc[3*id] = veloc[3*id] + deltatover2*accel[3*id];
veloc[3*id+1] = veloc[3*id+1] + deltatover2*accel[3*id+1];
veloc[3*id+2] = veloc[3*id+2] + deltatover2*accel[3*id+2];
}
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(kernel_3_a_cuda,
KERNEL_3_A_CUDA)(long* Mesh_pointer,
realw* deltatover2_F,
realw* b_deltatover2_F,
int* APPROXIMATE_OCEAN_LOAD) {
TRACE("\tkernel_3_a_cuda");
Mesh* mp = (Mesh*)(*Mesh_pointer); // get Mesh from fortran integer wrapper
int size = mp->NGLOB_AB;
int blocksize = BLOCKSIZE_KERNEL3;
int size_padded = ((int)ceil(((double)size)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
// check whether we can update accel and veloc, or only accel at this point
if( *APPROXIMATE_OCEAN_LOAD == 0 ){
realw deltatover2 = *deltatover2_F;
// updates both, accel and veloc
kernel_3_cuda_device<<< grid, threads,0,mp->compute_stream>>>(mp->d_veloc,
mp->d_accel,
size, deltatover2,
mp->d_rmassx,mp->d_rmassy,mp->d_rmassz);
if(mp->simulation_type == 3) {
realw b_deltatover2 = *b_deltatover2_F;
kernel_3_cuda_device<<< grid, threads,0,mp->compute_stream>>>(mp->d_b_veloc,
mp->d_b_accel,
size, b_deltatover2,
mp->d_rmassx,mp->d_rmassy,mp->d_rmassz);
}
}else{
// updates only accel
kernel_3_accel_cuda_device<<< grid, threads,0,mp->compute_stream>>>(mp->d_accel,
size,
mp->d_rmassx,
mp->d_rmassy,
mp->d_rmassz);
if(mp->simulation_type == 3) {
kernel_3_accel_cuda_device<<< grid, threads,0,mp->compute_stream>>>(mp->d_b_accel,
size,
mp->d_rmassx,
mp->d_rmassy,
mp->d_rmassz);
}
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
//printf("checking updatedispl_kernel launch...with %dx%d blocks\n",num_blocks_x,num_blocks_y);
exit_on_cuda_error("after kernel 3 a");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(kernel_3_b_cuda,
KERNEL_3_B_CUDA)(long* Mesh_pointer,
realw* deltatover2_F,
realw* b_deltatover2_F) {
TRACE("\tkernel_3_b_cuda");
Mesh* mp = (Mesh*)(*Mesh_pointer); // get Mesh from fortran integer wrapper
int size = mp->NGLOB_AB;
int blocksize = BLOCKSIZE_KERNEL3;
int size_padded = ((int)ceil(((double)size)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
realw deltatover2 = *deltatover2_F;
// updates only veloc at this point
kernel_3_veloc_cuda_device<<< grid, threads,0,mp->compute_stream>>>(mp->d_veloc,
mp->d_accel,
size,deltatover2);
if(mp->simulation_type == 3) {
realw b_deltatover2 = *b_deltatover2_F;
kernel_3_veloc_cuda_device<<< grid, threads,0,mp->compute_stream>>>(mp->d_b_veloc,
mp->d_b_accel,
size,b_deltatover2);
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
//printf("checking updatedispl_kernel launch...with %dx%d blocks\n",num_blocks_x,num_blocks_y);
exit_on_cuda_error("after kernel 3 b");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
// acoustic domains
// KERNEL 3
/* ----------------------------------------------------------------------------------------------- */
__global__ void kernel_3_a_acoustic_cuda_device(realw* potential_dot_dot_acoustic,
int size,
realw* rmass_acoustic) {
int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
// because of block and grid sizing problems, there is a small
// amount of buffer at the end of the calculation
if(id < size) {
// multiplies pressure with the inverse of the mass matrix
potential_dot_dot_acoustic[id] = potential_dot_dot_acoustic[id]*rmass_acoustic[id];
}
}
/* ----------------------------------------------------------------------------------------------- */
__global__ void kernel_3_b_acoustic_cuda_device(realw* potential_dot_acoustic,
realw* potential_dot_dot_acoustic,
int size,
realw deltatover2,
realw* rmass_acoustic) {
int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
// because of block and grid sizing problems, there is a small
// amount of buffer at the end of the calculation
if(id < size) {
// Newmark time scheme: corrector term
potential_dot_acoustic[id] = potential_dot_acoustic[id] + deltatover2*potential_dot_dot_acoustic[id];
}
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(kernel_3_a_acoustic_cuda,
KERNEL_3_ACOUSTIC_CUDA)(long* Mesh_pointer ) {
TRACE("kernel_3_a_acoustic_cuda");
Mesh* mp = (Mesh*)(*Mesh_pointer); // get Mesh from fortran integer wrapper
int size = mp->NGLOB_AB;
int blocksize = BLOCKSIZE_KERNEL3;
int size_padded = ((int)ceil(((double)size)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
kernel_3_a_acoustic_cuda_device<<< grid, threads>>>(mp->d_potential_dot_dot_acoustic,
size,
mp->d_rmass_acoustic);
if(mp->simulation_type == 3) {
kernel_3_a_acoustic_cuda_device<<< grid, threads>>>(mp->d_b_potential_dot_dot_acoustic,
size,
mp->d_rmass_acoustic);
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
//printf("checking updatedispl_kernel launch...with %dx%d blocks\n",num_blocks_x,num_blocks_y);
exit_on_cuda_error("after kernel 3 a");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(kernel_3_b_acoustic_cuda,
KERNEL_3_ACOUSTIC_CUDA)(long* Mesh_pointer,
realw* deltatover2_F,
realw* b_deltatover2_F) {
TRACE("kernel_3_b_acoustic_cuda");
Mesh* mp = (Mesh*)(*Mesh_pointer); // get Mesh from fortran integer wrapper
int size = mp->NGLOB_AB;
int blocksize = BLOCKSIZE_KERNEL3;
int size_padded = ((int)ceil(((double)size)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
realw deltatover2 = *deltatover2_F;
kernel_3_b_acoustic_cuda_device<<< grid, threads>>>(mp->d_potential_dot_acoustic,
mp->d_potential_dot_dot_acoustic,
size, deltatover2,
mp->d_rmass_acoustic);
if(mp->simulation_type == 3) {
realw b_deltatover2 = *b_deltatover2_F;
kernel_3_b_acoustic_cuda_device<<< grid, threads>>>(mp->d_b_potential_dot_acoustic,
mp->d_b_potential_dot_dot_acoustic,
size, b_deltatover2,
mp->d_rmass_acoustic);
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
//printf("checking updatedispl_kernel launch...with %dx%d blocks\n",num_blocks_x,num_blocks_y);
exit_on_cuda_error("after kernel 3 b");
#endif
}
|
7b9f0e33b63400403928ee59498a5ca9203eb65b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2018 XIAOLIN WANG
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "WeightFactory.h"
#include "Global.h"
#include "cublasWrapper.h"
#include "utils.h"
namespace cytonLib {
WeightFactory weightFactory;
void WeightFactory::init(const string& method)
{
if(method=="adam")
{
optAdam=true;
adamGamma=0.9;
adamGamma2=0.999999;
adamEpsilon=1e-9;
}
else if(method=="SGD")
{
optSgd=true;
}
else
{
assert(false);
}
}
void WeightFactory::create(Weight& weight, string tag, int ni, int nj)
{
weight.create(tag, ni, nj);
weights.push_back(&weight);
}
void WeightFactory::alloc(Precision clipGradient)
{
int length=0;
for(int i=0;i<weights.size();i++)
{
Weight& w=*weights.at(i);
fprintf(stderr, "weight%d %s %d*%d\n", i, w.tag.c_str(), w.ni, w.nj);
length+=w.length();
}
whole.resize(length, 1);
whole.clipGrad=clipGradient;
fprintf(stderr, "totalWeight %d\n",length);
int offset=0;
for(vector<Weight*>::iterator iw=weights.begin();iw!=weights.end();iw++)
{
Weight& w=*(*iw);
w.set(w.ni, w.ni, w.nj, whole.data+offset, whole.grad.data+offset);
offset+=w.length();
}
whole.initRandom(-global.initFactor, global.initFactor);
if(optAdam)
{
momentum.resize(whole.ni, whole.nj);
momentum.setZero();
gradientVariance.resize(whole.ni, whole.nj);
gradientVariance.setZero();
dWeight.resize(whole.ni, whole.nj);
}
else if(optSgd)
{
}
else
{
assert(false);
}
}
void WeightFactory::clearGrad()
{
whole.grad.setZero();
}
__global__
void weightFactory_update_adam(Precision* grad, Precision* gradMomentum, Precision* gradVar, Precision* weight, Precision* dWeight, int len,
Precision gamma,Precision gamma2, Precision epsilon, Precision lambda )
{
int i=blockDim.x*blockIdx.x+threadIdx.x;
if(i<len)
{
Precision& g=grad[i];
Precision& gm=gradMomentum[i];
Precision& gv=gradVar[i];
Precision& w=weight[i];
Precision& dw=dWeight[i];
gm=(1-gamma)*g+gamma*gm;
gv=(1-gamma2)*g*g+gamma2*gv;
dw= gm/(sqrt(gv)+epsilon)*lambda;
w += dw;
}
}
void WeightFactory::update(Precision lambda)
{
int len=whole.length();
Precision pnFactor=sqrt(1.0/whole.length());
if(whole.clipGrad>0)
{
whole.grad.clip(whole.clipGrad);
}
if(optAdam)
{
Precision step=global.batch;
Precision tf=sqrt(1.0-::pow(adamGamma2, step)) / (1.0-::pow(adamGamma, step));
hipLaunchKernelGGL(( weightFactory_update_adam), dim3(ceil(len, blockSize)), dim3(blockSize), 0, 0, whole.grad.data, momentum.data, gradientVariance.data, whole.data, dWeight.data, len,
adamGamma, adamGamma2, adamEpsilon, lambda*tf);
}
else if(optSgd)
{
checkError(cublasXaxpy(global.cublasHandle, whole.length(), &lambda, whole.grad.data, 1, whole.data, 1));
}
else
{
assert(false);
}
}
void WeightFactory::save(const string& fileName)
{
XLLib::dirPrepare4file(fileName);
std::ofstream f(fileName.c_str());
f<<"##"<<"WeightFactory"<<"\n";
whole.save(f);
f.close();
}
void WeightFactory::load(const string& fileName)
{
if(!XLLib::fileExists(fileName))
{
fprintf(stderr, "Error: model file %s does not exist.\n", fileName.c_str());
assert(false);
}
ifstream f(fileName.c_str());
string tTag=string("##WeightFactory");
checkFile(f,tTag);
whole.load(f);
f.close();
}
} /* namespace cytonLib */
| 7b9f0e33b63400403928ee59498a5ca9203eb65b.cu | /*
Copyright 2018 XIAOLIN WANG
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "WeightFactory.h"
#include "Global.h"
#include "cublasWrapper.h"
#include "utils.h"
namespace cytonLib {
WeightFactory weightFactory;
void WeightFactory::init(const string& method)
{
if(method=="adam")
{
optAdam=true;
adamGamma=0.9;
adamGamma2=0.999999;
adamEpsilon=1e-9;
}
else if(method=="SGD")
{
optSgd=true;
}
else
{
assert(false);
}
}
void WeightFactory::create(Weight& weight, string tag, int ni, int nj)
{
weight.create(tag, ni, nj);
weights.push_back(&weight);
}
void WeightFactory::alloc(Precision clipGradient)
{
int length=0;
for(int i=0;i<weights.size();i++)
{
Weight& w=*weights.at(i);
fprintf(stderr, "weight%d %s %d*%d\n", i, w.tag.c_str(), w.ni, w.nj);
length+=w.length();
}
whole.resize(length, 1);
whole.clipGrad=clipGradient;
fprintf(stderr, "totalWeight %d\n",length);
int offset=0;
for(vector<Weight*>::iterator iw=weights.begin();iw!=weights.end();iw++)
{
Weight& w=*(*iw);
w.set(w.ni, w.ni, w.nj, whole.data+offset, whole.grad.data+offset);
offset+=w.length();
}
whole.initRandom(-global.initFactor, global.initFactor);
if(optAdam)
{
momentum.resize(whole.ni, whole.nj);
momentum.setZero();
gradientVariance.resize(whole.ni, whole.nj);
gradientVariance.setZero();
dWeight.resize(whole.ni, whole.nj);
}
else if(optSgd)
{
}
else
{
assert(false);
}
}
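// Added note on the design: alloc() packs every registered Weight into the single
// contiguous tensor `whole` (each Weight only keeps a pointer view set up via set()),
// so gradient clipping and the optimizer step in update() can run as one kernel or
// one BLAS call over the whole parameter vector instead of once per weight.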
void WeightFactory::clearGrad()
{
whole.grad.setZero();
}
__global__
void weightFactory_update_adam(Precision* grad, Precision* gradMomentum, Precision* gradVar, Precision* weight, Precision* dWeight, int len,
Precision gamma,Precision gamma2, Precision epsilon, Precision lambda )
{
int i=blockDim.x*blockIdx.x+threadIdx.x;
if(i<len)
{
Precision& g=grad[i];
Precision& gm=gradMomentum[i];
Precision& gv=gradVar[i];
Precision& w=weight[i];
Precision& dw=dWeight[i];
gm=(1-gamma)*g+gamma*gm;
gv=(1-gamma2)*g*g+gamma2*gv;
dw= gm/(sqrt(gv)+epsilon)*lambda;
w += dw;
}
}
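// Added note (a reading of the kernel above in the usual Adam notation, not
// original documentation): with g the gradient, gamma/gamma2 the decay rates of
// the first/second moment estimates and lambda the step size,
//   m <- gamma  * m + (1 - gamma ) * g
//   v <- gamma2 * v + (1 - gamma2) * g^2
//   w <- w + lambda * m / (sqrt(v) + epsilon)
// WeightFactory::update() below folds the bias correction
// sqrt(1 - gamma2^t) / (1 - gamma^t) into lambda before launching the kernel,
// and the "+" sign assumes grad already holds the descent direction, as in the
// SGD branch of update().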
void WeightFactory::update(Precision lambda)
{
int len=whole.length();
Precision pnFactor=sqrt(1.0/whole.length());
if(whole.clipGrad>0)
{
whole.grad.clip(whole.clipGrad);
}
if(optAdam)
{
Precision step=global.batch;
Precision tf=sqrt(1.0-std::pow(adamGamma2, step)) / (1.0-std::pow(adamGamma, step));
weightFactory_update_adam<<<ceil(len, blockSize), blockSize>>>(whole.grad.data, momentum.data, gradientVariance.data, whole.data, dWeight.data, len,
adamGamma, adamGamma2, adamEpsilon, lambda*tf);
}
else if(optSgd)
{
checkError(cublasXaxpy(global.cublasHandle, whole.length(), &lambda, whole.grad.data, 1, whole.data, 1));
}
else
{
assert(false);
}
}
void WeightFactory::save(const string& fileName)
{
XLLib::dirPrepare4file(fileName);
std::ofstream f(fileName.c_str());
f<<"##"<<"WeightFactory"<<"\n";
whole.save(f);
f.close();
}
void WeightFactory::load(const string& fileName)
{
if(!XLLib::fileExists(fileName))
{
fprintf(stderr, "Error: model file %s does not exist.\n", fileName.c_str());
assert(false);
}
ifstream f(fileName.c_str());
string tTag=string("##WeightFactory");
checkFile(f,tTag);
whole.load(f);
f.close();
}
} /* namespace cytonLib */
|
5ce81e5f3387de2681daa472064a2e90c99b6210.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
}
#define ROTATE_DOWN(val,MAX) ((val-1==-1)?MAX-1:val-1)
#define ROTATE_UP(val,MAX) ((val+1)%MAX)
/**
 * GPU Device kernel for the 2D stencil
 * First attempt during the hackathon
 * M = Rows, N = Cols INCLUDING HALOS
 * In this version the shared memory is reduced to just 3 rows (actually 1+HALO*2 rows)
*/
__global__ void gpu_stencil2D_4pt(double * dst, double * src, int M, int N)
{
//Declaring the shared memory array for source
extern __shared__ double shared_mem[];
double * shSrc = shared_mem;
//indexes
int i, j;
//neighbor's values
double north, south, east, west;
//SharedMem Collumns Dimension
int smColDim = HALO*2+blockDim.y*TILE_SIZE;
int smRowDim = HALO*2+blockDim.x*TILE_SIZE;
//Copying to shared memory
//Inner part
for ( i = 0 ; i < TILE_SIZE ; i++ )
{
for ( j = 0 ; j < TILE_SIZE ; j++ )
{
int globalIndex=HALO*N+blockIdx.x*blockDim.x*TILE_SIZE*N+threadIdx.x*TILE_SIZE*N+i*N+blockIdx.y*blockDim.y*TILE_SIZE+threadIdx.y*TILE_SIZE+j+HALO;
int shMemIndex=HALO*smColDim+threadIdx.x*smColDim*TILE_SIZE+i*smColDim+HALO+threadIdx.y*TILE_SIZE+j;
shSrc[shMemIndex]=src[globalIndex];
}
}
//Halos
if (threadIdx.x == 0 && threadIdx.y == 0 )
{
int indexTopHalo, indexBottomHalo, indexLeftHalo, indexRightHalo;
//For Bottom and top row
for ( i = 0 ; i < HALO ; i++ )
{
for ( j = 0 ; j < smColDim ; j++ )
{
indexTopHalo = (blockIdx.x*blockDim.x*TILE_SIZE+i)*N + (blockIdx.y*blockDim.y*TILE_SIZE) + j;
indexBottomHalo = (HALO + (blockIdx.x+1)*blockDim.x*TILE_SIZE)*N + (blockIdx.y*blockDim.y*TILE_SIZE)+j;
shSrc[i*smColDim+j] = src[indexTopHalo];
shSrc[(HALO+blockDim.x*TILE_SIZE+i)*smColDim + j] = src[indexBottomHalo];
}
}
//For right and left Columns
for ( i = 0 ; i < HALO ; i++ )
{
for ( j = 0 ; j < smRowDim-HALO*2; j ++ )
{
indexLeftHalo = (HALO+blockIdx.x*blockDim.x*TILE_SIZE+j)*N + (blockIdx.y*blockDim.y*TILE_SIZE)+i;
indexRightHalo = (HALO+blockIdx.x*blockDim.x*TILE_SIZE+j)*N + ((blockIdx.y+1)*blockDim.y*TILE_SIZE)+HALO+i;
shSrc[(HALO+j)*smColDim+i] = src[indexLeftHalo];
shSrc[(HALO+j+1)*smColDim-HALO+i] = src[indexRightHalo];
}
}
}
__syncthreads();
for ( i = 0 ; i < TILE_SIZE ; i++ )
{
for ( j = 0 ; j < TILE_SIZE ; j++ )
{
int globalIndex=HALO*N+blockIdx.x*blockDim.x*TILE_SIZE*N+threadIdx.x*TILE_SIZE*N+i*N+blockIdx.y*blockDim.y*TILE_SIZE+threadIdx.y*TILE_SIZE+j+HALO;
int shMemIndex=HALO*smColDim+threadIdx.x*smColDim*TILE_SIZE+i*smColDim+HALO+threadIdx.y*TILE_SIZE+j;
            //Getting the neighbors
north = shSrc[shMemIndex-smColDim];
south = shSrc[shMemIndex+smColDim];
east = shSrc[shMemIndex+1];
west = shSrc[shMemIndex-1];
//Real Stencil operation
dst[globalIndex] = ( north + south + east + west )/5.5;
// dst[globalIndex] = ( north + south + east + west )/4;
}
}
__syncthreads();
} | 5ce81e5f3387de2681daa472064a2e90c99b6210.cu | #include "includes.h"
extern "C" {
}
#define ROTATE_DOWN(val,MAX) ((val-1==-1)?MAX-1:val-1)
#define ROTATE_UP(val,MAX) ((val+1)%MAX)
/**
 * GPU Device kernel for the 2D stencil
 * First attempt during the hackathon
 * M = Rows, N = Cols INCLUDING HALOS
 * In this version the shared memory is reduced to just 3 rows (actually 1+HALO*2 rows)
*/
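/* Added usage sketch (an assumption about the intended launch, not part of the
 * original file): the kernel stages a
 * (2*HALO + blockDim.x*TILE_SIZE) x (2*HALO + blockDim.y*TILE_SIZE) tile of
 * doubles in dynamically sized shared memory, so the caller has to pass that
 * byte count as the third launch parameter. Assuming M and N already include
 * the halos and divide evenly, a launch could look like (BX, BY, d_dst, d_src
 * are placeholder names):
 *
 *   dim3 block(BX, BY);
 *   dim3 grid((M - 2*HALO) / (BX * TILE_SIZE), (N - 2*HALO) / (BY * TILE_SIZE));
 *   size_t shBytes = (size_t)(2*HALO + BX*TILE_SIZE)
 *                  * (2*HALO + BY*TILE_SIZE) * sizeof(double);
 *   gpu_stencil2D_4pt<<<grid, block, shBytes>>>(d_dst, d_src, M, N);
 */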
__global__ void gpu_stencil2D_4pt(double * dst, double * src, int M, int N)
{
//Declaring the shared memory array for source
extern __shared__ double shared_mem[];
double * shSrc = shared_mem;
//indexes
int i, j;
//neighbor's values
double north, south, east, west;
//SharedMem Collumns Dimension
int smColDim = HALO*2+blockDim.y*TILE_SIZE;
int smRowDim = HALO*2+blockDim.x*TILE_SIZE;
//Copying to shared memory
//Inner part
for ( i = 0 ; i < TILE_SIZE ; i++ )
{
for ( j = 0 ; j < TILE_SIZE ; j++ )
{
int globalIndex=HALO*N+blockIdx.x*blockDim.x*TILE_SIZE*N+threadIdx.x*TILE_SIZE*N+i*N+blockIdx.y*blockDim.y*TILE_SIZE+threadIdx.y*TILE_SIZE+j+HALO;
int shMemIndex=HALO*smColDim+threadIdx.x*smColDim*TILE_SIZE+i*smColDim+HALO+threadIdx.y*TILE_SIZE+j;
shSrc[shMemIndex]=src[globalIndex];
}
}
//Halos
if (threadIdx.x == 0 && threadIdx.y == 0 )
{
int indexTopHalo, indexBottomHalo, indexLeftHalo, indexRightHalo;
//For Bottom and top row
for ( i = 0 ; i < HALO ; i++ )
{
for ( j = 0 ; j < smColDim ; j++ )
{
indexTopHalo = (blockIdx.x*blockDim.x*TILE_SIZE+i)*N + (blockIdx.y*blockDim.y*TILE_SIZE) + j;
indexBottomHalo = (HALO + (blockIdx.x+1)*blockDim.x*TILE_SIZE)*N + (blockIdx.y*blockDim.y*TILE_SIZE)+j;
shSrc[i*smColDim+j] = src[indexTopHalo];
shSrc[(HALO+blockDim.x*TILE_SIZE+i)*smColDim + j] = src[indexBottomHalo];
}
}
//For right and left Columns
for ( i = 0 ; i < HALO ; i++ )
{
for ( j = 0 ; j < smRowDim-HALO*2; j ++ )
{
indexLeftHalo = (HALO+blockIdx.x*blockDim.x*TILE_SIZE+j)*N + (blockIdx.y*blockDim.y*TILE_SIZE)+i;
indexRightHalo = (HALO+blockIdx.x*blockDim.x*TILE_SIZE+j)*N + ((blockIdx.y+1)*blockDim.y*TILE_SIZE)+HALO+i;
shSrc[(HALO+j)*smColDim+i] = src[indexLeftHalo];
shSrc[(HALO+j+1)*smColDim-HALO+i] = src[indexRightHalo];
}
}
}
__syncthreads();
for ( i = 0 ; i < TILE_SIZE ; i++ )
{
for ( j = 0 ; j < TILE_SIZE ; j++ )
{
int globalIndex=HALO*N+blockIdx.x*blockDim.x*TILE_SIZE*N+threadIdx.x*TILE_SIZE*N+i*N+blockIdx.y*blockDim.y*TILE_SIZE+threadIdx.y*TILE_SIZE+j+HALO;
int shMemIndex=HALO*smColDim+threadIdx.x*smColDim*TILE_SIZE+i*smColDim+HALO+threadIdx.y*TILE_SIZE+j;
            //Getting the neighbors
north = shSrc[shMemIndex-smColDim];
south = shSrc[shMemIndex+smColDim];
east = shSrc[shMemIndex+1];
west = shSrc[shMemIndex-1];
//Real Stencil operation
dst[globalIndex] = ( north + south + east + west )/5.5;
// dst[globalIndex] = ( north + south + east + west )/4;
}
}
__syncthreads();
} |
6b84614b8cdeaa86ccac310173a5c70c755dfffe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__device__ float backwardSigmoid (float forward, float chain)
{
return forward * (1.0 - forward) * chain;
}
extern "C"
__global__ void backwardSigmoidKernel (int length, float *forward, float *chain, float *destination)
{
int index = blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
destination[index] = backwardSigmoid(forward[index], chain[index]);
}
} | 6b84614b8cdeaa86ccac310173a5c70c755dfffe.cu | __device__ float backwardSigmoid (float forward, float chain)
{
return forward * (1.0 - forward) * chain;
}
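// Added note: for s = sigmoid(x) the derivative is ds/dx = s * (1 - s); `forward`
// holds s from the forward pass, so forward * (1 - forward) * chain is the
// chain-rule product propagated backwards through the sigmoid.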
extern "C"
__global__ void backwardSigmoidKernel (int length, float *forward, float *chain, float *destination)
{
int index = blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
destination[index] = backwardSigmoid(forward[index], chain[index]);
}
} |
70d72c97e7b3380fe798bd9ead3d1c9387389712.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
///////////////////////////////////////////////////////////////////////////
// Square: //
//////////////////////////////////////////////////////////////////////////
#include "square.h"
//Illegal for CUDA __global__ function (i.e. kernel) to be defined as class member function
//You can call a kernel in a struct or class member function, but a kernel cannot be a member function itself
__global__ void square(float * d_out, float * d_in)
{
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f*f;
}
Square::Square()
{
}
Square::~Square()
{
}
void Square::RunSquare(float * h_out, float * h_in, const int ARRAY_SIZE)
{
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
//declare GPU memory pointers
float * d_in;
float * d_out;
//allocate GPU memory
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_out, ARRAY_BYTES);
//transfer the array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
//launch the kernel
GpuTimer timer;
timer.Start();
hipLaunchKernelGGL(( square), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);
timer.Stop();
std::cout << "Done: " << timer.Elapsed() << "ms" << std::endl;
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//copy back the result array to the CPU
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
//print out the resulting array
for(int i=0; i<ARRAY_SIZE; i++)
{
printf("%f", h_out[i]);
printf(((i%4) != 3) ? "\t" : "\n");
}
//free GPU memory allocation
hipFree(d_in);
hipFree(d_out);
}
| 70d72c97e7b3380fe798bd9ead3d1c9387389712.cu | ///////////////////////////////////////////////////////////////////////////
// Square: //
//////////////////////////////////////////////////////////////////////////
#include "square.h"
//Illegal for CUDA __global__ function (i.e. kernel) to be defined as class member function
//You can call a kernel in a struct or class member function, but a kernel cannot be a member function itself
__global__ void square(float * d_out, float * d_in)
{
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f*f;
}
Square::Square()
{
}
Square::~Square()
{
}
void Square::RunSquare(float * h_out, float * h_in, const int ARRAY_SIZE)
{
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
//declare GPU memory pointers
float * d_in;
float * d_out;
//allocate GPU memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out, ARRAY_BYTES);
//transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
//launch the kernel
GpuTimer timer;
timer.Start();
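    // note (added): a single block of ARRAY_SIZE threads is launched here, so this
    // only works while ARRAY_SIZE stays within the per-block thread limit
    // (1024 on current NVIDIA GPUs); larger arrays would need a multi-block grid.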
square<<<1, ARRAY_SIZE>>>(d_out, d_in);
timer.Stop();
std::cout << "Done: " << timer.Elapsed() << "ms" << std::endl;
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//copy back the result array to the CPU
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
//print out the resulting array
for(int i=0; i<ARRAY_SIZE; i++)
{
printf("%f", h_out[i]);
printf(((i%4) != 3) ? "\t" : "\n");
}
//free GPU memory allocation
cudaFree(d_in);
cudaFree(d_out);
}
|
6f36cfce968fc16c41224f774fd8b0b50c41ceef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Milad Rakhsha, Arman Pazouki, Wei Hu
// =============================================================================
//
// Class for performing time integration in fluid system.
// =============================================================================
#include "chrono_fsi/physics/ChFluidDynamics.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
using std::cout;
using std::endl;
namespace chrono {
namespace fsi {
// -----------------------------------------------------------------------------
// Device function to calculate the share of density influence on a given
// particle from all other particles in a given cell
__device__ void collideCellDensityReInit(Real& numerator,
Real& denominator,
int3 gridPos,
uint index,
Real3 posRadA,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
uint* cellStart,
uint* cellEnd) {
uint gridHash = calcGridHash(gridPos);
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
// iterate over particles in this cell
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real4 rhoPreMuB = sortedRhoPreMu[j];
Real3 dist3 = Distance(posRadA, posRadB);
Real d = length(dist3);
if (d > RESOLUTION_LENGTH_MULT * paramsD.HSML)
continue;
numerator += paramsD.markerMass * W3h(d, sortedPosRad[j].w);
denominator += paramsD.markerMass / rhoPreMuB.x * W3h(d, sortedPosRad[j].w);
}
}
}
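// Added note (a reading of the sums above, not original documentation): numerator
// accumulates sum_j m_j * W_ij and denominator accumulates sum_j (m_j / rho_j) * W_ij,
// i.e. the two pieces of a Shepard-filtered density rho_i = numerator / denominator;
// ReCalcDensityD_F1 further below currently stores only the un-normalized numerator.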
// -----------------------------------------------------------------------------
// Kernel to apply periodic BC along x
__global__ void ApplyPeriodicBoundaryXKernel(Real4* posRadD,
Real4* rhoPresMuD,
uint* activityIdentifierD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
uint activity = activityIdentifierD[index];
if (activity == 0)
return; // no need to do anything if it is not an active particle
Real4 rhoPresMu = rhoPresMuD[index];
if (fabs(rhoPresMu.w) < .1)
return; // no need to do anything if it is a boundary particle
Real3 posRad = mR3(posRadD[index]);
Real h = posRadD[index].w;
if (posRad.x > paramsD.cMax.x) {
posRad.x -= (paramsD.cMax.x - paramsD.cMin.x);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w < -.1)
rhoPresMuD[index].y += paramsD.deltaPress.x;
return;
}
if (posRad.x < paramsD.cMin.x) {
posRad.x += (paramsD.cMax.x - paramsD.cMin.x);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w < -.1)
rhoPresMuD[index].y -= paramsD.deltaPress.x;
return;
}
}
// -----------------------------------------------------------------------------
// Kernel to apply inlet/outlet BC along x
__global__ void ApplyInletBoundaryXKernel(Real4* posRadD,
Real3* VelMassD,
Real4* rhoPresMuD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
Real4 rhoPresMu = rhoPresMuD[index];
if (rhoPresMu.w > 0.0)
return; // no need to do anything if it is a boundary particle
Real3 posRad = mR3(posRadD[index]);
Real h = posRadD[index].w;
if (posRad.x > paramsD.cMax.x) {
posRad.x -= (paramsD.cMax.x - paramsD.cMin.x);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w <= 0.0) {
rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.x;
rhoPresMuD[index] = rhoPresMu;
}
}
if (posRad.x < paramsD.cMin.x) {
posRad.x += (paramsD.cMax.x - paramsD.cMin.x);
posRadD[index] = mR4(posRad, h);
VelMassD[index] = mR3(paramsD.V_in.x, 0, 0);
if (rhoPresMu.w <= -.1) {
rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.x;
rhoPresMuD[index] = rhoPresMu;
}
}
if (posRad.x > -paramsD.x_in)
rhoPresMuD[index].y = 0;
if (posRad.x < paramsD.x_in)
VelMassD[index] = mR3(paramsD.V_in.x, 0, 0);
}
// -----------------------------------------------------------------------------
// Kernel to apply periodic BC along y
__global__ void ApplyPeriodicBoundaryYKernel(Real4* posRadD,
Real4* rhoPresMuD,
uint* activityIdentifierD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
uint activity = activityIdentifierD[index];
if (activity == 0)
return; // no need to do anything if it is not an active particle
Real4 rhoPresMu = rhoPresMuD[index];
if (fabs(rhoPresMu.w) < .1)
return; // no need to do anything if it is a boundary particle
Real3 posRad = mR3(posRadD[index]);
Real h = posRadD[index].w;
if (posRad.y > paramsD.cMax.y) {
posRad.y -= (paramsD.cMax.y - paramsD.cMin.y);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w < -.1) {
rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.y;
rhoPresMuD[index] = rhoPresMu;
}
return;
}
if (posRad.y < paramsD.cMin.y) {
posRad.y += (paramsD.cMax.y - paramsD.cMin.y);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w < -.1) {
rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.y;
rhoPresMuD[index] = rhoPresMu;
}
return;
}
}
// -----------------------------------------------------------------------------
// Kernel to apply periodic BC along z
__global__ void ApplyPeriodicBoundaryZKernel(Real4* posRadD,
Real4* rhoPresMuD,
uint* activityIdentifierD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
uint activity = activityIdentifierD[index];
if (activity == 0)
return; // no need to do anything if it is not an active particle
Real4 rhoPresMu = rhoPresMuD[index];
if (fabs(rhoPresMu.w) < .1)
return; // no need to do anything if it is a boundary particle
Real3 posRad = mR3(posRadD[index]);
Real h = posRadD[index].w;
if (posRad.z > paramsD.cMax.z) {
posRad.z -= (paramsD.cMax.z - paramsD.cMin.z);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w < -.1) {
rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.z;
rhoPresMuD[index] = rhoPresMu;
}
return;
}
if (posRad.z < paramsD.cMin.z) {
posRad.z += (paramsD.cMax.z - paramsD.cMin.z);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w < -.1) {
rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.z;
rhoPresMuD[index] = rhoPresMu;
}
return;
}
}
// -----------------------------------------------------------------------------
// Kernel to keep particle inside the simulation domain
__global__ void ApplyOutOfBoundaryKernel(Real4* posRadD,
Real4* rhoPresMuD,
Real3* velMasD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
Real4 rhoPresMu = rhoPresMuD[index];
if (fabs(rhoPresMu.w) < .1)
return; // no need to do anything if it is a boundary particle
Real3 posRad = mR3(posRadD[index]);
Real3 vel = mR3(velMasD[index]);
Real h = posRadD[index].w;
if (posRad.x > 0.5 * paramsD.boxDimX)
posRad.x = 0.5 * paramsD.boxDimX;
if (posRad.x < -0.5 * paramsD.boxDimX)
posRad.x = -0.5 * paramsD.boxDimX;
if (posRad.y > 0.5 * paramsD.boxDimY)
posRad.y = 0.5 * paramsD.boxDimY;
if (posRad.y < -0.5 * paramsD.boxDimY)
posRad.y = -0.5 * paramsD.boxDimY;
if (posRad.z > 1.0 * paramsD.boxDimZ)
posRad.z = 1.0 * paramsD.boxDimZ;
if (posRad.z < -0.0 * paramsD.boxDimZ)
posRad.z = -0.0 * paramsD.boxDimZ;
posRadD[index] = mR4(posRad, h);
velMasD[index] = mR3(vel);
return;
}
// -----------------------------------------------------------------------------
// Kernel to update the fluid properties. It updates the stress tensor,
// density, velocity and position relying on explicit Euler scheme.
// Pressure is obtained from the density and an Equation of State.
__global__ void UpdateFluidD(Real4* posRadD,
Real3* velMasD,
Real4* rhoPresMuD,
Real3* tauXxYyZzD,
Real3* tauXyXzYzD,
Real3* vel_XSPH_D,
Real4* derivVelRhoD,
Real3* derivTauXxYyZzD,
Real3* derivTauXyXzYzD,
Real4* sr_tau_I_mu_iD,
uint* activityIdentifierD,
uint* freeSurfaceIdD,
int2 updatePortion,
Real dT,
volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
index += updatePortion.x;
if (index >= updatePortion.y)
return;
uint activity = activityIdentifierD[index];
if (activity == 0)
return;
Real4 derivVelRho = derivVelRhoD[index];
Real4 rhoPresMu = rhoPresMuD[index];
Real h = posRadD[index].w;
Real p_tr, p_n;
if (rhoPresMu.w < 0) {
// This is only implemented for granular material
if (paramsD.elastic_SPH) {
//--------------------------------
// ** total stress tau
//--------------------------------
Real3 tauXxYyZz = tauXxYyZzD[index];
Real3 tauXyXzYz = tauXyXzYzD[index];
Real3 derivTauXxYyZz = derivTauXxYyZzD[index];
Real3 derivTauXyXzYz = derivTauXyXzYzD[index];
Real3 updatedTauXxYyZz = tauXxYyZz + mR3(derivTauXxYyZz) * dT;
Real3 updatedTauXyXzYz = tauXyXzYz + mR3(derivTauXyXzYz) * dT;
// check if there is a plastic flow
p_n = -1.0 / 3.0 * (tauXxYyZz.x + tauXxYyZz.y + tauXxYyZz.z);
tauXxYyZz.x += p_n;
tauXxYyZz.y += p_n;
tauXxYyZz.z += p_n;
p_tr = -1.0 / 3.0 * (updatedTauXxYyZz.x + updatedTauXxYyZz.y + updatedTauXxYyZz.z);
updatedTauXxYyZz.x += p_tr;
updatedTauXxYyZz.y += p_tr;
updatedTauXxYyZz.z += p_tr;
Real tau_tr = square(updatedTauXxYyZz.x) + square(updatedTauXxYyZz.y) +
square(updatedTauXxYyZz.z) + 2.0 * square(updatedTauXyXzYz.x) +
2.0 * square(updatedTauXyXzYz.y) + 2.0 * square(updatedTauXyXzYz.z);
Real tau_n = square(tauXxYyZz.x) + square(tauXxYyZz.y) + square(tauXxYyZz.z) +
2.0 * square(tauXyXzYz.x) + 2.0 * square(tauXyXzYz.y) + 2.0 * square(tauXyXzYz.z);
tau_tr = sqrt(0.5 * tau_tr);
tau_n = sqrt(0.5 * tau_n);
Real Chi = abs(tau_tr - tau_n) * paramsD.INV_G_shear / dT;
// should use the positive magnitude according to "A
// constitutive law for dense granular flows" Nature 2006
Real mu_s = paramsD.mu_fric_s;
Real mu_2 = paramsD.mu_fric_2;
// Real s_0 = mu_s * p_tr;
// Real s_2 = mu_2 * p_tr;
// Real xi = 1.1;
Real dia = paramsD.ave_diam;
Real I0 = paramsD.mu_I0; // xi*dia*sqrt(rhoPresMu.x);//
Real I = Chi * dia * sqrt( paramsD.rho0 / ( p_tr + 1.0e9 ) );
Real coh = paramsD.Coh_coeff;
// Real Chi_cri = 0.1;
// if (Chi < Chi_cri){
// coh = paramsD.Coh_coeff * (1.0 - sin(-1.57 + 3.14 * (Chi / Chi_cri))) / 2.0;
// // coh = paramsD.Coh_coeff * (1.0 - I / I_cri);
// } else {
// coh = 0.0;
// }
Real inv_mus = 1.0 / paramsD.mu_fric_s;
Real p_cri = - coh * inv_mus;
if (p_tr > p_cri) {
Real mu = mu_s + (mu_2 - mu_s) * (I + 1.0e-9) / (I0 + I + 1.0e-9);
// Real G0 = paramsD.G_shear;
// Real alpha = xi*G0*I0*(dT)*sqrt(p_tr);
// Real B0 = s_2 + tau_tr + alpha;
// Real H0 = s_2*tau_tr + s_0*alpha;
// Real tau_n1 = (B0+sqrt(B0*B0-4*H0))/(2*H0+1e-9);
// if(tau_tr>s_0){
// Real coeff = tau_n1/(tau_tr+1e-9);
// updatedTauXxYyZz = updatedTauXxYyZz*coeff;
// updatedTauXyXzYz = updatedTauXyXzYz*coeff;
// }
Real tau_max = p_tr * mu + coh; // p_tr*paramsD.Q_FA;
// should use tau_max instead of s_0 according to
// "A constitutive law for dense granular flows" Nature 2006
if (tau_tr > tau_max) {
Real coeff = tau_max / (tau_tr + 1e-9);
updatedTauXxYyZz = updatedTauXxYyZz * coeff;
updatedTauXyXzYz = updatedTauXyXzYz * coeff;
}
}
// Set stress to zero if the pressure is smaller than the threshold
if (p_tr < p_cri) {
updatedTauXxYyZz = mR3(0.0);
updatedTauXyXzYz = mR3(0.0);
p_tr = 0.0;
// Real coeff = abs(p_cri / (p_tr + 1e-9));
// if (p_tr < 2.0 * p_cri){
// coeff = 0.0;
// } else {
// coeff = abs(1.0 - (p_tr - p_cri) / p_cri);
// }
// updatedTauXxYyZz = updatedTauXxYyZz * coeff;
// updatedTauXyXzYz = updatedTauXyXzYz * coeff;
// p_tr = p_cri * coeff;
}
// Set stress to zero if the particle is close to free surface
if (freeSurfaceIdD[index] == 1) {
updatedTauXxYyZz = mR3(0.0);
updatedTauXyXzYz = mR3(0.0);
p_tr = 0.0;
}
if (paramsD.output_length == 2) {
Real tau_tr = square(updatedTauXxYyZz.x) + square(updatedTauXxYyZz.y) +
square(updatedTauXxYyZz.z) + 2.0 * (square(updatedTauXyXzYz.x) +
square(updatedTauXyXzYz.y) + square(updatedTauXyXzYz.z));
tau_tr = sqrt(0.5 * tau_tr);
sr_tau_I_mu_iD[index].y = tau_tr;
}
tauXxYyZzD[index] = updatedTauXxYyZz - mR3(p_tr);
tauXyXzYzD[index] = updatedTauXyXzYz;
}
//-------------
// ** position
//-------------
Real3 vel_XSPH = velMasD[index] + vel_XSPH_D[index]; // paramsD.EPS_XSPH *
Real3 posRad = mR3(posRadD[index]);
Real3 updatedPositon = posRad + vel_XSPH * dT;
if (!(isfinite(updatedPositon.x) && isfinite(updatedPositon.y) && isfinite(updatedPositon.z))) {
printf("Error! particle position is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !\n");
*isErrorD = true;
return;
}
posRadD[index] = mR4(updatedPositon, h);
//-------------
// ** velocity
//-------------
// Note that the velocity update should not use the XSPH contribution
// It adds dissipation to the solution, and provides numerical damping
Real3 velMas = velMasD[index] + 0.0 * vel_XSPH_D[index]; // paramsD.EPS_XSPH * vel_XSPH_D[index]
Real3 updatedVelocity = velMas + mR3(derivVelRho) * dT;
velMasD[index] = updatedVelocity;
//-------------
// ** density
//-------------
if (paramsD.elastic_SPH) { // This is only implemented for granular material
rhoPresMu.y = p_tr;
rhoPresMu.x = paramsD.rho0;
} else {
Real rho2 = rhoPresMu.x + derivVelRho.w * dT;
rhoPresMu.y = Eos(rho2, rhoPresMu.w);
rhoPresMu.x = rho2;
}
if (!(isfinite(rhoPresMu.x) && isfinite(rhoPresMu.y) && isfinite(rhoPresMu.z) && isfinite(rhoPresMu.w))) {
printf("Error! particle rho pressure is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !\n");
*isErrorD = true;
return;
}
rhoPresMuD[index] = rhoPresMu;
}
// Important note: the derivVelRhoD that is calculated by the ChForceExplicitSPH is the negative of actual time
// derivative. That is important to keep the derivVelRhoD to be the force/mass for fsi forces.
// calculate the force that is f=m dv/dt
// derivVelRhoD[index] *= paramsD.markerMass;
}
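// Added note (a reading of the granular stress return above, not original
// documentation): the trial deviatoric stress is scaled back whenever its second
// invariant exceeds tau_max = mu(I) * p_tr + Coh_coeff, with
//   mu(I) = mu_fric_s + (mu_fric_2 - mu_fric_s) * I / (mu_I0 + I)
// and the inertial number I = Chi * ave_diam * sqrt(rho0 / p_tr), where
// Chi = |tau_tr - tau_n| / (G_shear * dT). Stress and pressure are zeroed below
// the tensile cutoff p_cri = -Coh_coeff / mu_fric_s and for particles flagged
// as free surface.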
//------------------------------------------------------------------------------
__global__ void Update_Fluid_State(Real3* new_vel,
Real4* posRad,
Real3* velMas,
Real4* rhoPreMu,
int4 updatePortion,
double dT,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= updatePortion.y)
return;
velMas[i_idx] = new_vel[i_idx];
Real3 newpos = mR3(posRad[i_idx]) + dT * velMas[i_idx];
Real h = posRad[i_idx].w;
posRad[i_idx] = mR4(newpos, h);
if (!(isfinite(posRad[i_idx].x) &&
isfinite(posRad[i_idx].y) && isfinite(posRad[i_idx].z))) {
printf("Error! particle %d position is NAN: thrown from UpdateFluidDKernel %f,%f,%f,%f\n",
i_idx, posRad[i_idx].x, posRad[i_idx].y, posRad[i_idx].z, posRad[i_idx].w);
}
if (!(isfinite(rhoPreMu[i_idx].x) &&
isfinite(rhoPreMu[i_idx].y) && isfinite(rhoPreMu[i_idx].z))) {
printf("Error! particle %d rhoPreMu is NAN: thrown from UpdateFluidDKernel ! %f,%f,%f,%f\n",
i_idx, rhoPreMu[i_idx].x, rhoPreMu[i_idx].y, rhoPreMu[i_idx].z, rhoPreMu[i_idx].w);
}
if (!(isfinite(velMas[i_idx].x) &&
isfinite(velMas[i_idx].y) && isfinite(velMas[i_idx].z))) {
printf("Error! particle %d velocity is NAN: thrown from UpdateFluidDKernel !%f,%f,%f\n",
i_idx, velMas[i_idx].x, velMas[i_idx].y, velMas[i_idx].z);
}
}
// -----------------------------------------------------------------------------
// Kernel for updating the density.
// It calculates the density of the particle. The Shepard normalization used
// close to the boundaries and free surface is computed but currently not
// applied (the division by the denominator is commented out below).
__global__ void ReCalcDensityD_F1(Real4* dummySortedRhoPreMu,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
uint* gridMarkerIndex,
uint* cellStart,
uint* cellEnd) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
// read particle data from sorted arrays
Real3 posRadA = mR3(sortedPosRad[index]);
Real4 rhoPreMuA = sortedRhoPreMu[index];
// get address in grid
int3 gridPos = calcGridPos(posRadA);
Real numerator = 0.0;
Real denominator = 0.0;
// examine neighbouring cells
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
collideCellDensityReInit(numerator, denominator, neighbourPos, index,
posRadA, sortedPosRad, sortedVelMas, sortedRhoPreMu, cellStart, cellEnd);
}
}
}
rhoPreMuA.x = numerator; // denominator;
// rhoPreMuA.y = Eos(rhoPreMuA.x, rhoPreMuA.w);
dummySortedRhoPreMu[index] = rhoPreMuA;
}
// -----------------------------------------------------------------------------
// Kernel for updating the activity of all particles.
__global__ void UpdateActivityD(Real4* posRadD,
Real3* velMasD,
Real3* posRigidBodiesD,
Real3* pos_fsi_fea_D,
uint* activityIdentifierD,
uint* extendedActivityIdD,
int2 updatePortion,
Real Time,
volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
index += updatePortion.x;
if (index >= updatePortion.y)
return;
// Set the particle as an active particle
activityIdentifierD[index] = 1;
extendedActivityIdD[index] = 1;
// If during the settling phase, all particles are active
if (Time < paramsD.settlingTime)
return;
size_t numRigidBodies = numObjectsD.numRigidBodies;
size_t numFlexNodes = numObjectsD.numFlexNodes;
size_t numTotal = numRigidBodies + numFlexNodes;
// Check the activity of this particle
uint isNotActive = 0;
uint isNotExtended = 0;
Real3 Acdomain = paramsD.bodyActiveDomain;
Real3 ExAcdomain = paramsD.bodyActiveDomain +
mR3(2 * RESOLUTION_LENGTH_MULT * paramsD.HSML);
Real3 posRadA = mR3(posRadD[index]);
for (uint num = 0; num < numRigidBodies; num++) {
Real3 detPos = posRadA - posRigidBodiesD[num];
if (abs(detPos.x) > Acdomain.x || abs(detPos.y) > Acdomain.y ||
abs(detPos.z) > Acdomain.z)
isNotActive = isNotActive + 1;
if (abs(detPos.x) > ExAcdomain.x || abs(detPos.y) > ExAcdomain.y ||
abs(detPos.z) > ExAcdomain.z)
isNotExtended = isNotExtended + 1;
}
for (uint num = 0; num < numFlexNodes; num++) {
Real3 detPos = posRadA - pos_fsi_fea_D[num];
if (abs(detPos.x) > Acdomain.x || abs(detPos.y) > Acdomain.y ||
abs(detPos.z) > Acdomain.z)
isNotActive = isNotActive + 1;
if (abs(detPos.x) > ExAcdomain.x || abs(detPos.y) > ExAcdomain.y ||
abs(detPos.z) > ExAcdomain.z)
isNotExtended = isNotExtended + 1;
}
// Set the particle as an inactive particle if needed
if (isNotActive == numTotal && numTotal > 0) {
activityIdentifierD[index] = 0;
velMasD[index] = mR3(0.0);
}
if (isNotExtended == numTotal && numTotal > 0)
extendedActivityIdD[index] = 0;
return;
}
// -----------------------------------------------------------------------------
// CLASS FOR FLUID DYNAMICS SYSTEM
// -----------------------------------------------------------------------------
ChFluidDynamics::ChFluidDynamics(std::shared_ptr<ChBce> otherBceWorker,
ChSystemFsi_impl& otherFsiSystem,
std::shared_ptr<SimParams> otherParamsH,
std::shared_ptr<ChCounters> otherNumObjects,
TimeIntegrator type,
bool verb)
: fsiSystem(otherFsiSystem),
paramsH(otherParamsH),
numObjectsH(otherNumObjects),
integrator_type(type),
verbose(verb) {
switch (integrator_type) {
case TimeIntegrator::I2SPH:
forceSystem = chrono_types::make_shared<ChFsiForceI2SPH>(
otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
if (verbose) {
cout << "============================================" << endl;
cout << "====== Created an I2SPH framework ======" << endl;
cout << "============================================" << endl;
}
break;
case TimeIntegrator::IISPH:
forceSystem = chrono_types::make_shared<ChFsiForceIISPH>(
otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
if (verbose) {
cout << "============================================" << endl;
cout << "====== Created an IISPH framework ======" << endl;
cout << "============================================" << endl;
}
break;
case TimeIntegrator::EXPLICITSPH:
forceSystem = chrono_types::make_shared<ChFsiForceExplicitSPH>(
otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
if (verbose) {
cout << "============================================" << endl;
cout << "====== Created a WCSPH framework =======" << endl;
cout << "============================================" << endl;
}
break;
// Extend this function with your own linear solvers
default:
forceSystem = chrono_types::make_shared<ChFsiForceExplicitSPH>(
otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
cout << "Selected integrator type not implemented, reverting back to WCSPH" << endl;
}
}
// -----------------------------------------------------------------------------
ChFluidDynamics::~ChFluidDynamics() {}
// -----------------------------------------------------------------------------
void ChFluidDynamics::Initialize() {
forceSystem->Initialize();
hipMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams));
hipMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters));
hipMemcpyFromSymbol(paramsH.get(), paramsD, sizeof(SimParams));
}
// -----------------------------------------------------------------------------
void ChFluidDynamics::IntegrateSPH(std::shared_ptr<SphMarkerDataD> sphMarkersD2,
std::shared_ptr<SphMarkerDataD> sphMarkersD1,
std::shared_ptr<FsiBodiesDataD> fsiBodiesD,
std::shared_ptr<FsiMeshDataD> fsiMeshD,
Real dT,
Real Time) {
if (GetIntegratorType() == TimeIntegrator::EXPLICITSPH) {
this->UpdateActivity(sphMarkersD1, sphMarkersD2, fsiBodiesD, fsiMeshD, Time);
forceSystem->ForceSPH(sphMarkersD2, fsiBodiesD, fsiMeshD);
} else
forceSystem->ForceSPH(sphMarkersD1, fsiBodiesD, fsiMeshD);
if (integrator_type == TimeIntegrator::IISPH)
this->UpdateFluid_Implicit(sphMarkersD2);
else if (GetIntegratorType() == TimeIntegrator::EXPLICITSPH)
this->UpdateFluid(sphMarkersD1, dT);
this->ApplyBoundarySPH_Markers(sphMarkersD2);
}
// -----------------------------------------------------------------------------
void ChFluidDynamics::UpdateActivity(std::shared_ptr<SphMarkerDataD> sphMarkersD1,
std::shared_ptr<SphMarkerDataD> sphMarkersD2,
std::shared_ptr<FsiBodiesDataD> fsiBodiesD,
std::shared_ptr<FsiMeshDataD> fsiMeshD,
Real Time) {
// Update portion of the SPH particles (should be all particles here)
int2 updatePortion = mI2(0, (int)numObjectsH->numAllMarkers);
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
hipMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
//------------------------
uint numBlocks, numThreads;
computeGridSize(updatePortion.y - updatePortion.x, 256, numBlocks, numThreads);
hipLaunchKernelGGL(( UpdateActivityD), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sphMarkersD2->posRadD), mR3CAST(sphMarkersD1->velMasD),
mR3CAST(fsiBodiesD->posRigid_fsiBodies_D),
mR3CAST(fsiMeshD->pos_fsi_fea_D),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD),
U1CAST(fsiSystem.fsiGeneralData->extendedActivityIdD),
updatePortion, Time, isErrorD);
hipDeviceSynchronize();
cudaCheckError();
//------------------------
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true)
throw std::runtime_error("Error! program crashed in UpdateActivityD!\n");
hipFree(isErrorD);
free(isErrorH);
}
// -----------------------------------------------------------------------------
void ChFluidDynamics::UpdateFluid(std::shared_ptr<SphMarkerDataD> sphMarkersD, Real dT) {
// Update portion of the SPH particles (should be fluid particles only here)
int2 updatePortion = mI2(0, fsiSystem.fsiGeneralData->referenceArray[0].y);
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
hipMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
//------------------------
uint numBlocks, numThreads;
computeGridSize(updatePortion.y - updatePortion.x, 256, numBlocks, numThreads);
hipLaunchKernelGGL(( UpdateFluidD), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sphMarkersD->posRadD),
mR3CAST(sphMarkersD->velMasD),
mR4CAST(sphMarkersD->rhoPresMuD),
mR3CAST(sphMarkersD->tauXxYyZzD),
mR3CAST(sphMarkersD->tauXyXzYzD),
mR3CAST(fsiSystem.fsiGeneralData->vel_XSPH_D),
mR4CAST(fsiSystem.fsiGeneralData->derivVelRhoD),
mR3CAST(fsiSystem.fsiGeneralData->derivTauXxYyZzD),
mR3CAST(fsiSystem.fsiGeneralData->derivTauXyXzYzD),
mR4CAST(fsiSystem.fsiGeneralData->sr_tau_I_mu_i),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD),
U1CAST(fsiSystem.fsiGeneralData->freeSurfaceIdD),
updatePortion, dT, isErrorD);
hipDeviceSynchronize();
cudaCheckError();
//------------------------
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true)
throw std::runtime_error("Error! program crashed in UpdateFluidD!\n");
hipFree(isErrorD);
free(isErrorH);
}
// -----------------------------------------------------------------------------
void ChFluidDynamics::UpdateFluid_Implicit(std::shared_ptr<SphMarkerDataD> sphMarkersD) {
uint numThreads, numBlocks;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
int haveGhost = (numObjectsH->numGhostMarkers > 0) ? 1 : 0;
int haveHelper = (numObjectsH->numHelperMarkers > 0) ? 1 : 0;
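    // Select only the SPH fluid markers, skipping the helper and ghost marker
    // segments of referenceArray when they are present.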
int4 updatePortion = mI4(fsiSystem.fsiGeneralData->referenceArray[haveHelper].x,
fsiSystem.fsiGeneralData->referenceArray[haveHelper + haveGhost].y, 0, 0);
cout << "time step in UpdateFluid_Implicit " << paramsH->dT << endl;
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
hipMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Update_Fluid_State), dim3(numBlocks), dim3(numThreads), 0, 0,
mR3CAST(fsiSystem.fsiGeneralData->vel_XSPH_D),
mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD),
mR4CAST(sphMarkersD->rhoPresMuD), updatePortion, paramsH->dT, isErrorD);
hipDeviceSynchronize();
cudaCheckError();
hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
if (*isErrorH == true)
throw std::runtime_error("Error! program crashed in Update_Fluid_State!\n");
hipFree(isErrorD);
free(isErrorH);
}
// -----------------------------------------------------------------------------
// Apply periodic boundary conditions in x, y, and z directions
void ChFluidDynamics::ApplyBoundarySPH_Markers(std::shared_ptr<SphMarkerDataD> sphMarkersD) {
uint numBlocks, numThreads;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
hipLaunchKernelGGL(( ApplyPeriodicBoundaryXKernel), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
hipDeviceSynchronize();
cudaCheckError();
hipLaunchKernelGGL(( ApplyPeriodicBoundaryYKernel), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
hipDeviceSynchronize();
cudaCheckError();
hipLaunchKernelGGL(( ApplyPeriodicBoundaryZKernel), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
hipDeviceSynchronize();
cudaCheckError();
// ApplyOutOfBoundaryKernel<<<numBlocks, numThreads>>>
// (mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD), mR3CAST(sphMarkersD->velMasD));
// hipDeviceSynchronize();
// cudaCheckError();
}
// -----------------------------------------------------------------------------
// Apply periodic boundary conditions in the y and z directions.
// The inlet/outlet BC is applied in the x direction.
// This function needs to be tested.
void ChFluidDynamics::ApplyModifiedBoundarySPH_Markers(std::shared_ptr<SphMarkerDataD> sphMarkersD) {
uint numBlocks, numThreads;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
hipLaunchKernelGGL(( ApplyInletBoundaryXKernel), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD),
mR4CAST(sphMarkersD->rhoPresMuD));
hipDeviceSynchronize();
cudaCheckError();
// these are useful anyway for out of bound particles
hipLaunchKernelGGL(( ApplyPeriodicBoundaryYKernel), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
hipDeviceSynchronize();
cudaCheckError();
hipLaunchKernelGGL(( ApplyPeriodicBoundaryZKernel), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
hipDeviceSynchronize();
cudaCheckError();
}
// -----------------------------------------------------------------------------
void ChFluidDynamics::DensityReinitialization() {
uint numBlocks, numThreads;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
thrust::device_vector<Real4> dummySortedRhoPreMu(numObjectsH->numAllMarkers);
thrust::fill(dummySortedRhoPreMu.begin(), dummySortedRhoPreMu.end(), mR4(0.0));
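    // Recompute densities on the spatially sorted marker arrays into a scratch
    // buffer, then scatter the result back to both (unsorted) marker buffers
    // through the sorted-to-original index map.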
hipLaunchKernelGGL(( ReCalcDensityD_F1), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(dummySortedRhoPreMu),
mR4CAST(fsiSystem.sortedSphMarkersD->posRadD),
mR3CAST(fsiSystem.sortedSphMarkersD->velMasD),
mR4CAST(fsiSystem.sortedSphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.markersProximityD->gridMarkerIndexD),
U1CAST(fsiSystem.markersProximityD->cellStartD),
U1CAST(fsiSystem.markersProximityD->cellEndD));
hipDeviceSynchronize();
cudaCheckError();
ChFsiForce::CopySortedToOriginal_NonInvasive_R4(
fsiSystem.sphMarkersD1->rhoPresMuD, dummySortedRhoPreMu,
fsiSystem.markersProximityD->gridMarkerIndexD);
ChFsiForce::CopySortedToOriginal_NonInvasive_R4(
fsiSystem.sphMarkersD2->rhoPresMuD, dummySortedRhoPreMu,
fsiSystem.markersProximityD->gridMarkerIndexD);
dummySortedRhoPreMu.clear();
}
} // namespace fsi
} // end namespace chrono
| 6f36cfce968fc16c41224f774fd8b0b50c41ceef.cu | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Milad Rakhsha, Arman Pazouki, Wei Hu
// =============================================================================
//
// Class for performing time integration in fluid system.
// =============================================================================
#include "chrono_fsi/physics/ChFluidDynamics.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
using std::cout;
using std::endl;
namespace chrono {
namespace fsi {
// -----------------------------------------------------------------------------
// Device function to calculate the share of density influence on a given
// particle from all other particles in a given cell
__device__ void collideCellDensityReInit(Real& numerator,
Real& denominator,
int3 gridPos,
uint index,
Real3 posRadA,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
uint* cellStart,
uint* cellEnd) {
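    // numerator accumulates the plain SPH density sum (m_j * W_ij); denominator
    // accumulates a Shepard-style normalization term (m_j / rho_j * W_ij) that a
    // caller may use to correct the density near boundaries and the free surface.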
uint gridHash = calcGridHash(gridPos);
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
// iterate over particles in this cell
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real4 rhoPreMuB = sortedRhoPreMu[j];
Real3 dist3 = Distance(posRadA, posRadB);
Real d = length(dist3);
if (d > RESOLUTION_LENGTH_MULT * paramsD.HSML)
continue;
numerator += paramsD.markerMass * W3h(d, sortedPosRad[j].w);
denominator += paramsD.markerMass / rhoPreMuB.x * W3h(d, sortedPosRad[j].w);
}
}
}
// -----------------------------------------------------------------------------
// Kernel to apply periodic BC along x
__global__ void ApplyPeriodicBoundaryXKernel(Real4* posRadD,
Real4* rhoPresMuD,
uint* activityIdentifierD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
uint activity = activityIdentifierD[index];
if (activity == 0)
return; // no need to do anything if it is not an active particle
Real4 rhoPresMu = rhoPresMuD[index];
if (fabs(rhoPresMu.w) < .1)
return; // no need to do anything if it is a boundary particle
Real3 posRad = mR3(posRadD[index]);
Real h = posRadD[index].w;
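    // When a fluid particle wraps around the periodic box in x, shift its pressure
    // by deltaPress.x so that an imposed pressure gradient across the domain is
    // preserved.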
if (posRad.x > paramsD.cMax.x) {
posRad.x -= (paramsD.cMax.x - paramsD.cMin.x);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w < -.1)
rhoPresMuD[index].y += paramsD.deltaPress.x;
return;
}
if (posRad.x < paramsD.cMin.x) {
posRad.x += (paramsD.cMax.x - paramsD.cMin.x);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w < -.1)
rhoPresMuD[index].y -= paramsD.deltaPress.x;
return;
}
}
// -----------------------------------------------------------------------------
// Kernel to apply inlet/outlet BC along x
__global__ void ApplyInletBoundaryXKernel(Real4* posRadD,
Real3* VelMassD,
Real4* rhoPresMuD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
Real4 rhoPresMu = rhoPresMuD[index];
if (rhoPresMu.w > 0.0)
return; // no need to do anything if it is a boundary particle
Real3 posRad = mR3(posRadD[index]);
Real h = posRadD[index].w;
if (posRad.x > paramsD.cMax.x) {
posRad.x -= (paramsD.cMax.x - paramsD.cMin.x);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w <= 0.0) {
rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.x;
rhoPresMuD[index] = rhoPresMu;
}
}
if (posRad.x < paramsD.cMin.x) {
posRad.x += (paramsD.cMax.x - paramsD.cMin.x);
posRadD[index] = mR4(posRad, h);
VelMassD[index] = mR3(paramsD.V_in.x, 0, 0);
if (rhoPresMu.w <= -.1) {
rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.x;
rhoPresMuD[index] = rhoPresMu;
}
}
if (posRad.x > -paramsD.x_in)
rhoPresMuD[index].y = 0;
if (posRad.x < paramsD.x_in)
VelMassD[index] = mR3(paramsD.V_in.x, 0, 0);
}
// -----------------------------------------------------------------------------
// Kernel to apply periodic BC along y
__global__ void ApplyPeriodicBoundaryYKernel(Real4* posRadD,
Real4* rhoPresMuD,
uint* activityIdentifierD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
uint activity = activityIdentifierD[index];
if (activity == 0)
return; // no need to do anything if it is not an active particle
Real4 rhoPresMu = rhoPresMuD[index];
if (fabs(rhoPresMu.w) < .1)
return; // no need to do anything if it is a boundary particle
Real3 posRad = mR3(posRadD[index]);
Real h = posRadD[index].w;
if (posRad.y > paramsD.cMax.y) {
posRad.y -= (paramsD.cMax.y - paramsD.cMin.y);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w < -.1) {
rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.y;
rhoPresMuD[index] = rhoPresMu;
}
return;
}
if (posRad.y < paramsD.cMin.y) {
posRad.y += (paramsD.cMax.y - paramsD.cMin.y);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w < -.1) {
rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.y;
rhoPresMuD[index] = rhoPresMu;
}
return;
}
}
// -----------------------------------------------------------------------------
// Kernel to apply periodic BC along z
__global__ void ApplyPeriodicBoundaryZKernel(Real4* posRadD,
Real4* rhoPresMuD,
uint* activityIdentifierD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
uint activity = activityIdentifierD[index];
if (activity == 0)
return; // no need to do anything if it is not an active particle
Real4 rhoPresMu = rhoPresMuD[index];
if (fabs(rhoPresMu.w) < .1)
return; // no need to do anything if it is a boundary particle
Real3 posRad = mR3(posRadD[index]);
Real h = posRadD[index].w;
if (posRad.z > paramsD.cMax.z) {
posRad.z -= (paramsD.cMax.z - paramsD.cMin.z);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w < -.1) {
rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.z;
rhoPresMuD[index] = rhoPresMu;
}
return;
}
if (posRad.z < paramsD.cMin.z) {
posRad.z += (paramsD.cMax.z - paramsD.cMin.z);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w < -.1) {
rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.z;
rhoPresMuD[index] = rhoPresMu;
}
return;
}
}
// -----------------------------------------------------------------------------
// Kernel to keep particle inside the simulation domain
__global__ void ApplyOutOfBoundaryKernel(Real4* posRadD,
Real4* rhoPresMuD,
Real3* velMasD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
Real4 rhoPresMu = rhoPresMuD[index];
if (fabs(rhoPresMu.w) < .1)
return; // no need to do anything if it is a boundary particle
Real3 posRad = mR3(posRadD[index]);
Real3 vel = mR3(velMasD[index]);
Real h = posRadD[index].w;
if (posRad.x > 0.5 * paramsD.boxDimX)
posRad.x = 0.5 * paramsD.boxDimX;
if (posRad.x < -0.5 * paramsD.boxDimX)
posRad.x = -0.5 * paramsD.boxDimX;
if (posRad.y > 0.5 * paramsD.boxDimY)
posRad.y = 0.5 * paramsD.boxDimY;
if (posRad.y < -0.5 * paramsD.boxDimY)
posRad.y = -0.5 * paramsD.boxDimY;
if (posRad.z > 1.0 * paramsD.boxDimZ)
posRad.z = 1.0 * paramsD.boxDimZ;
if (posRad.z < -0.0 * paramsD.boxDimZ)
posRad.z = -0.0 * paramsD.boxDimZ;
posRadD[index] = mR4(posRad, h);
velMasD[index] = mR3(vel);
return;
}
// -----------------------------------------------------------------------------
// Kernel to update the fluid properties. It updates the stress tensor,
// density, velocity and position using an explicit Euler scheme.
// Pressure is obtained from the density and an Equation of State.
__global__ void UpdateFluidD(Real4* posRadD,
Real3* velMasD,
Real4* rhoPresMuD,
Real3* tauXxYyZzD,
Real3* tauXyXzYzD,
Real3* vel_XSPH_D,
Real4* derivVelRhoD,
Real3* derivTauXxYyZzD,
Real3* derivTauXyXzYzD,
Real4* sr_tau_I_mu_iD,
uint* activityIdentifierD,
uint* freeSurfaceIdD,
int2 updatePortion,
Real dT,
volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
index += updatePortion.x;
if (index >= updatePortion.y)
return;
uint activity = activityIdentifierD[index];
if (activity == 0)
return;
Real4 derivVelRho = derivVelRhoD[index];
Real4 rhoPresMu = rhoPresMuD[index];
Real h = posRadD[index].w;
Real p_tr, p_n;
if (rhoPresMu.w < 0) {
// This is only implemented for granular material
if (paramsD.elastic_SPH) {
//--------------------------------
// ** total stress tau
//--------------------------------
Real3 tauXxYyZz = tauXxYyZzD[index];
Real3 tauXyXzYz = tauXyXzYzD[index];
Real3 derivTauXxYyZz = derivTauXxYyZzD[index];
Real3 derivTauXyXzYz = derivTauXyXzYzD[index];
Real3 updatedTauXxYyZz = tauXxYyZz + mR3(derivTauXxYyZz) * dT;
Real3 updatedTauXyXzYz = tauXyXzYz + mR3(derivTauXyXzYz) * dT;
// check if there is a plastic flow
p_n = -1.0 / 3.0 * (tauXxYyZz.x + tauXxYyZz.y + tauXxYyZz.z);
tauXxYyZz.x += p_n;
tauXxYyZz.y += p_n;
tauXxYyZz.z += p_n;
p_tr = -1.0 / 3.0 * (updatedTauXxYyZz.x + updatedTauXxYyZz.y + updatedTauXxYyZz.z);
updatedTauXxYyZz.x += p_tr;
updatedTauXxYyZz.y += p_tr;
updatedTauXxYyZz.z += p_tr;
Real tau_tr = square(updatedTauXxYyZz.x) + square(updatedTauXxYyZz.y) +
square(updatedTauXxYyZz.z) + 2.0 * square(updatedTauXyXzYz.x) +
2.0 * square(updatedTauXyXzYz.y) + 2.0 * square(updatedTauXyXzYz.z);
Real tau_n = square(tauXxYyZz.x) + square(tauXxYyZz.y) + square(tauXxYyZz.z) +
2.0 * square(tauXyXzYz.x) + 2.0 * square(tauXyXzYz.y) + 2.0 * square(tauXyXzYz.z);
tau_tr = sqrt(0.5 * tau_tr);
tau_n = sqrt(0.5 * tau_n);
Real Chi = abs(tau_tr - tau_n) * paramsD.INV_G_shear / dT;
// should use the positive magnitude according to "A
// constitutive law for dense granular flows" Nature 2006
Real mu_s = paramsD.mu_fric_s;
Real mu_2 = paramsD.mu_fric_2;
// Real s_0 = mu_s * p_tr;
// Real s_2 = mu_2 * p_tr;
// Real xi = 1.1;
Real dia = paramsD.ave_diam;
Real I0 = paramsD.mu_I0; // xi*dia*sqrt(rhoPresMu.x);//
Real I = Chi * dia * sqrt( paramsD.rho0 / ( p_tr + 1.0e9 ) );
Real coh = paramsD.Coh_coeff;
// Real Chi_cri = 0.1;
// if (Chi < Chi_cri){
// coh = paramsD.Coh_coeff * (1.0 - sin(-1.57 + 3.14 * (Chi / Chi_cri))) / 2.0;
// // coh = paramsD.Coh_coeff * (1.0 - I / I_cri);
// } else {
// coh = 0.0;
// }
Real inv_mus = 1.0 / paramsD.mu_fric_s;
Real p_cri = - coh * inv_mus;
if (p_tr > p_cri) {
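                // mu(I) rheology: the effective friction coefficient grows from
                // mu_s toward mu_2 with the inertial number I (the small epsilons
                // avoid division by zero).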
Real mu = mu_s + (mu_2 - mu_s) * (I + 1.0e-9) / (I0 + I + 1.0e-9);
// Real G0 = paramsD.G_shear;
// Real alpha = xi*G0*I0*(dT)*sqrt(p_tr);
// Real B0 = s_2 + tau_tr + alpha;
// Real H0 = s_2*tau_tr + s_0*alpha;
// Real tau_n1 = (B0+sqrt(B0*B0-4*H0))/(2*H0+1e-9);
// if(tau_tr>s_0){
// Real coeff = tau_n1/(tau_tr+1e-9);
// updatedTauXxYyZz = updatedTauXxYyZz*coeff;
// updatedTauXyXzYz = updatedTauXyXzYz*coeff;
// }
Real tau_max = p_tr * mu + coh; // p_tr*paramsD.Q_FA;
// should use tau_max instead of s_0 according to
// "A constitutive law for dense granular flows" Nature 2006
if (tau_tr > tau_max) {
Real coeff = tau_max / (tau_tr + 1e-9);
updatedTauXxYyZz = updatedTauXxYyZz * coeff;
updatedTauXyXzYz = updatedTauXyXzYz * coeff;
}
}
// Set stress to zero if the pressure is smaller than the threshold
if (p_tr < p_cri) {
updatedTauXxYyZz = mR3(0.0);
updatedTauXyXzYz = mR3(0.0);
p_tr = 0.0;
// Real coeff = abs(p_cri / (p_tr + 1e-9));
// if (p_tr < 2.0 * p_cri){
// coeff = 0.0;
// } else {
// coeff = abs(1.0 - (p_tr - p_cri) / p_cri);
// }
// updatedTauXxYyZz = updatedTauXxYyZz * coeff;
// updatedTauXyXzYz = updatedTauXyXzYz * coeff;
// p_tr = p_cri * coeff;
}
// Set stress to zero if the particle is close to free surface
if (freeSurfaceIdD[index] == 1) {
updatedTauXxYyZz = mR3(0.0);
updatedTauXyXzYz = mR3(0.0);
p_tr = 0.0;
}
if (paramsD.output_length == 2) {
Real tau_tr = square(updatedTauXxYyZz.x) + square(updatedTauXxYyZz.y) +
square(updatedTauXxYyZz.z) + 2.0 * (square(updatedTauXyXzYz.x) +
square(updatedTauXyXzYz.y) + square(updatedTauXyXzYz.z));
tau_tr = sqrt(0.5 * tau_tr);
sr_tau_I_mu_iD[index].y = tau_tr;
}
tauXxYyZzD[index] = updatedTauXxYyZz - mR3(p_tr);
tauXyXzYzD[index] = updatedTauXyXzYz;
}
//-------------
// ** position
//-------------
Real3 vel_XSPH = velMasD[index] + vel_XSPH_D[index]; // paramsD.EPS_XSPH *
Real3 posRad = mR3(posRadD[index]);
Real3 updatedPositon = posRad + vel_XSPH * dT;
if (!(isfinite(updatedPositon.x) && isfinite(updatedPositon.y) && isfinite(updatedPositon.z))) {
printf("Error! particle position is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !\n");
*isErrorD = true;
return;
}
posRadD[index] = mR4(updatedPositon, h);
//-------------
// ** velocity
//-------------
// Note that the velocity update should not use the XSPH contribution
// It adds dissipation to the solution, and provides numerical damping
Real3 velMas = velMasD[index] + 0.0 * vel_XSPH_D[index]; // paramsD.EPS_XSPH * vel_XSPH_D[index]
Real3 updatedVelocity = velMas + mR3(derivVelRho) * dT;
velMasD[index] = updatedVelocity;
//-------------
// ** density
//-------------
if (paramsD.elastic_SPH) { // This is only implemented for granular material
rhoPresMu.y = p_tr;
rhoPresMu.x = paramsD.rho0;
} else {
Real rho2 = rhoPresMu.x + derivVelRho.w * dT;
rhoPresMu.y = Eos(rho2, rhoPresMu.w);
rhoPresMu.x = rho2;
}
if (!(isfinite(rhoPresMu.x) && isfinite(rhoPresMu.y) && isfinite(rhoPresMu.z) && isfinite(rhoPresMu.w))) {
printf("Error! particle rho pressure is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !\n");
*isErrorD = true;
return;
}
rhoPresMuD[index] = rhoPresMu;
}
    // Important note: the derivVelRhoD calculated by ChFsiForceExplicitSPH is the negative of the
    // actual time derivative. That is important so that derivVelRhoD stays equal to force/mass for
    // the FSI forces. To obtain the force f = m * dv/dt one would scale by the marker mass:
    // derivVelRhoD[index] *= paramsD.markerMass;
}
//------------------------------------------------------------------------------
__global__ void Update_Fluid_State(Real3* new_vel,
Real4* posRad,
Real3* velMas,
Real4* rhoPreMu,
int4 updatePortion,
double dT,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= updatePortion.y)
return;
velMas[i_idx] = new_vel[i_idx];
Real3 newpos = mR3(posRad[i_idx]) + dT * velMas[i_idx];
Real h = posRad[i_idx].w;
posRad[i_idx] = mR4(newpos, h);
if (!(isfinite(posRad[i_idx].x) &&
isfinite(posRad[i_idx].y) && isfinite(posRad[i_idx].z))) {
printf("Error! particle %d position is NAN: thrown from UpdateFluidDKernel %f,%f,%f,%f\n",
i_idx, posRad[i_idx].x, posRad[i_idx].y, posRad[i_idx].z, posRad[i_idx].w);
}
if (!(isfinite(rhoPreMu[i_idx].x) &&
isfinite(rhoPreMu[i_idx].y) && isfinite(rhoPreMu[i_idx].z))) {
printf("Error! particle %d rhoPreMu is NAN: thrown from UpdateFluidDKernel ! %f,%f,%f,%f\n",
i_idx, rhoPreMu[i_idx].x, rhoPreMu[i_idx].y, rhoPreMu[i_idx].z, rhoPreMu[i_idx].w);
}
if (!(isfinite(velMas[i_idx].x) &&
isfinite(velMas[i_idx].y) && isfinite(velMas[i_idx].z))) {
printf("Error! particle %d velocity is NAN: thrown from UpdateFluidDKernel !%f,%f,%f\n",
i_idx, velMas[i_idx].x, velMas[i_idx].y, velMas[i_idx].z);
}
}
// -----------------------------------------------------------------------------
// Kernel for updating the density.
// It calculates the density of the particle. It does include the normalization
// close to the boundaries and free surface.
__global__ void ReCalcDensityD_F1(Real4* dummySortedRhoPreMu,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
uint* gridMarkerIndex,
uint* cellStart,
uint* cellEnd) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
// read particle data from sorted arrays
Real3 posRadA = mR3(sortedPosRad[index]);
Real4 rhoPreMuA = sortedRhoPreMu[index];
// get address in grid
int3 gridPos = calcGridPos(posRadA);
Real numerator = 0.0;
Real denominator = 0.0;
// examine neighbouring cells
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
collideCellDensityReInit(numerator, denominator, neighbourPos, index,
posRadA, sortedPosRad, sortedVelMas, sortedRhoPreMu, cellStart, cellEnd);
}
}
}
rhoPreMuA.x = numerator; // denominator;
// rhoPreMuA.y = Eos(rhoPreMuA.x, rhoPreMuA.w);
dummySortedRhoPreMu[index] = rhoPreMuA;
}
// -----------------------------------------------------------------------------
// Kernel for updating the activity of all particles.
__global__ void UpdateActivityD(Real4* posRadD,
Real3* velMasD,
Real3* posRigidBodiesD,
Real3* pos_fsi_fea_D,
uint* activityIdentifierD,
uint* extendedActivityIdD,
int2 updatePortion,
Real Time,
volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
index += updatePortion.x;
if (index >= updatePortion.y)
return;
// Set the particle as an active particle
activityIdentifierD[index] = 1;
extendedActivityIdD[index] = 1;
    // During the settling phase, all particles are kept active
if (Time < paramsD.settlingTime)
return;
size_t numRigidBodies = numObjectsD.numRigidBodies;
size_t numFlexNodes = numObjectsD.numFlexNodes;
size_t numTotal = numRigidBodies + numFlexNodes;
// Check the activity of this particle
uint isNotActive = 0;
uint isNotExtended = 0;
Real3 Acdomain = paramsD.bodyActiveDomain;
Real3 ExAcdomain = paramsD.bodyActiveDomain +
mR3(2 * RESOLUTION_LENGTH_MULT * paramsD.HSML);
Real3 posRadA = mR3(posRadD[index]);
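    // A particle stays active only if it lies inside the active domain of at
    // least one rigid body or flexible node; the extended domain adds a margin
    // of roughly one kernel support radius for the neighbor search.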
for (uint num = 0; num < numRigidBodies; num++) {
Real3 detPos = posRadA - posRigidBodiesD[num];
if (abs(detPos.x) > Acdomain.x || abs(detPos.y) > Acdomain.y ||
abs(detPos.z) > Acdomain.z)
isNotActive = isNotActive + 1;
if (abs(detPos.x) > ExAcdomain.x || abs(detPos.y) > ExAcdomain.y ||
abs(detPos.z) > ExAcdomain.z)
isNotExtended = isNotExtended + 1;
}
for (uint num = 0; num < numFlexNodes; num++) {
Real3 detPos = posRadA - pos_fsi_fea_D[num];
if (abs(detPos.x) > Acdomain.x || abs(detPos.y) > Acdomain.y ||
abs(detPos.z) > Acdomain.z)
isNotActive = isNotActive + 1;
if (abs(detPos.x) > ExAcdomain.x || abs(detPos.y) > ExAcdomain.y ||
abs(detPos.z) > ExAcdomain.z)
isNotExtended = isNotExtended + 1;
}
// Set the particle as an inactive particle if needed
if (isNotActive == numTotal && numTotal > 0) {
activityIdentifierD[index] = 0;
velMasD[index] = mR3(0.0);
}
if (isNotExtended == numTotal && numTotal > 0)
extendedActivityIdD[index] = 0;
return;
}
// -----------------------------------------------------------------------------
// CLASS FOR FLUID DYNAMICS SYSTEM
// -----------------------------------------------------------------------------
ChFluidDynamics::ChFluidDynamics(std::shared_ptr<ChBce> otherBceWorker,
ChSystemFsi_impl& otherFsiSystem,
std::shared_ptr<SimParams> otherParamsH,
std::shared_ptr<ChCounters> otherNumObjects,
TimeIntegrator type,
bool verb)
: fsiSystem(otherFsiSystem),
paramsH(otherParamsH),
numObjectsH(otherNumObjects),
integrator_type(type),
verbose(verb) {
switch (integrator_type) {
case TimeIntegrator::I2SPH:
forceSystem = chrono_types::make_shared<ChFsiForceI2SPH>(
otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
if (verbose) {
cout << "============================================" << endl;
cout << "====== Created an I2SPH framework ======" << endl;
cout << "============================================" << endl;
}
break;
case TimeIntegrator::IISPH:
forceSystem = chrono_types::make_shared<ChFsiForceIISPH>(
otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
if (verbose) {
cout << "============================================" << endl;
cout << "====== Created an IISPH framework ======" << endl;
cout << "============================================" << endl;
}
break;
case TimeIntegrator::EXPLICITSPH:
forceSystem = chrono_types::make_shared<ChFsiForceExplicitSPH>(
otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
if (verbose) {
cout << "============================================" << endl;
cout << "====== Created a WCSPH framework =======" << endl;
cout << "============================================" << endl;
}
break;
// Extend this function with your own linear solvers
default:
forceSystem = chrono_types::make_shared<ChFsiForceExplicitSPH>(
otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
cout << "Selected integrator type not implemented, reverting back to WCSPH" << endl;
}
}
// -----------------------------------------------------------------------------
ChFluidDynamics::~ChFluidDynamics() {}
// -----------------------------------------------------------------------------
void ChFluidDynamics::Initialize() {
forceSystem->Initialize();
cudaMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams));
cudaMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters));
cudaMemcpyFromSymbol(paramsH.get(), paramsD, sizeof(SimParams));
}
// -----------------------------------------------------------------------------
void ChFluidDynamics::IntegrateSPH(std::shared_ptr<SphMarkerDataD> sphMarkersD2,
std::shared_ptr<SphMarkerDataD> sphMarkersD1,
std::shared_ptr<FsiBodiesDataD> fsiBodiesD,
std::shared_ptr<FsiMeshDataD> fsiMeshD,
Real dT,
Real Time) {
if (GetIntegratorType() == TimeIntegrator::EXPLICITSPH) {
this->UpdateActivity(sphMarkersD1, sphMarkersD2, fsiBodiesD, fsiMeshD, Time);
forceSystem->ForceSPH(sphMarkersD2, fsiBodiesD, fsiMeshD);
} else
forceSystem->ForceSPH(sphMarkersD1, fsiBodiesD, fsiMeshD);
if (integrator_type == TimeIntegrator::IISPH)
this->UpdateFluid_Implicit(sphMarkersD2);
else if (GetIntegratorType() == TimeIntegrator::EXPLICITSPH)
this->UpdateFluid(sphMarkersD1, dT);
this->ApplyBoundarySPH_Markers(sphMarkersD2);
}
// -----------------------------------------------------------------------------
void ChFluidDynamics::UpdateActivity(std::shared_ptr<SphMarkerDataD> sphMarkersD1,
std::shared_ptr<SphMarkerDataD> sphMarkersD2,
std::shared_ptr<FsiBodiesDataD> fsiBodiesD,
std::shared_ptr<FsiMeshDataD> fsiMeshD,
Real Time) {
// Update portion of the SPH particles (should be all particles here)
int2 updatePortion = mI2(0, (int)numObjectsH->numAllMarkers);
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
cudaMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
//------------------------
uint numBlocks, numThreads;
computeGridSize(updatePortion.y - updatePortion.x, 256, numBlocks, numThreads);
UpdateActivityD<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD2->posRadD), mR3CAST(sphMarkersD1->velMasD),
mR3CAST(fsiBodiesD->posRigid_fsiBodies_D),
mR3CAST(fsiMeshD->pos_fsi_fea_D),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD),
U1CAST(fsiSystem.fsiGeneralData->extendedActivityIdD),
updatePortion, Time, isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
//------------------------
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true)
throw std::runtime_error("Error! program crashed in UpdateActivityD!\n");
cudaFree(isErrorD);
free(isErrorH);
}
// -----------------------------------------------------------------------------
void ChFluidDynamics::UpdateFluid(std::shared_ptr<SphMarkerDataD> sphMarkersD, Real dT) {
// Update portion of the SPH particles (should be fluid particles only here)
int2 updatePortion = mI2(0, fsiSystem.fsiGeneralData->referenceArray[0].y);
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
cudaMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
//------------------------
uint numBlocks, numThreads;
computeGridSize(updatePortion.y - updatePortion.x, 256, numBlocks, numThreads);
UpdateFluidD<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD->posRadD),
mR3CAST(sphMarkersD->velMasD),
mR4CAST(sphMarkersD->rhoPresMuD),
mR3CAST(sphMarkersD->tauXxYyZzD),
mR3CAST(sphMarkersD->tauXyXzYzD),
mR3CAST(fsiSystem.fsiGeneralData->vel_XSPH_D),
mR4CAST(fsiSystem.fsiGeneralData->derivVelRhoD),
mR3CAST(fsiSystem.fsiGeneralData->derivTauXxYyZzD),
mR3CAST(fsiSystem.fsiGeneralData->derivTauXyXzYzD),
mR4CAST(fsiSystem.fsiGeneralData->sr_tau_I_mu_i),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD),
U1CAST(fsiSystem.fsiGeneralData->freeSurfaceIdD),
updatePortion, dT, isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
//------------------------
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true)
throw std::runtime_error("Error! program crashed in UpdateFluidD!\n");
cudaFree(isErrorD);
free(isErrorH);
}
// -----------------------------------------------------------------------------
void ChFluidDynamics::UpdateFluid_Implicit(std::shared_ptr<SphMarkerDataD> sphMarkersD) {
uint numThreads, numBlocks;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
int haveGhost = (numObjectsH->numGhostMarkers > 0) ? 1 : 0;
int haveHelper = (numObjectsH->numHelperMarkers > 0) ? 1 : 0;
int4 updatePortion = mI4(fsiSystem.fsiGeneralData->referenceArray[haveHelper].x,
fsiSystem.fsiGeneralData->referenceArray[haveHelper + haveGhost].y, 0, 0);
cout << "time step in UpdateFluid_Implicit " << paramsH->dT << endl;
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
cudaMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
Update_Fluid_State<<<numBlocks, numThreads>>>(
mR3CAST(fsiSystem.fsiGeneralData->vel_XSPH_D),
mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD),
mR4CAST(sphMarkersD->rhoPresMuD), updatePortion, paramsH->dT, isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true)
throw std::runtime_error("Error! program crashed in Update_Fluid_State!\n");
cudaFree(isErrorD);
free(isErrorH);
}
// -----------------------------------------------------------------------------
// Apply periodic boundary conditions in x, y, and z directions
void ChFluidDynamics::ApplyBoundarySPH_Markers(std::shared_ptr<SphMarkerDataD> sphMarkersD) {
uint numBlocks, numThreads;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
ApplyPeriodicBoundaryXKernel<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
cudaDeviceSynchronize();
cudaCheckError();
ApplyPeriodicBoundaryYKernel<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
cudaDeviceSynchronize();
cudaCheckError();
ApplyPeriodicBoundaryZKernel<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
cudaDeviceSynchronize();
cudaCheckError();
// ApplyOutOfBoundaryKernel<<<numBlocks, numThreads>>>
// (mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD), mR3CAST(sphMarkersD->velMasD));
// cudaDeviceSynchronize();
// cudaCheckError();
}
// -----------------------------------------------------------------------------
// Apply periodic boundary conditions in the y and z directions.
// The inlet/outlet BC is applied in the x direction.
// This function needs to be tested.
void ChFluidDynamics::ApplyModifiedBoundarySPH_Markers(std::shared_ptr<SphMarkerDataD> sphMarkersD) {
uint numBlocks, numThreads;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
ApplyInletBoundaryXKernel<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD),
mR4CAST(sphMarkersD->rhoPresMuD));
cudaDeviceSynchronize();
cudaCheckError();
// these are useful anyway for out of bound particles
ApplyPeriodicBoundaryYKernel<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
cudaDeviceSynchronize();
cudaCheckError();
ApplyPeriodicBoundaryZKernel<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
cudaDeviceSynchronize();
cudaCheckError();
}
// -----------------------------------------------------------------------------
void ChFluidDynamics::DensityReinitialization() {
uint numBlocks, numThreads;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
thrust::device_vector<Real4> dummySortedRhoPreMu(numObjectsH->numAllMarkers);
thrust::fill(dummySortedRhoPreMu.begin(), dummySortedRhoPreMu.end(), mR4(0.0));
ReCalcDensityD_F1<<<numBlocks, numThreads>>>(
mR4CAST(dummySortedRhoPreMu),
mR4CAST(fsiSystem.sortedSphMarkersD->posRadD),
mR3CAST(fsiSystem.sortedSphMarkersD->velMasD),
mR4CAST(fsiSystem.sortedSphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.markersProximityD->gridMarkerIndexD),
U1CAST(fsiSystem.markersProximityD->cellStartD),
U1CAST(fsiSystem.markersProximityD->cellEndD));
cudaDeviceSynchronize();
cudaCheckError();
ChFsiForce::CopySortedToOriginal_NonInvasive_R4(
fsiSystem.sphMarkersD1->rhoPresMuD, dummySortedRhoPreMu,
fsiSystem.markersProximityD->gridMarkerIndexD);
ChFsiForce::CopySortedToOriginal_NonInvasive_R4(
fsiSystem.sphMarkersD2->rhoPresMuD, dummySortedRhoPreMu,
fsiSystem.markersProximityD->gridMarkerIndexD);
dummySortedRhoPreMu.clear();
}
} // namespace fsi
} // end namespace chrono
|
e92e5b6500524960d53735247f88c096a442c89f.hip | // !!! This is a file automatically generated by hipify!!!
/* All or part of this file was contributed by Intel under license:
* Copyright (C) 2017-2018 Intel Corporation
* SPDX-License-Identifier: MIT
*/
#include <iostream>
#include "translator/nth_element.h"
#include <hip/hip_runtime.h>
#include "tensors/gpu/cuda_helpers.h"
namespace marian {
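// Helper used by the kernels below: one step of the shared-memory argmax
// reduction at a fixed stride n, updating both the value and its index. It is
// expanded for strides 32..1 to unroll the last warp-sized steps of the tree
// reduction.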
#define UNROLL_MAXARG_LOOP(n, max) \
if(tid < (n) && tid + (n) < (max)) { \
if(sdata[tid + (n)] > sdata[tid]) { \
sdata[tid] = sdata[tid + (n)]; \
indices[tid] = indices[tid + (n)]; \
} \
}
__global__ void gMaxElement(float* d_out,
int* d_ind,
float* d_in,
int numBatches,
int* batchFirstElementIdxs) {
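  // For every batch, each block scans a strided slice of d_in and writes its
  // block-wise maximum value and index to d_out / d_ind (one entry per block and
  // batch); gMaxElementUpdate then reduces these per-block candidates.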
extern __shared__ float sdata[];
__shared__ int indices[512];
int tid = threadIdx.x;
for(int batchIdx = 0; batchIdx < numBatches; ++batchIdx) {
int begin = batchFirstElementIdxs[batchIdx];
int end = batchFirstElementIdxs[batchIdx + 1];
int i = begin + blockIdx.x * (blockDim.x * 2) + tid;
sdata[tid] = -3.40282e+38f;
if(i < end) {
sdata[tid] = d_in[i];
indices[tid] = i;
}
if(i + blockDim.x < end) {
float a = d_in[i];
float b = d_in[i + blockDim.x];
if(a > b) {
sdata[tid] = a;
indices[tid] = i;
} else {
sdata[tid] = b;
indices[tid] = i + blockDim.x;
}
}
while(i + 2 * gridDim.x * blockDim.x < end) {
i += 2 * gridDim.x * blockDim.x;
float a = d_in[i];
if(a > sdata[tid]) {
sdata[tid] = a;
indices[tid] = i;
}
if(i + blockDim.x < end) {
float b = d_in[i + blockDim.x];
if(b > sdata[tid]) {
sdata[tid] = b;
indices[tid] = i + blockDim.x;
}
}
}
__syncthreads();
for(int s = (blockDim.x >> 1); s > 32; s >>= 1) {
if(tid < s && tid + s < end) {
if(sdata[tid + s] > sdata[tid]) {
sdata[tid] = sdata[tid + s];
indices[tid] = indices[tid + s];
}
}
__syncthreads();
}
UNROLL_MAXARG_LOOP(32, end);
UNROLL_MAXARG_LOOP(16, end);
UNROLL_MAXARG_LOOP(8, end);
UNROLL_MAXARG_LOOP(4, end);
UNROLL_MAXARG_LOOP(2, end);
UNROLL_MAXARG_LOOP(1, end);
if(tid == 0) {
d_out[blockIdx.x + batchIdx * gridDim.x] = sdata[0];
d_ind[blockIdx.x + batchIdx * gridDim.x] = indices[0];
}
__syncthreads();
}
}
__global__ void gMaxElementUpdate(float* binCosts,
int* binIdxs,
float* probs,
int* batchFirstElements,
float* outCosts,
int* outIdxs,
int* cummulatedBeamSizes,
int NUM_BLOCKS) {
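  // For each beam slot of this batch: pick the best bin, emit its cost and index,
  // invalidate that element in probs (set to -FLT_MAX), and rescan only the
  // affected bin so the next iteration sees an updated per-bin maximum.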
extern __shared__ float sdata[];
__shared__ int indices[512];
__shared__ float bestBinCost;
__shared__ int bestBinCostIdx;
const int tid = threadIdx.x;
const int batchIdx = blockIdx.x;
const int N = batchFirstElements[batchIdx + 1] - batchFirstElements[batchIdx];
int num_bins = int(N / (2 * 512)) + int(N % (2 * 512) != 0);
if(num_bins > 500) {
num_bins = 500;
}
for(int pos = cummulatedBeamSizes[batchIdx];
pos < cummulatedBeamSizes[batchIdx + 1];
++pos) {
int i = tid;
sdata[tid] = -3.40282e+38f;
if(i < num_bins) {
sdata[tid] = binCosts[batchIdx * NUM_BLOCKS + i];
indices[tid] = i;
}
if(i + blockDim.x < num_bins) {
float a = binCosts[batchIdx * NUM_BLOCKS + i];
float b = binCosts[batchIdx * NUM_BLOCKS + i + blockDim.x];
if(a > b) {
sdata[tid] = a;
indices[tid] = i;
} else {
sdata[tid] = b;
indices[tid] = i + blockDim.x;
}
}
while(i + 2 * blockDim.x < num_bins) {
i += 2 * blockDim.x;
float a = binCosts[batchIdx * NUM_BLOCKS + i];
if(a > sdata[tid]) {
sdata[tid] = a;
indices[tid] = i;
}
if(i + blockDim.x < num_bins) {
float b = binCosts[batchIdx * NUM_BLOCKS + i + blockDim.x];
if(b > sdata[tid]) {
sdata[tid] = b;
indices[tid] = i + blockDim.x;
}
}
}
__syncthreads();
for(int s = (blockDim.x >> 1); s > 32; s >>= 1) {
if(tid < s && tid + s < num_bins) {
if(sdata[tid + s] > sdata[tid]) {
sdata[tid] = sdata[tid + s];
indices[tid] = indices[tid + s];
}
}
__syncthreads();
}
UNROLL_MAXARG_LOOP(32, num_bins);
UNROLL_MAXARG_LOOP(16, num_bins);
UNROLL_MAXARG_LOOP(8, num_bins);
UNROLL_MAXARG_LOOP(4, num_bins);
UNROLL_MAXARG_LOOP(2, num_bins);
UNROLL_MAXARG_LOOP(1, num_bins);
if(tid == 0) {
bestBinCost = sdata[0];
bestBinCostIdx = batchIdx * NUM_BLOCKS + indices[0];
probs[binIdxs[bestBinCostIdx]] = -3.40282e+38f;
outIdxs[pos] = binIdxs[bestBinCostIdx];
outCosts[pos] = bestBinCost;
}
__syncthreads();
i = batchFirstElements[batchIdx]
+ (bestBinCostIdx - batchIdx * NUM_BLOCKS) * (blockDim.x * 2) + tid;
const int dist = num_bins * 2 * blockDim.x;
sdata[tid] = -3.40282e+38f;
if(i < batchFirstElements[batchIdx + 1]) {
sdata[tid] = probs[i];
indices[tid] = i;
}
if(i + blockDim.x < batchFirstElements[batchIdx + 1]) {
float a = probs[i];
float b = probs[i + blockDim.x];
if(a > b) {
sdata[tid] = a;
indices[tid] = i;
} else {
sdata[tid] = b;
indices[tid] = i + blockDim.x;
}
}
while(i + dist < batchFirstElements[batchIdx + 1]) {
i += dist;
float a = probs[i];
if(a > sdata[tid]) {
sdata[tid] = a;
indices[tid] = i;
}
if(i + blockDim.x < batchFirstElements[batchIdx + 1]) {
float b = probs[i + blockDim.x];
if(b > sdata[tid]) {
sdata[tid] = b;
indices[tid] = i + blockDim.x;
}
}
}
__syncthreads();
for(int s = (blockDim.x >> 1); s > 32; s >>= 1) {
if(tid < s && tid + s < batchFirstElements[batchIdx + 1]) {
if(sdata[tid + s] > sdata[tid]) {
sdata[tid] = sdata[tid + s];
indices[tid] = indices[tid + s];
}
}
__syncthreads();
}
UNROLL_MAXARG_LOOP(32, batchFirstElements[batchIdx + 1]);
UNROLL_MAXARG_LOOP(16, batchFirstElements[batchIdx + 1]);
UNROLL_MAXARG_LOOP(8, batchFirstElements[batchIdx + 1]);
UNROLL_MAXARG_LOOP(4, batchFirstElements[batchIdx + 1]);
UNROLL_MAXARG_LOOP(2, batchFirstElements[batchIdx + 1]);
UNROLL_MAXARG_LOOP(1, batchFirstElements[batchIdx + 1]);
if(tid == 0) {
binCosts[bestBinCostIdx] = sdata[0];
binIdxs[bestBinCostIdx] = indices[0];
}
__syncthreads();
}
}
__global__ void gGetValueByKey(float* d_in, float* d_out, int* indeces, int n) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < n) {
int index = indeces[tid];
d_out[tid] = d_in[index];
}
}
NthElementGPU::NthElementGPU(size_t maxBeamSize,
size_t maxBatchSize,
DeviceId deviceId)
: deviceId_(deviceId),
NUM_BLOCKS(::min(
500,
int(maxBeamSize* MAX_VOCAB_SIZE / (2 * BLOCK_SIZE))
+ int(maxBeamSize* MAX_VOCAB_SIZE % (2 * BLOCK_SIZE) != 0))) {
// std::cerr << "NthElement::NthElement" << std::endl;
hipSetDevice(deviceId_.no);
CUDA_CHECK(
hipMalloc((void**)&d_ind, maxBatchSize * NUM_BLOCKS * sizeof(int)));
CUDA_CHECK(
hipMalloc((void**)&d_out, maxBatchSize * NUM_BLOCKS * sizeof(float)));
CUDA_CHECK(
hipMalloc((void**)&d_res_idx, maxBatchSize * maxBeamSize * sizeof(int)));
CUDA_CHECK(
hipMalloc((void**)&d_res, maxBatchSize * maxBeamSize * sizeof(float)));
CUDA_CHECK(hipHostMalloc((void**)&h_res,
maxBeamSize * maxBatchSize * sizeof(float),
hipHostMallocDefault));
CUDA_CHECK(hipHostMalloc((void**)&h_res_idx,
maxBeamSize * maxBatchSize * sizeof(int),
hipHostMallocDefault));
CUDA_CHECK(hipMalloc((void**)&d_breakdown, maxBeamSize * sizeof(float)));
CUDA_CHECK(
hipMalloc((void**)&d_batchPosition, (maxBatchSize + 1) * sizeof(int)));
CUDA_CHECK(
hipMalloc((void**)&d_cumBeamSizes, (maxBatchSize + 1) * sizeof(int)));
}
NthElementGPU::~NthElementGPU() {
hipSetDevice(deviceId_.no);
CUDA_CHECK(hipFree(d_ind));
CUDA_CHECK(hipFree(d_out));
CUDA_CHECK(hipFree(d_res_idx));
CUDA_CHECK(hipFree(d_res));
CUDA_CHECK(hipHostFree(h_res));
CUDA_CHECK(hipHostFree(h_res_idx));
CUDA_CHECK(hipFree(d_breakdown));
CUDA_CHECK(hipFree(d_batchPosition));
CUDA_CHECK(hipFree(d_cumBeamSizes));
}
void NthElementGPU::getNBestList(float* probs,
const std::vector<int>& batchFirstElementIdxs,
const std::vector<int>& cummulatedBeamSizes) {
hipSetDevice(deviceId_.no);
CUDA_CHECK(hipMemcpyAsync(d_batchPosition,
batchFirstElementIdxs.data(),
batchFirstElementIdxs.size() * sizeof(int),
hipMemcpyHostToDevice,
/* stream_ */ 0));
CUDA_CHECK(hipMemcpyAsync(d_cumBeamSizes,
cummulatedBeamSizes.data(),
cummulatedBeamSizes.size() * sizeof(int),
hipMemcpyHostToDevice,
/* stream_ */ 0));
const int numBatches = batchFirstElementIdxs.size() - 1;
hipLaunchKernelGGL(( gMaxElement), dim3(NUM_BLOCKS),
dim3(BLOCK_SIZE),
BLOCK_SIZE * sizeof(float),
/* stream_ */ 0,
d_out, d_ind, probs, numBatches, d_batchPosition);
hipLaunchKernelGGL(( gMaxElementUpdate), dim3(numBatches),
dim3(BLOCK_SIZE),
BLOCK_SIZE * sizeof(float),
/* stream_ */ 0, d_out,
d_ind,
probs,
d_batchPosition,
d_res,
d_res_idx,
d_cumBeamSizes,
NUM_BLOCKS);
}
void NthElementGPU::getNBestList(const std::vector<size_t>& beamSizes,
Tensor Probs,
std::vector<float>& outCosts,
std::vector<unsigned>& outKeys,
const bool isFirst) {
hipSetDevice(deviceId_.no);
std::vector<int> cummulatedBeamSizes(beamSizes.size() + 1, 0);
std::vector<int> batchFirstElementIdxs(beamSizes.size() + 1, 0);
const size_t vocabSize = Probs->shape()[-1];
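  // cummulatedBeamSizes gives each batch's slot range in the output arrays, while
  // batchFirstElementIdxs marks where each batch's scores start inside Probs
  // (one hypothesis row per batch on the first step, beamSize rows afterwards).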
for(size_t i = 0; i < beamSizes.size(); ++i) {
cummulatedBeamSizes[i + 1] = cummulatedBeamSizes[i] + beamSizes[i];
batchFirstElementIdxs[i + 1]
+= ((isFirst) ? (i + 1) : cummulatedBeamSizes[i + 1]) * vocabSize;
}
getNBestList(Probs->data(), batchFirstElementIdxs, cummulatedBeamSizes);
GetPairs(cummulatedBeamSizes.back(), outKeys, outCosts);
}
void NthElementGPU::GetPairs(size_t number,
std::vector<unsigned>& outKeys,
std::vector<float>& outValues) {
hipSetDevice(deviceId_.no);
CUDA_CHECK(hipMemcpyAsync(h_res,
d_res,
number * sizeof(float),
hipMemcpyDeviceToHost,
/* stream_ */ 0));
CUDA_CHECK(hipMemcpyAsync(h_res_idx,
d_res_idx,
number * sizeof(int),
hipMemcpyDeviceToHost,
/* stream_ */ 0));
hipStreamSynchronize(/* stream_ */ 0);
for(size_t i = 0; i < number; ++i) {
outKeys.push_back(h_res_idx[i]);
outValues.push_back(h_res[i]);
}
lastN = number;
}
void NthElementGPU::getValueByKey(std::vector<float>& out, float* d_in) {
hipSetDevice(deviceId_.no);
hipLaunchKernelGGL(( gGetValueByKey), dim3(1), dim3(lastN), 0, /* stream_ */ 0,
d_in, d_breakdown, h_res_idx, lastN);
CUDA_CHECK(hipMemcpyAsync(out.data(),
d_breakdown,
lastN * sizeof(float),
hipMemcpyDeviceToHost,
/* stream_ */ 0));
CUDA_CHECK(hipStreamSynchronize(/* stream_ */ 0));
}
}
| e92e5b6500524960d53735247f88c096a442c89f.cu | /* All or part of this file was contributed by Intel under license:
* Copyright (C) 2017-2018 Intel Corporation
* SPDX-License-Identifier: MIT
*/
#include <iostream>
#include "translator/nth_element.h"
#include <cuda.h>
#include "tensors/gpu/cuda_helpers.h"
namespace marian {
#define UNROLL_MAXARG_LOOP(n, max) \
if(tid < (n) && tid + (n) < (max)) { \
if(sdata[tid + (n)] > sdata[tid]) { \
sdata[tid] = sdata[tid + (n)]; \
indices[tid] = indices[tid + (n)]; \
} \
}
__global__ void gMaxElement(float* d_out,
int* d_ind,
float* d_in,
int numBatches,
int* batchFirstElementIdxs) {
extern __shared__ float sdata[];
__shared__ int indices[512];
int tid = threadIdx.x;
for(int batchIdx = 0; batchIdx < numBatches; ++batchIdx) {
int begin = batchFirstElementIdxs[batchIdx];
int end = batchFirstElementIdxs[batchIdx + 1];
int i = begin + blockIdx.x * (blockDim.x * 2) + tid;
sdata[tid] = -3.40282e+38f;
if(i < end) {
sdata[tid] = d_in[i];
indices[tid] = i;
}
if(i + blockDim.x < end) {
float a = d_in[i];
float b = d_in[i + blockDim.x];
if(a > b) {
sdata[tid] = a;
indices[tid] = i;
} else {
sdata[tid] = b;
indices[tid] = i + blockDim.x;
}
}
while(i + 2 * gridDim.x * blockDim.x < end) {
i += 2 * gridDim.x * blockDim.x;
float a = d_in[i];
if(a > sdata[tid]) {
sdata[tid] = a;
indices[tid] = i;
}
if(i + blockDim.x < end) {
float b = d_in[i + blockDim.x];
if(b > sdata[tid]) {
sdata[tid] = b;
indices[tid] = i + blockDim.x;
}
}
}
__syncthreads();
for(int s = (blockDim.x >> 1); s > 32; s >>= 1) {
if(tid < s && tid + s < end) {
if(sdata[tid + s] > sdata[tid]) {
sdata[tid] = sdata[tid + s];
indices[tid] = indices[tid + s];
}
}
__syncthreads();
}
UNROLL_MAXARG_LOOP(32, end);
UNROLL_MAXARG_LOOP(16, end);
UNROLL_MAXARG_LOOP(8, end);
UNROLL_MAXARG_LOOP(4, end);
UNROLL_MAXARG_LOOP(2, end);
UNROLL_MAXARG_LOOP(1, end);
if(tid == 0) {
d_out[blockIdx.x + batchIdx * gridDim.x] = sdata[0];
d_ind[blockIdx.x + batchIdx * gridDim.x] = indices[0];
}
__syncthreads();
}
}
__global__ void gMaxElementUpdate(float* binCosts,
int* binIdxs,
float* probs,
int* batchFirstElements,
float* outCosts,
int* outIdxs,
int* cummulatedBeamSizes,
int NUM_BLOCKS) {
extern __shared__ float sdata[];
__shared__ int indices[512];
__shared__ float bestBinCost;
__shared__ int bestBinCostIdx;
const int tid = threadIdx.x;
const int batchIdx = blockIdx.x;
const int N = batchFirstElements[batchIdx + 1] - batchFirstElements[batchIdx];
int num_bins = int(N / (2 * 512)) + int(N % (2 * 512) != 0);
if(num_bins > 500) {
num_bins = 500;
}
for(int pos = cummulatedBeamSizes[batchIdx];
pos < cummulatedBeamSizes[batchIdx + 1];
++pos) {
int i = tid;
sdata[tid] = -3.40282e+38f;
if(i < num_bins) {
sdata[tid] = binCosts[batchIdx * NUM_BLOCKS + i];
indices[tid] = i;
}
if(i + blockDim.x < num_bins) {
float a = binCosts[batchIdx * NUM_BLOCKS + i];
float b = binCosts[batchIdx * NUM_BLOCKS + i + blockDim.x];
if(a > b) {
sdata[tid] = a;
indices[tid] = i;
} else {
sdata[tid] = b;
indices[tid] = i + blockDim.x;
}
}
while(i + 2 * blockDim.x < num_bins) {
i += 2 * blockDim.x;
float a = binCosts[batchIdx * NUM_BLOCKS + i];
if(a > sdata[tid]) {
sdata[tid] = a;
indices[tid] = i;
}
if(i + blockDim.x < num_bins) {
float b = binCosts[batchIdx * NUM_BLOCKS + i + blockDim.x];
if(b > sdata[tid]) {
sdata[tid] = b;
indices[tid] = i + blockDim.x;
}
}
}
__syncthreads();
for(int s = (blockDim.x >> 1); s > 32; s >>= 1) {
if(tid < s && tid + s < num_bins) {
if(sdata[tid + s] > sdata[tid]) {
sdata[tid] = sdata[tid + s];
indices[tid] = indices[tid + s];
}
}
__syncthreads();
}
UNROLL_MAXARG_LOOP(32, num_bins);
UNROLL_MAXARG_LOOP(16, num_bins);
UNROLL_MAXARG_LOOP(8, num_bins);
UNROLL_MAXARG_LOOP(4, num_bins);
UNROLL_MAXARG_LOOP(2, num_bins);
UNROLL_MAXARG_LOOP(1, num_bins);
if(tid == 0) {
bestBinCost = sdata[0];
bestBinCostIdx = batchIdx * NUM_BLOCKS + indices[0];
probs[binIdxs[bestBinCostIdx]] = -3.40282e+38f;
outIdxs[pos] = binIdxs[bestBinCostIdx];
outCosts[pos] = bestBinCost;
}
__syncthreads();
i = batchFirstElements[batchIdx]
+ (bestBinCostIdx - batchIdx * NUM_BLOCKS) * (blockDim.x * 2) + tid;
const int dist = num_bins * 2 * blockDim.x;
sdata[tid] = -3.40282e+38f;
if(i < batchFirstElements[batchIdx + 1]) {
sdata[tid] = probs[i];
indices[tid] = i;
}
if(i + blockDim.x < batchFirstElements[batchIdx + 1]) {
float a = probs[i];
float b = probs[i + blockDim.x];
if(a > b) {
sdata[tid] = a;
indices[tid] = i;
} else {
sdata[tid] = b;
indices[tid] = i + blockDim.x;
}
}
while(i + dist < batchFirstElements[batchIdx + 1]) {
i += dist;
float a = probs[i];
if(a > sdata[tid]) {
sdata[tid] = a;
indices[tid] = i;
}
if(i + blockDim.x < batchFirstElements[batchIdx + 1]) {
float b = probs[i + blockDim.x];
if(b > sdata[tid]) {
sdata[tid] = b;
indices[tid] = i + blockDim.x;
}
}
}
__syncthreads();
for(int s = (blockDim.x >> 1); s > 32; s >>= 1) {
if(tid < s && tid + s < batchFirstElements[batchIdx + 1]) {
if(sdata[tid + s] > sdata[tid]) {
sdata[tid] = sdata[tid + s];
indices[tid] = indices[tid + s];
}
}
__syncthreads();
}
UNROLL_MAXARG_LOOP(32, batchFirstElements[batchIdx + 1]);
UNROLL_MAXARG_LOOP(16, batchFirstElements[batchIdx + 1]);
UNROLL_MAXARG_LOOP(8, batchFirstElements[batchIdx + 1]);
UNROLL_MAXARG_LOOP(4, batchFirstElements[batchIdx + 1]);
UNROLL_MAXARG_LOOP(2, batchFirstElements[batchIdx + 1]);
UNROLL_MAXARG_LOOP(1, batchFirstElements[batchIdx + 1]);
if(tid == 0) {
binCosts[bestBinCostIdx] = sdata[0];
binIdxs[bestBinCostIdx] = indices[0];
}
__syncthreads();
}
}
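// Simple gather kernel: d_out[tid] = d_in[indeces[tid]] for the first n entries;
// used by getValueByKey to pull per-hypothesis score breakdowns off the device.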
__global__ void gGetValueByKey(float* d_in, float* d_out, int* indeces, int n) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < n) {
int index = indeces[tid];
d_out[tid] = d_in[index];
}
}
NthElementGPU::NthElementGPU(size_t maxBeamSize,
size_t maxBatchSize,
DeviceId deviceId)
: deviceId_(deviceId),
NUM_BLOCKS(std::min(
500,
int(maxBeamSize* MAX_VOCAB_SIZE / (2 * BLOCK_SIZE))
+ int(maxBeamSize* MAX_VOCAB_SIZE % (2 * BLOCK_SIZE) != 0))) {
// std::cerr << "NthElement::NthElement" << std::endl;
cudaSetDevice(deviceId_.no);
CUDA_CHECK(
cudaMalloc((void**)&d_ind, maxBatchSize * NUM_BLOCKS * sizeof(int)));
CUDA_CHECK(
cudaMalloc((void**)&d_out, maxBatchSize * NUM_BLOCKS * sizeof(float)));
CUDA_CHECK(
cudaMalloc((void**)&d_res_idx, maxBatchSize * maxBeamSize * sizeof(int)));
CUDA_CHECK(
cudaMalloc((void**)&d_res, maxBatchSize * maxBeamSize * sizeof(float)));
CUDA_CHECK(cudaHostAlloc((void**)&h_res,
maxBeamSize * maxBatchSize * sizeof(float),
cudaHostAllocDefault));
CUDA_CHECK(cudaHostAlloc((void**)&h_res_idx,
maxBeamSize * maxBatchSize * sizeof(int),
cudaHostAllocDefault));
CUDA_CHECK(cudaMalloc((void**)&d_breakdown, maxBeamSize * sizeof(float)));
CUDA_CHECK(
cudaMalloc((void**)&d_batchPosition, (maxBatchSize + 1) * sizeof(int)));
CUDA_CHECK(
cudaMalloc((void**)&d_cumBeamSizes, (maxBatchSize + 1) * sizeof(int)));
}
NthElementGPU::~NthElementGPU() {
cudaSetDevice(deviceId_.no);
CUDA_CHECK(cudaFree(d_ind));
CUDA_CHECK(cudaFree(d_out));
CUDA_CHECK(cudaFree(d_res_idx));
CUDA_CHECK(cudaFree(d_res));
CUDA_CHECK(cudaFreeHost(h_res));
CUDA_CHECK(cudaFreeHost(h_res_idx));
CUDA_CHECK(cudaFree(d_breakdown));
CUDA_CHECK(cudaFree(d_batchPosition));
CUDA_CHECK(cudaFree(d_cumBeamSizes));
}
void NthElementGPU::getNBestList(float* probs,
const std::vector<int>& batchFirstElementIdxs,
const std::vector<int>& cummulatedBeamSizes) {
cudaSetDevice(deviceId_.no);
CUDA_CHECK(cudaMemcpyAsync(d_batchPosition,
batchFirstElementIdxs.data(),
batchFirstElementIdxs.size() * sizeof(int),
cudaMemcpyHostToDevice,
/* stream_ */ 0));
CUDA_CHECK(cudaMemcpyAsync(d_cumBeamSizes,
cummulatedBeamSizes.data(),
cummulatedBeamSizes.size() * sizeof(int),
cudaMemcpyHostToDevice,
/* stream_ */ 0));
const int numBatches = batchFirstElementIdxs.size() - 1;
gMaxElement<<<NUM_BLOCKS,
BLOCK_SIZE,
BLOCK_SIZE * sizeof(float),
/* stream_ */ 0>>>(
d_out, d_ind, probs, numBatches, d_batchPosition);
gMaxElementUpdate<<<numBatches,
BLOCK_SIZE,
BLOCK_SIZE * sizeof(float),
/* stream_ */ 0>>>(d_out,
d_ind,
probs,
d_batchPosition,
d_res,
d_res_idx,
d_cumBeamSizes,
NUM_BLOCKS);
}
void NthElementGPU::getNBestList(const std::vector<size_t>& beamSizes,
Tensor Probs,
std::vector<float>& outCosts,
std::vector<unsigned>& outKeys,
const bool isFirst) {
cudaSetDevice(deviceId_.no);
std::vector<int> cummulatedBeamSizes(beamSizes.size() + 1, 0);
std::vector<int> batchFirstElementIdxs(beamSizes.size() + 1, 0);
const size_t vocabSize = Probs->shape()[-1];
for(size_t i = 0; i < beamSizes.size(); ++i) {
cummulatedBeamSizes[i + 1] = cummulatedBeamSizes[i] + beamSizes[i];
batchFirstElementIdxs[i + 1]
+= ((isFirst) ? (i + 1) : cummulatedBeamSizes[i + 1]) * vocabSize;
}
getNBestList(Probs->data(), batchFirstElementIdxs, cummulatedBeamSizes);
GetPairs(cummulatedBeamSizes.back(), outKeys, outCosts);
}
void NthElementGPU::GetPairs(size_t number,
std::vector<unsigned>& outKeys,
std::vector<float>& outValues) {
cudaSetDevice(deviceId_.no);
CUDA_CHECK(cudaMemcpyAsync(h_res,
d_res,
number * sizeof(float),
cudaMemcpyDeviceToHost,
/* stream_ */ 0));
CUDA_CHECK(cudaMemcpyAsync(h_res_idx,
d_res_idx,
number * sizeof(int),
cudaMemcpyDeviceToHost,
/* stream_ */ 0));
cudaStreamSynchronize(/* stream_ */ 0);
for(size_t i = 0; i < number; ++i) {
outKeys.push_back(h_res_idx[i]);
outValues.push_back(h_res[i]);
}
lastN = number;
}
void NthElementGPU::getValueByKey(std::vector<float>& out, float* d_in) {
cudaSetDevice(deviceId_.no);
gGetValueByKey<<<1, lastN, 0, /* stream_ */ 0>>>(
d_in, d_breakdown, h_res_idx, lastN);
CUDA_CHECK(cudaMemcpyAsync(out.data(),
d_breakdown,
lastN * sizeof(float),
cudaMemcpyDeviceToHost,
/* stream_ */ 0));
CUDA_CHECK(cudaStreamSynchronize(/* stream_ */ 0));
}
}
|
86799537220548d9b38521c2d7e040a8040d8dd6.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cmath>
#include <thrust/version.h>
#include <thrust/scan.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/gather.h>
#include <thrust/logical.h>
#include <thrust/host_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
#include <thrust/system/hip/execution_policy.h>
const int ARRAY_SIZE = 1000;
enum Method {
RAW,
WRAPPED
};
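// RAW passes raw device pointers to thrust with an explicit device execution policy;
// WRAPPED wraps them in thrust::device_ptr so the device backend is selected automatically.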
// ------------------------------------------------------------------------------------
bool check_fill(double* hA)
{
for (int i = 0; i < ARRAY_SIZE; i++) {
if (hA[i] != 9.0)
return false;
}
return true;
}
bool fill_test(Method method)
{
double* hA;
hA = (double *) malloc(sizeof(double) * ARRAY_SIZE);
for (int i = 0; i < ARRAY_SIZE; i++)
hA[i] = 0.0;
double* dA;
hipMalloc((void **) &dA, sizeof(double) * ARRAY_SIZE);
hipMemcpy(dA, hA, sizeof(double) * ARRAY_SIZE, hipMemcpyHostToDevice);
switch (method) {
case RAW:
{
thrust::fill(thrust::hip::par, dA, dA + ARRAY_SIZE, 9.0);
break;
}
case WRAPPED:
{
thrust::device_ptr<double> wdA = thrust::device_pointer_cast(dA);
thrust::fill(wdA, wdA + ARRAY_SIZE, 9.0);
break;
}
}
hipMemcpy(hA, dA, sizeof(double) * ARRAY_SIZE, hipMemcpyDeviceToHost);
bool result = check_fill(hA);
free(hA);
hipFree(dA);
return result;
}
// ------------------------------------------------------------------------------------
bool check_copy(double* hB)
{
for (int i = 0; i < ARRAY_SIZE; i++) {
if (hB[i] != 1.0 * (i + 1))
return false;
}
return true;
}
bool copy_test(Method method)
{
double* hA;
double* hB;
hA = (double *) malloc(sizeof(double) * ARRAY_SIZE);
hB = (double *) malloc(sizeof(double) * ARRAY_SIZE);
for (int i = 0; i < ARRAY_SIZE; i++) {
hA[i] = 1.0 * (i+1);
hB[i] = 0.0;
}
double* dA;
double* dB;
hipMalloc((void **) &dA, sizeof(double) * ARRAY_SIZE);
hipMalloc((void **) &dB, sizeof(double) * ARRAY_SIZE);
hipMemcpy(dA, hA, sizeof(double) * ARRAY_SIZE, hipMemcpyHostToDevice);
hipMemcpy(dB, hB, sizeof(double) * ARRAY_SIZE, hipMemcpyHostToDevice);
switch (method) {
case RAW:
{
thrust::copy(thrust::hip::par, dA, dA + ARRAY_SIZE, dB);
break;
}
case WRAPPED:
{
thrust::device_ptr<double> wdA = thrust::device_pointer_cast(dA);
thrust::device_ptr<double> wdB = thrust::device_pointer_cast(dB);
thrust::copy(wdA, wdA + ARRAY_SIZE, wdB);
}
}
hipMemcpy(hB, dB, sizeof(double) * ARRAY_SIZE, hipMemcpyDeviceToHost);
bool result = check_copy(hB);
free(hA);
free(hB);
hipFree(dA);
hipFree(dB);
return result;
}
// ------------------------------------------------------------------------------------
int main(int argc, char **argv)
{
int major = THRUST_MAJOR_VERSION;
int minor = THRUST_MINOR_VERSION;
std::cout << "Thrust v" << major << "." << minor << std::endl << std::endl;
std::cout << "Fill DR ... " << std::flush << fill_test(RAW) << std::endl;
std::cout << "Fill DW ... " << std::flush << fill_test(WRAPPED) << std::endl;
std::cout << "Copy DR ... " << std::flush << copy_test(RAW) << std::endl;
std::cout << "Copy DW ... " << std::flush << copy_test(WRAPPED) << std::endl;
return 0;
}
| 86799537220548d9b38521c2d7e040a8040d8dd6.cu | #include <iostream>
#include <cmath>
#include <thrust/version.h>
#include <thrust/scan.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/gather.h>
#include <thrust/logical.h>
#include <thrust/host_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
#include <thrust/system/cuda/execution_policy.h>
const int ARRAY_SIZE = 1000;
enum Method {
RAW,
WRAPPED
};
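// RAW passes raw device pointers to thrust with an explicit device execution policy;
// WRAPPED wraps them in thrust::device_ptr so the device backend is selected automatically.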
// ------------------------------------------------------------------------------------
bool check_fill(double* hA)
{
for (int i = 0; i < ARRAY_SIZE; i++) {
if (hA[i] != 9.0)
return false;
}
return true;
}
bool fill_test(Method method)
{
double* hA;
hA = (double *) malloc(sizeof(double) * ARRAY_SIZE);
for (int i = 0; i < ARRAY_SIZE; i++)
hA[i] = 0.0;
double* dA;
cudaMalloc((void **) &dA, sizeof(double) * ARRAY_SIZE);
cudaMemcpy(dA, hA, sizeof(double) * ARRAY_SIZE, cudaMemcpyHostToDevice);
switch (method) {
case RAW:
{
thrust::fill(thrust::cuda::par, dA, dA + ARRAY_SIZE, 9.0);
break;
}
case WRAPPED:
{
thrust::device_ptr<double> wdA = thrust::device_pointer_cast(dA);
thrust::fill(wdA, wdA + ARRAY_SIZE, 9.0);
break;
}
}
cudaMemcpy(hA, dA, sizeof(double) * ARRAY_SIZE, cudaMemcpyDeviceToHost);
bool result = check_fill(hA);
free(hA);
cudaFree(dA);
return result;
}
// ------------------------------------------------------------------------------------
bool check_copy(double* hB)
{
for (int i = 0; i < ARRAY_SIZE; i++) {
if (hB[i] != 1.0 * (i + 1))
return false;
}
return true;
}
bool copy_test(Method method)
{
double* hA;
double* hB;
hA = (double *) malloc(sizeof(double) * ARRAY_SIZE);
hB = (double *) malloc(sizeof(double) * ARRAY_SIZE);
for (int i = 0; i < ARRAY_SIZE; i++) {
hA[i] = 1.0 * (i+1);
hB[i] = 0.0;
}
double* dA;
double* dB;
cudaMalloc((void **) &dA, sizeof(double) * ARRAY_SIZE);
cudaMalloc((void **) &dB, sizeof(double) * ARRAY_SIZE);
cudaMemcpy(dA, hA, sizeof(double) * ARRAY_SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(dB, hB, sizeof(double) * ARRAY_SIZE, cudaMemcpyHostToDevice);
switch (method) {
case RAW:
{
thrust::copy(thrust::cuda::par, dA, dA + ARRAY_SIZE, dB);
break;
}
case WRAPPED:
{
thrust::device_ptr<double> wdA = thrust::device_pointer_cast(dA);
thrust::device_ptr<double> wdB = thrust::device_pointer_cast(dB);
thrust::copy(wdA, wdA + ARRAY_SIZE, wdB);
}
}
cudaMemcpy(hB, dB, sizeof(double) * ARRAY_SIZE, cudaMemcpyDeviceToHost);
bool result = check_copy(hB);
free(hA);
free(hB);
cudaFree(dA);
cudaFree(dB);
return result;
}
// ------------------------------------------------------------------------------------
int main(int argc, char **argv)
{
int major = THRUST_MAJOR_VERSION;
int minor = THRUST_MINOR_VERSION;
std::cout << "Thrust v" << major << "." << minor << std::endl << std::endl;
std::cout << "Fill DR ... " << std::flush << fill_test(RAW) << std::endl;
std::cout << "Fill DW ... " << std::flush << fill_test(WRAPPED) << std::endl;
std::cout << "Copy DR ... " << std::flush << copy_test(RAW) << std::endl;
std::cout << "Copy DW ... " << std::flush << copy_test(WRAPPED) << std::endl;
return 0;
}
|
2df28e11b1175a71704036d087d9975690006099.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <cusolverDn.h>
#include </home/sbimavar/cuda/ALS/final/lin_equation_solve.cu>
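// Expands the length-`array_length` vector `matrix` into a dense array_length x array_length diagonal matrix `Dmatrix`.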
void create_diagonal_matrix(double *Dmatrix, double * matrix, int array_length)
{
for(int i=0;i<array_length;i++)
{
for(int j=0;j<array_length;j++)
{
if(i==j)
Dmatrix[j*array_length+i]=matrix[i];
else
Dmatrix[j*array_length+i]=0;
}
}
}
// Multiply the arrays A and B on GPU and save the result in C
// C(m,n) = A(m,k) * B(k,n)
void gpu_blas_mmul(const double *A, const double *B, double *C, const int m, const int k, const int n)
{
int lda=m,ldb=k,ldc=m;
const double alf = 1;
const double bet = 0;
const double *alpha = &alf;
const double *beta = &bet;
// Create a handle for CUBLAS
hipblasHandle_t handle;
hipblasCreate(&handle);
// Do the actual multiplication
hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc);
// Destroy the handle
hipblasDestroy(handle);
}
__global__ void Add(double * dX_out, double * dX_in, double * dY_in)
{
int idx = threadIdx.x;
dX_out[idx] = dX_in[idx] + dY_in[idx];
}
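// Transposes a total_rows x total_columns row-major matrix; each thread moves one element to its transposed position.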
__global__ void trans(double * dX_out, double * dX_in, int total_rows, int total_columns)
{
int idx = threadIdx.x;
int row_number = idx / total_columns;
// printf("%d \t", row_number);
int col_number = idx - (row_number * total_columns);
// printf("%d \t", col_number);
int index = idx;
int new_r = col_number;
int new_c = row_number;
int new_index = (new_r * total_rows) + new_c;
// printf("%d \t", new_index);
dX_out[new_index] = dX_in[index];
// printf("%d \t", dX_out[new_index]);
}
void calling_trans(int nr_rows_A, int nr_cols_A, double * h_Ab, double * h_Ab_out)
{
const int ARRAY_BYTES_x = nr_rows_A * nr_cols_A * sizeof(double);
double * dX_in;
double * dX_out;
// allocate GPU memory
hipMalloc((void**) &dX_in, ARRAY_BYTES_x);
hipMalloc((void**) &dX_out, ARRAY_BYTES_x);
// transfer the array to the GPU
hipMemcpy(dX_in, h_Ab, ARRAY_BYTES_x, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( trans), dim3(1), dim3(nr_rows_A * nr_cols_A), 0, 0, dX_out, dX_in, nr_rows_A, nr_cols_A);
hipMemcpy(h_Ab_out, dX_out, ARRAY_BYTES_x, hipMemcpyDeviceToHost);
hipFree(dX_in);
hipFree(dX_out);
}
void mul(int nr_rows_A, int nr_cols_A, int nr_rows_B, int nr_cols_B, int nr_rows_finalproduct, int nr_cols_finalproduct, double * h_Ab, double * h_Bb, double * final)
{
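// cuBLAS assumes column-major storage, so the row-major inputs are transposed on the way in
// and the product is transposed back into row-major order at the end.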
//transpose of first matrix starts here
double *h_Ab_out;
h_Ab_out = (double *)malloc(nr_rows_A * nr_cols_A * sizeof(double));
calling_trans(nr_rows_A, nr_cols_A, h_Ab, h_Ab_out);
//transpose of second matrix starts here
double *h_Bb_out;
h_Bb_out = (double *)malloc(nr_rows_B * nr_cols_B * sizeof(double));
calling_trans(nr_rows_B, nr_cols_B, h_Bb, h_Bb_out);
//multiplication to get the resultant matrix starts here
int nr_rows_product = nr_rows_A;
int nr_cols_product = nr_cols_B;
double *product = (double *)malloc(nr_rows_product * nr_cols_product * sizeof(double));
// Allocate 3 arrays on GPU
double *d_A, *d_B, *d_C;
hipMalloc(&d_A,nr_rows_A * nr_cols_A * sizeof(double));
hipMalloc(&d_B,nr_rows_B * nr_cols_B * sizeof(double));
hipMalloc(&d_C,nr_rows_product * nr_cols_product * sizeof(double));
// If you already have useful values in A and B you can copy them in GPU:
hipMemcpy(d_A,h_Ab_out,nr_rows_A * nr_cols_A * sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(d_B,h_Bb_out,nr_rows_B * nr_cols_B * sizeof(double),hipMemcpyHostToDevice);
gpu_blas_mmul(d_A, d_B, d_C, nr_rows_product, nr_cols_A, nr_cols_product);
hipMemcpy(product,d_C,nr_rows_product * nr_cols_product * sizeof(double),hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// Now, taking the transpose of the resultant matrix (resultant matrix's dimensions (no.of.rows switched with no.of.cols)) gives us the final correct answer
calling_trans(nr_rows_finalproduct, nr_cols_finalproduct, product, final);
free(h_Ab_out);
free(h_Bb_out);
free(product);
}
void als_solve_X(double *X, int u, double *Y, double *Wu, int n_factors, double lambda, double *Qu, int Y_rows, int Y_cols, int W_rows, int W_cols)
{
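// Weighted ridge regression for row u of X: solves (Y*diag(Wu)*Y^T + lambda*I) x = Y*diag(Wu)*Qu^T and stores x in X[u,:].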
//std::cout<<"Inside x \n";
//find diag(Wu) --> Wu_diag -----------------------------------------------------------------------------------
double * Wu_diag;
Wu_diag = (double *)malloc(W_cols * W_cols * sizeof(double));
create_diagonal_matrix(Wu_diag, Wu, W_cols);
/* printf("find diag(Wu) --> Wu_diag is \n");
for(int i=0; i<(W_cols*W_cols); i++)
{
printf("%f \t", Wu_diag[i]);
}
printf("\n"); */
//find Y.T --> Y_trans ---------------------------------------------------------------------------------------------
double *Y_trans;
Y_trans = (double *)malloc(Y_rows * Y_cols * sizeof(double));
calling_trans(Y_rows, Y_cols, Y, Y_trans);
/* printf("find Y.T --> Y_trans is \n");
for(int i = 0; i<(Y_rows * Y_cols); i++){
printf("%f \t", Y_trans[i]);
}
printf("\n"); */
//find eye(n_factors) and multiply with lambda --> eye_lambda_matrix ------------------------------------------------
double *eye_lambda;
eye_lambda = (double *)malloc(n_factors * sizeof(double));
for(int i = 0; i< n_factors; i++){
eye_lambda[i] = lambda;
}
double * eye_lambda_matrix;
eye_lambda_matrix = (double *)malloc(n_factors * n_factors * sizeof(double));
create_diagonal_matrix(eye_lambda_matrix, eye_lambda, n_factors);
/* printf("find eye(n_factors) and multiply with lambda --> eye_lambda_matrix is \n");
for(int i = 0; i<(n_factors*n_factors); i++){
printf("%f \t", eye_lambda_matrix[i]);
}
printf("\n"); */
//dot product of Wu_diag, Y_trans --> temp_1 ----------------------------------------------------------------------------
int nr_rows_finalproduct = Y_rows;
int nr_cols_finalproduct = W_cols;
double *temp_1;
temp_1 = (double *)malloc(nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double));
mul(W_cols, W_cols, Y_cols, Y_rows, nr_rows_finalproduct, nr_cols_finalproduct, Wu_diag, Y_trans, temp_1);
/* printf("The dot product of Wu_diag, Y_trans --> temp_1 is \n");
for (int i = 0; i<(nr_rows_finalproduct * nr_cols_finalproduct); i++)
{
printf("%f \t", temp_1[i]);
}
printf("\n"); */
//clear Y_trans
free(Y_trans); // clearing CPU memory
//dot product of Y, temp_1 --> temp_2 -----------------------------------------------------------------------------------
nr_rows_finalproduct = Y_rows;
nr_cols_finalproduct = Y_rows;
double *temp_2;
temp_2 = (double *)malloc(nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double));
mul(Y_rows, Y_cols, W_cols, Y_rows, nr_rows_finalproduct, nr_cols_finalproduct, Y, temp_1, temp_2);
/* printf("dot product of Y, temp_1 --> temp_2 is \n");
for (int i = 0; i<(nr_rows_finalproduct * nr_cols_finalproduct); i++)
{
printf("%f \t", temp_2[i]);
}
printf("\n"); */
//clear temp_1
free(temp_1); // clearing CPU memory
//addition of temp_2, eye_lambda_matrix --> A (alias add_result) ----------------------------------------------------------------------------
const int ARRAY_BYTES = nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double);
double * A;
A = (double *)malloc(nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double));
double * dX_input;
double * dY_input;
double * D_A;
hipMalloc((void**) &dX_input, ARRAY_BYTES);
hipMalloc((void**) &dY_input, ARRAY_BYTES);
hipMalloc((void**) &D_A, ARRAY_BYTES);
hipMemcpy(dX_input, temp_2, ARRAY_BYTES, hipMemcpyHostToDevice);
hipMemcpy(dY_input, eye_lambda_matrix, ARRAY_BYTES, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Add), dim3(1), dim3((nr_rows_finalproduct * nr_cols_finalproduct)), 0, 0, D_A, dX_input, dY_input);
hipMemcpy(A, D_A, ARRAY_BYTES, hipMemcpyDeviceToHost);
/* printf("addition of temp_2, eye_lambda_matrix --> A (alias add_result) is \n");
for (int i = 0; i<(nr_rows_finalproduct * nr_cols_finalproduct); i++)
{
printf("%f \t", A[i]);
}
printf("\n"); */
//clear temp_2 and eye_lambda_matrix
free(temp_2); // clearing CPU memory
free(eye_lambda);
free(eye_lambda_matrix);
//-------------------------------------
//find transpose of Qu --> Qu_trans ----------------------------------------------------------------------------
double *Qu_trans;
Qu_trans = (double *)malloc(1 * W_cols * sizeof(double));
calling_trans(1, W_cols, Qu, Qu_trans); // is it necessary doing this ?????
/* printf("find transpose of Qu --> Qu_trans is \n");
for(int i = 0; i < W_cols; i++){
printf("%f \t", Qu_trans[i]);
}
printf("\n"); */
//find dot product Wu_diag, Qu_trans --> temp3 ----------------------------------------------------------------------------
nr_rows_finalproduct = 1;
nr_cols_finalproduct = W_cols;
double *temp3;
temp3 = (double *)malloc(nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double));
mul(W_cols, W_cols, W_cols, 1, nr_rows_finalproduct, nr_cols_finalproduct, Wu_diag, Qu_trans, temp3);
/* printf("find dot product Wu_diag, Qu_trans --> temp3 is \n");
for (int i = 0; i<(nr_rows_finalproduct * nr_cols_finalproduct); i++)
{
printf("%f \t", temp3[i]);
}
printf("\n"); */
//clear Wu_diag and Qu_trans
free(Wu_diag);
free(Qu_trans);
//dot product of Y(3*5) and temp3(5*1) --> B (3*1) ----------------------------------------------------------------------------
nr_rows_finalproduct = 1;
nr_cols_finalproduct = Y_rows;
double *B;
B = (double *)malloc(nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double));
mul(Y_rows, Y_cols, W_cols, 1, nr_rows_finalproduct, nr_cols_finalproduct, Y, temp3, B);
/* printf("dot product of Y(3*5) and temp3(5*1) --> B (3*1) is \n");
for (int i = 0; i<(nr_rows_finalproduct * nr_cols_finalproduct); i++)
{
printf("%f \t", B[i]);
}
printf("\n"); */
//clear temp3
free(temp3);
//linalg(A, B) --> x_X -------------------------------------------------------------------------------------------------------------------
/* double *A_final;
A_final = (double *)malloc(Y_rows*Y_rows*sizeof(double));
std::copy(A, A + (Y_rows*Y_rows), A_final);
double *B_final;
B_final = (double *)malloc(Y_rows*1*sizeof(double));
std::copy(B, B + (Y_rows*1), B_final); */
double * x_X;
x_X = (double *)malloc(Y_rows*1*sizeof(double));
const int nrhs = 1;
lin_alg_solve(x_X, A, B, Y_rows, nrhs);
/* double * X_final;
X_final = (double *)malloc(Y_rows*1*sizeof(double));
std::copy(x_X, x_X + (Y_rows*1), X_final); */
/* printf("linalg(A, B) --> x_X is \n");
for(int i = 0; i < (Y_rows*1); i++)
printf("%f \n",x_X[i]); */
// clear A, B
free(A);
free(B);
for(int i = 0; i < n_factors; i++)
{
X[u*n_factors + i] = x_X[i];
}
free(x_X);
}
void als_solve_Y(double * YT, int i_Y, double * X, double * Wi, int n_factors, double lambda, double * Qi, int X_rows, int X_cols, int W_rows, int W_cols)
{
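// Weighted ridge regression for column i_Y of Y: solves (X^T*diag(Wi)*X + lambda*I) y = X^T*diag(Wi)*Qi and stores y as row i_Y of YT.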
// create diagonal matrix of Wi --> Wi_diag ------------------------------------------------------------------------------------------
double * Wi_diag;
Wi_diag = (double *)malloc(W_rows * W_rows * sizeof(double));
create_diagonal_matrix(Wi_diag, Wi, W_rows);
/* printf("find diag(Wi) --> Wi_diag is \n");
for(int i=0; i<(W_rows*W_rows); i++)
{
printf("%f \t", Wi_diag[i]);
}
printf("\n"); */
//dot product of Wi_diag and X --> matrix1 ------------------------------------------------------------------------------------------
int nr_rows_finalproduct = X_cols;
int nr_cols_finalproduct = W_rows;
double *matrix1;
matrix1 = (double *)malloc(nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double));
mul(W_rows, W_rows, X_rows, X_cols, nr_rows_finalproduct, nr_cols_finalproduct, Wi_diag, X, matrix1);
/* printf("The dot product of Wi_diag and X --> matrix1 is \n");
for (int i = 0; i<(nr_rows_finalproduct * nr_cols_finalproduct); i++)
{
printf("%f \t", matrix1[i]);
}
printf("\n"); */
//transpose of X --> X_trans ---------------------------------------------------------------------------------------------------------
double *X_trans;
X_trans = (double *)malloc(X_cols * X_rows * sizeof(double));
calling_trans(X_rows, X_cols, X, X_trans);
/* printf("find X.T --> X_trans is \n");
for(int i = 0; i<(X_cols * X_rows); i++){
printf("%f \t", X_trans[i]);
}
printf("\n"); */
//dot product of X_trans and matrix1 --> matrix2 ------------------------------------------------------------------------------------------
nr_rows_finalproduct = X_cols;
nr_cols_finalproduct = X_cols;
double *matrix2;
matrix2 = (double *)malloc(nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double));
mul(X_cols, X_rows, W_rows, X_cols, nr_rows_finalproduct, nr_cols_finalproduct, X_trans, matrix1, matrix2);
/* printf("The dot product of X_trans and matrix1 --> matrix2 is \n");
for (int i = 0; i<(nr_rows_finalproduct * nr_cols_finalproduct); i++)
{
printf("%f \t", matrix2[i]);
}
printf("\n"); */
free(matrix1);
//find eye(n_factors) and multiply with lambda --> eye_lambda (matrix) --------------------------------------------------------------------------------
double *eye_lambda;
eye_lambda = (double *)malloc(n_factors * sizeof(double));
for(int i = 0; i< n_factors; i++){
eye_lambda[i] = lambda;
}
double * eye_lambda_matrix;
eye_lambda_matrix = (double *)malloc(n_factors * n_factors * sizeof(double));
create_diagonal_matrix(eye_lambda_matrix, eye_lambda, n_factors);
/* printf("find eye(n_factors) and multiply with lambda --> eye_lambda_matrix is \n");
for(int i = 0; i<(n_factors*n_factors); i++){
printf("%f \t", eye_lambda_matrix[i]);
}
printf("\n"); */
free(eye_lambda);
//addition of matrix2 and eye_lambda_matrix --> A ----------------------------------------------------------------------------------------------------
const int ARRAY_BYTES = nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double);
double * A;
A = (double *)malloc(nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double));
double * dX_input;
double * dY_input;
double * D_A;
hipMalloc((void**) &dX_input, ARRAY_BYTES);
hipMalloc((void**) &dY_input, ARRAY_BYTES);
hipMalloc((void**) &D_A, ARRAY_BYTES);
hipMemcpy(dX_input, matrix2, ARRAY_BYTES, hipMemcpyHostToDevice);
hipMemcpy(dY_input, eye_lambda_matrix, ARRAY_BYTES, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Add), dim3(1), dim3((nr_rows_finalproduct * nr_cols_finalproduct)), 0, 0, D_A, dX_input, dY_input);
hipMemcpy(A, D_A, ARRAY_BYTES, hipMemcpyDeviceToHost);
/* printf("addition of matrix2, eye_lambda --> A is \n");
for (int i = 0; i<(nr_rows_finalproduct * nr_cols_finalproduct); i++)
{
printf("%f \t", A[i]);
}
printf("\n"); */
free(eye_lambda_matrix);
hipFree(dX_input);
hipFree(dY_input);
hipFree(D_A);
free(matrix2);
//------------------------------------------
//dot product of Wi_diag and Qi --> matrix3 ------------------------------------------------------------------------------------------
nr_rows_finalproduct = 1;
nr_cols_finalproduct = W_rows;
double *matrix3;
matrix3 = (double *)malloc(nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double));
mul(W_rows, W_rows, W_rows, 1, nr_rows_finalproduct, nr_cols_finalproduct, Wi_diag, Qi, matrix3);
/* printf("The dot product of Wi_diag and Qi --> matrix3 is \n");
for (int i = 0; i<(nr_rows_finalproduct * nr_cols_finalproduct); i++)
{
printf("%f \t", matrix3[i]);
}
printf("\n"); */
free(Wi_diag);
//dot product of X_trans and matrix3 --> B ------------------------------------------------------------------------------------------
nr_rows_finalproduct = 1;
nr_cols_finalproduct = X_cols;
double *B;
B = (double *)malloc(nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double));
mul(X_cols, X_rows, W_rows, 1, nr_rows_finalproduct, nr_cols_finalproduct, X_trans, matrix3, B);
/* printf("The dot product of X_trans and matrix3 --> B is \n");
for (int i = 0; i<(nr_rows_finalproduct * nr_cols_finalproduct); i++)
{
printf("%f \t", B[i]);
}
printf("\n"); */
free(X_trans);
free(matrix3);
//linalg(A, B) --> x_Y -----------------------------------------------------------------------------------------------------------
double * x_Y;
x_Y = (double *)malloc(X_cols*1*sizeof(double));
const int nrhs = 1;
lin_alg_solve(x_Y, A, B, X_cols, nrhs);
/* printf("linalg(A, B) --> x_Y is \n");
for(int i = 0; i < (X_cols*1); i++)
printf("%f \n",x_Y[i]); */
for(int i = 0; i < n_factors; i++)
{
YT[i_Y*n_factors + i] = x_Y[i];
}
free(A);
free(B);
free(x_Y);
}
__global__ void Subtraction(double * dX_out, double * dX_in, double * dY_in)
{
int idx = threadIdx.x;
dX_out[idx] = dX_in[idx] - dY_in[idx];
}
__global__ void element_multiplication(double * dX_out, double * dX_in, double * dY_in)
{
int idx = threadIdx.x;
dX_out[idx] = dX_in[idx] * dY_in[idx];
}
void rmse(double * squares, double * Q, double * X, double * Y, double * W, int Q_rows, int Q_cols, int n_factors)
{
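// Fills `squares` with the element-wise, observation-weighted squared reconstruction error: (W .* (Q - X*Y)).^2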
//dot product of X and Y --> Q_hat
int Q_hat_rows = Q_cols;
int Q_hat_cols = Q_rows;
double *Q_hat;
Q_hat = (double *)malloc(Q_hat_rows * Q_hat_cols * sizeof(double));
mul(Q_rows, n_factors, n_factors, Q_cols, Q_hat_rows, Q_hat_cols, X, Y, Q_hat);
/* printf("The dot product of X and Y --> Q_hat is \n");
for (int i = 0; i<(Q_hat_rows * Q_hat_cols); i++)
{
printf("%f \t", Q_hat[i]);
}
printf("\n"); */
//subtraction of Q_hat from Q --> sub
const int ARRAY_BYTES = Q_rows * Q_cols * sizeof(double);
double * sub;
sub = (double *)malloc(Q_rows * Q_cols * sizeof(double));
double * dX_input;
double * dY_input;
double * D_sub;
hipMalloc((void**) &dX_input, ARRAY_BYTES);
hipMalloc((void**) &dY_input, ARRAY_BYTES);
hipMalloc((void**) &D_sub, ARRAY_BYTES);
hipMemcpy(dX_input, Q, ARRAY_BYTES, hipMemcpyHostToDevice);
hipMemcpy(dY_input, Q_hat, ARRAY_BYTES, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Subtraction), dim3(1), dim3((Q_rows * Q_cols)), 0, 0, D_sub, dX_input, dY_input);
hipMemcpy(sub, D_sub, ARRAY_BYTES, hipMemcpyDeviceToHost);
hipFree(dX_input);
hipFree(dY_input);
hipFree(D_sub);
/* printf("subtraction of Q_hat from Q --> sub is \n");
for (int i = 0; i<(Q_rows * Q_cols); i++)
{
printf("%f \t", sub[i]);
}
printf("\n"); */
// element by element multiplication of W and sub --> mul_result
//const int ARRAY_BYTES = Q_rows * Q_cols * sizeof(double);
double * mul_result;
mul_result = (double *)malloc(Q_rows * Q_cols * sizeof(double));
double * D_mul;
hipMalloc((void**) &dX_input, ARRAY_BYTES);
hipMalloc((void**) &dY_input, ARRAY_BYTES);
hipMalloc((void**) &D_mul, ARRAY_BYTES);
hipMemcpy(dX_input, W, ARRAY_BYTES, hipMemcpyHostToDevice);
hipMemcpy(dY_input, sub, ARRAY_BYTES, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( element_multiplication), dim3(1), dim3((Q_rows * Q_cols)), 0, 0, D_mul, dX_input, dY_input);
hipMemcpy(mul_result, D_mul, ARRAY_BYTES, hipMemcpyDeviceToHost);
hipFree(dX_input);
hipFree(dY_input);
hipFree(D_mul);
/* printf("element by element multiplication of W and sub --> mul_result is \n");
for (int i = 0; i<(Q_rows * Q_cols); i++)
{
printf("%f \t", mul_result[i]);
}
printf("\n"); */
// element by element multiplication of mul_result and mul_result --> squares
double * D_SoS;
hipMalloc((void**) &dX_input, ARRAY_BYTES);
hipMalloc((void**) &dY_input, ARRAY_BYTES);
hipMalloc((void**) &D_SoS, ARRAY_BYTES);
hipMemcpy(dX_input, mul_result, ARRAY_BYTES, hipMemcpyHostToDevice);
hipMemcpy(dY_input, mul_result, ARRAY_BYTES, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( element_multiplication), dim3(1), dim3((Q_rows * Q_cols)), 0, 0, D_SoS, dX_input, dY_input);
hipMemcpy(squares, D_SoS, ARRAY_BYTES, hipMemcpyDeviceToHost);
hipFree(dX_input);
hipFree(dY_input);
hipFree(D_SoS);
/* printf("element by element multiplication of mul_result and mul_result --> squares is \n");
for (int i = 0; i<(Q_rows * Q_cols); i++)
{
printf("%f \t", squares[i]);
}
printf("\n"); */
}
int main()
{
int Q_rows = 4;
int Q_cols = 5;
double Q_temp[20] = {3, 4, 0, 2, 0, 0, 2, 1, 0, 0, 1, 0, 3, 4, 0, 0, 0, 0, 1, 3};
double *Q;
Q = (double *)malloc(Q_rows * Q_cols * sizeof(double));
memcpy(Q, Q_temp, sizeof(double)*20);
int n_factors = 3;
double lambda = 0.1;
//int n_iterations = 20;
int X_rows = 4;
int X_cols = n_factors;
double X_temp[12] = {1.04537429, 3.45278132, 3.47493422, 2.24801288, 4.88137731, 1.66288503, 4.81317032, 4.63570752, 1.36892613, 3.32203655, 3.31923711, 2.27048096};
double *X;
X = (double *)malloc(X_rows * X_cols * sizeof(double));
memcpy(X, X_temp, sizeof(double)*12);
//for(int i = 0; i<12;i++)
// printf("%.8f \t", X[i]);
printf("\n");
int Y_rows = n_factors;
int Y_cols = 5;
double Y_temp[15] = {1.59982314, 4.78360092, 3.45781337, 3.13286951, 0.50542705, 3.83681956, 2.88250821, 1.1667597 , 2.43170423, 4.06026517, 0.65686686, 2.94705632, 0.46822364, 1.98082364, 1.54905706};
double *Y;
Y = (double *)malloc(Y_rows * Y_cols * sizeof(double));
memcpy(Y, Y_temp, sizeof(double)*15);
int W_rows = Q_rows;
int W_cols = Q_cols;
double W_temp[20] = {1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1};
double *W;
W = (double *)malloc(W_rows * W_cols * sizeof(double));
memcpy(W, W_temp, sizeof(double)*20);
/* for (int i = 0; i<20; i++)
{
printf("%f \t", W[i]);
}
printf("\n");
*/
//Main computation starts here with n_iterations
for(int it = 0; it<20;it++)
{
//updating all rows of X starts here
double *Wu;
Wu = (double *)malloc(W_cols * sizeof(double));
//Wu[0] = 1; Wu[1] = 1; Wu[2] = 0; Wu[3] = 1; Wu[4] = 0;
double *Qu;
Qu = (double *)malloc(W_cols * sizeof(double));
//Qu[0] = 3; Qu[1] = 4; Qu[2] = 0; Qu[3] = 2; Qu[4] = 0;
//#pragma omp parallel for private(Wu, Qu)
for(int u = 0; u < W_rows; u++)
{
for(int k = 0; k < W_cols; k++)
{
Wu[k] = W[u*W_cols + k];
Qu[k] = Q[u*W_cols + k];
}
als_solve_X(X, u, Y, Wu, n_factors, lambda, Qu, Y_rows, Y_cols, W_rows, W_cols);
}
/* printf("X with updated row is \n");
for (int i = 0; i< 12; i++)
{
printf("%f \t", X[i]);
}
printf("\n"); */
//updating all columns of Y starts here
double *Wi;
Wi = (double *)malloc(W_rows * sizeof(double));
//Wi[0] = 1; Wi[1] = 0; Wi[2] = 1; Wi[3] = 0;
double *Qi;
Qi = (double *)malloc(W_rows * sizeof(double));
//Qi[0] = 3; Qi[1] = 0; Qi[2] = 1; Qi[3] = 0;
//take the transpose of W to get WT
double *WT;
WT = (double *)malloc(W_cols * W_rows * sizeof(double));
calling_trans(W_rows, W_cols, W, WT);
/* printf("find W.T --> WT is \n");
for(int i = 0; i<(W_cols * W_rows); i++){
printf("%f \t", WT[i]);
}
printf("\n"); */
//take the transpose of Q to get QT
double *QT;
QT = (double *)malloc(Q_cols * Q_rows * sizeof(double));
calling_trans(Q_rows, Q_cols, Q, QT);
//take the transpose of Y to get YT
double *YT;
YT = (double *)malloc(Y_cols * Y_rows * sizeof(double));
calling_trans(Y_rows, Y_cols, Y, YT);
//#pragma omp parallel for private(Wi, Qi)
for(int i_Y = 0; i_Y < W_cols; i_Y++)
{
for(int p = 0; p < W_rows; p++)
{
Wi[p] = WT[i_Y*W_rows + p];
Qi[p] = QT[i_Y*W_rows + p];
}
als_solve_Y(YT, i_Y, X, Wi, n_factors, lambda, Qi, X_rows, X_cols, W_rows, W_cols);
}
calling_trans(Y_cols, Y_rows, YT, Y);
//call rmse() function to get rmse values
double * Squares;
Squares = (double *)malloc(Q_rows * Q_cols * sizeof(double));
rmse(Squares, Q, X, Y, W, Q_rows, Q_cols, n_factors);
double error = 0;
for(int i = 0; i < (Q_rows * Q_cols); i++)
{
error = error + Squares[i];
}
printf("%f \n", error);
} //n-iterations end here
/* printf("X with updated row is \n");
for (int i = 0; i< 12; i++)
{
printf("%f \t", X[i]);
}
printf("\n");
printf("Y with updated column is \n");
for (int i = 0; i< 15; i++)
{
printf("%f \t", Y[i]);
}
printf("\n"); */
//dot product of X and Y starts here AFTER the end of n_iterations --> Q_hat
int Q_hat_rows = Q_cols;
int Q_hat_cols = Q_rows;
double *Q_hat;
Q_hat = (double *)malloc(Q_hat_rows * Q_hat_cols * sizeof(double));
mul(Q_rows, Y_rows, Y_rows, Q_cols, Q_hat_rows, Q_hat_cols, X, Y, Q_hat);
printf("The dot product of X and Y --> Q_hat is \n");
for (int i = 0; i<(Q_hat_rows * Q_hat_cols); i++)
{
printf("%f \t", Q_hat[i]);
}
printf("\n");
printf("Q is \n");
for (int i = 0; i<(Q_rows * Q_cols); i++)
{
printf("%f \t", Q[i]);
}
printf("\n");
// printing the recommendations
for(int i = 0; i < Q_rows; i++)
{
for(int j = 0; j < Q_cols; j++)
{
if((Q[i * Q_cols + j] == 0) && (Q_hat[i * Q_cols + j] > 1.5))
{
printf("user %d may like movie %d \n", i, j);
}
}
}
}
| 2df28e11b1175a71704036d087d9975690006099.cu | #include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cusolverDn.h>
#include </home/sbimavar/cuda/ALS/final/lin_equation_solve.cu>
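// Expands the length-`array_length` vector `matrix` into a dense array_length x array_length diagonal matrix `Dmatrix`.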
void create_diagonal_matrix(double *Dmatrix, double * matrix, int array_length)
{
for(int i=0;i<array_length;i++)
{
for(int j=0;j<array_length;j++)
{
if(i==j)
Dmatrix[j*array_length+i]=matrix[i];
else
Dmatrix[j*array_length+i]=0;
}
}
}
// Multiply the arrays A and B on GPU and save the result in C
// C(m,n) = A(m,k) * B(k,n)
void gpu_blas_mmul(const double *A, const double *B, double *C, const int m, const int k, const int n)
{
int lda=m,ldb=k,ldc=m;
const double alf = 1;
const double bet = 0;
const double *alpha = &alf;
const double *beta = &bet;
// Create a handle for CUBLAS
cublasHandle_t handle;
cublasCreate(&handle);
// Do the actual multiplication
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc);
// Destroy the handle
cublasDestroy(handle);
}
__global__ void Add(double * dX_out, double * dX_in, double * dY_in)
{
int idx = threadIdx.x;
dX_out[idx] = dX_in[idx] + dY_in[idx];
}
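// Transposes a total_rows x total_columns row-major matrix; each thread moves one element to its transposed position.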
__global__ void trans(double * dX_out, double * dX_in, int total_rows, int total_columns)
{
int idx = threadIdx.x;
int row_number = idx / total_columns;
// printf("%d \t", row_number);
int col_number = idx - (row_number * total_columns);
// printf("%d \t", col_number);
int index = idx;
int new_r = col_number;
int new_c = row_number;
int new_index = (new_r * total_rows) + new_c;
// printf("%d \t", new_index);
dX_out[new_index] = dX_in[index];
// printf("%d \t", dX_out[new_index]);
}
void calling_trans(int nr_rows_A, int nr_cols_A, double * h_Ab, double * h_Ab_out)
{
const int ARRAY_BYTES_x = nr_rows_A * nr_cols_A * sizeof(double);
double * dX_in;
double * dX_out;
// allocate GPU memory
cudaMalloc((void**) &dX_in, ARRAY_BYTES_x);
cudaMalloc((void**) &dX_out, ARRAY_BYTES_x);
// transfer the array to the GPU
cudaMemcpy(dX_in, h_Ab, ARRAY_BYTES_x, cudaMemcpyHostToDevice);
trans<<<1, nr_rows_A * nr_cols_A>>>(dX_out, dX_in, nr_rows_A, nr_cols_A);
cudaMemcpy(h_Ab_out, dX_out, ARRAY_BYTES_x, cudaMemcpyDeviceToHost);
cudaFree(dX_in);
cudaFree(dX_out);
}
void mul(int nr_rows_A, int nr_cols_A, int nr_rows_B, int nr_cols_B, int nr_rows_finalproduct, int nr_cols_finalproduct, double * h_Ab, double * h_Bb, double * final)
{
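// cuBLAS assumes column-major storage, so the row-major inputs are transposed on the way in
// and the product is transposed back into row-major order at the end.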
//transpose of first matrix starts here
double *h_Ab_out;
h_Ab_out = (double *)malloc(nr_rows_A * nr_cols_A * sizeof(double));
calling_trans(nr_rows_A, nr_cols_A, h_Ab, h_Ab_out);
//transpose of second matrix starts here
double *h_Bb_out;
h_Bb_out = (double *)malloc(nr_rows_B * nr_cols_B * sizeof(double));
calling_trans(nr_rows_B, nr_cols_B, h_Bb, h_Bb_out);
//multiplication to get the resultant matrix starts here
int nr_rows_product = nr_rows_A;
int nr_cols_product = nr_cols_B;
double *product = (double *)malloc(nr_rows_product * nr_cols_product * sizeof(double));
// Allocate 3 arrays on GPU
double *d_A, *d_B, *d_C;
cudaMalloc(&d_A,nr_rows_A * nr_cols_A * sizeof(double));
cudaMalloc(&d_B,nr_rows_B * nr_cols_B * sizeof(double));
cudaMalloc(&d_C,nr_rows_product * nr_cols_product * sizeof(double));
// If you already have useful values in A and B you can copy them in GPU:
cudaMemcpy(d_A,h_Ab_out,nr_rows_A * nr_cols_A * sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(d_B,h_Bb_out,nr_rows_B * nr_cols_B * sizeof(double),cudaMemcpyHostToDevice);
gpu_blas_mmul(d_A, d_B, d_C, nr_rows_product, nr_cols_A, nr_cols_product);
cudaMemcpy(product,d_C,nr_rows_product * nr_cols_product * sizeof(double),cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// Now, taking the transpose of the resultant matrix (resultant matrix's dimensions (no.of.rows switched with no.of.cols)) gives us the final correct answer
calling_trans(nr_rows_finalproduct, nr_cols_finalproduct, product, final);
free(h_Ab_out);
free(h_Bb_out);
free(product);
}
void als_solve_X(double *X, int u, double *Y, double *Wu, int n_factors, double lambda, double *Qu, int Y_rows, int Y_cols, int W_rows, int W_cols)
{
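// Weighted ridge regression for row u of X: solves (Y*diag(Wu)*Y^T + lambda*I) x = Y*diag(Wu)*Qu^T and stores x in X[u,:].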
//std::cout<<"Inside x \n";
//find diag(Wu) --> Wu_diag -----------------------------------------------------------------------------------
double * Wu_diag;
Wu_diag = (double *)malloc(W_cols * W_cols * sizeof(double));
create_diagonal_matrix(Wu_diag, Wu, W_cols);
/* printf("find diag(Wu) --> Wu_diag is \n");
for(int i=0; i<(W_cols*W_cols); i++)
{
printf("%f \t", Wu_diag[i]);
}
printf("\n"); */
//find Y.T --> Y_trans ---------------------------------------------------------------------------------------------
double *Y_trans;
Y_trans = (double *)malloc(Y_rows * Y_cols * sizeof(double));
calling_trans(Y_rows, Y_cols, Y, Y_trans);
/* printf("find Y.T --> Y_trans is \n");
for(int i = 0; i<(Y_rows * Y_cols); i++){
printf("%f \t", Y_trans[i]);
}
printf("\n"); */
//find eye(n_factors) and multiply with lambda --> eye_lambda_matrix ------------------------------------------------
double *eye_lambda;
eye_lambda = (double *)malloc(n_factors * sizeof(double));
for(int i = 0; i< n_factors; i++){
eye_lambda[i] = lambda;
}
double * eye_lambda_matrix;
eye_lambda_matrix = (double *)malloc(n_factors * n_factors * sizeof(double));
create_diagonal_matrix(eye_lambda_matrix, eye_lambda, n_factors);
/* printf("find eye(n_factors) and multiply with lambda --> eye_lambda_matrix is \n");
for(int i = 0; i<(n_factors*n_factors); i++){
printf("%f \t", eye_lambda_matrix[i]);
}
printf("\n"); */
//dot product of Wu_diag, Y_trans --> temp_1 ----------------------------------------------------------------------------
int nr_rows_finalproduct = Y_rows;
int nr_cols_finalproduct = W_cols;
double *temp_1;
temp_1 = (double *)malloc(nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double));
mul(W_cols, W_cols, Y_cols, Y_rows, nr_rows_finalproduct, nr_cols_finalproduct, Wu_diag, Y_trans, temp_1);
/* printf("The dot product of Wu_diag, Y_trans --> temp_1 is \n");
for (int i = 0; i<(nr_rows_finalproduct * nr_cols_finalproduct); i++)
{
printf("%f \t", temp_1[i]);
}
printf("\n"); */
//clear Y_trans
free(Y_trans); // clearing CPU memory
//dot product of Y, temp_1 --> temp_2 -----------------------------------------------------------------------------------
nr_rows_finalproduct = Y_rows;
nr_cols_finalproduct = Y_rows;
double *temp_2;
temp_2 = (double *)malloc(nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double));
mul(Y_rows, Y_cols, W_cols, Y_rows, nr_rows_finalproduct, nr_cols_finalproduct, Y, temp_1, temp_2);
/* printf("dot product of Y, temp_1 --> temp_2 is \n");
for (int i = 0; i<(nr_rows_finalproduct * nr_cols_finalproduct); i++)
{
printf("%f \t", temp_2[i]);
}
printf("\n"); */
//clear temp_1
free(temp_1); // clearing CPU memory
//addition of temp_2, eye_lambda_matrix --> A (alias add_result) ----------------------------------------------------------------------------
const int ARRAY_BYTES = nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double);
double * A;
A = (double *)malloc(nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double));
double * dX_input;
double * dY_input;
double * D_A;
cudaMalloc((void**) &dX_input, ARRAY_BYTES);
cudaMalloc((void**) &dY_input, ARRAY_BYTES);
cudaMalloc((void**) &D_A, ARRAY_BYTES);
cudaMemcpy(dX_input, temp_2, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(dY_input, eye_lambda_matrix, ARRAY_BYTES, cudaMemcpyHostToDevice);
Add<<<1, (nr_rows_finalproduct * nr_cols_finalproduct)>>>(D_A, dX_input, dY_input);
cudaMemcpy(A, D_A, ARRAY_BYTES, cudaMemcpyDeviceToHost);
/* printf("addition of temp_2, eye_lambda_matrix --> A (alias add_result) is \n");
for (int i = 0; i<(nr_rows_finalproduct * nr_cols_finalproduct); i++)
{
printf("%f \t", A[i]);
}
printf("\n"); */
//clear temp_2 and eye_lambda_matrix
free(temp_2); // clearing CPU memory
free(eye_lambda);
free(eye_lambda_matrix);
//-------------------------------------
//find transpose of Qu --> Qu_trans ----------------------------------------------------------------------------
double *Qu_trans;
Qu_trans = (double *)malloc(1 * W_cols * sizeof(double));
calling_trans(1, W_cols, Qu, Qu_trans); // is it necessary doing this ?????
/* printf("find transpose of Qu --> Qu_trans is \n");
for(int i = 0; i < W_cols; i++){
printf("%f \t", Qu_trans[i]);
}
printf("\n"); */
//find dot product Wu_diag, Qu_trans --> temp3 ----------------------------------------------------------------------------
nr_rows_finalproduct = 1;
nr_cols_finalproduct = W_cols;
double *temp3;
temp3 = (double *)malloc(nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double));
mul(W_cols, W_cols, W_cols, 1, nr_rows_finalproduct, nr_cols_finalproduct, Wu_diag, Qu_trans, temp3);
/* printf("find dot product Wu_diag, Qu_trans --> temp3 is \n");
for (int i = 0; i<(nr_rows_finalproduct * nr_cols_finalproduct); i++)
{
printf("%f \t", temp3[i]);
}
printf("\n"); */
//clear Wu_diag and Qu_trans
free(Wu_diag);
free(Qu_trans);
//dot product of Y(3*5) and temp3(5*1) --> B (3*1) ----------------------------------------------------------------------------
nr_rows_finalproduct = 1;
nr_cols_finalproduct = Y_rows;
double *B;
B = (double *)malloc(nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double));
mul(Y_rows, Y_cols, W_cols, 1, nr_rows_finalproduct, nr_cols_finalproduct, Y, temp3, B);
/* printf("dot product of Y(3*5) and temp3(5*1) --> B (3*1) is \n");
for (int i = 0; i<(nr_rows_finalproduct * nr_cols_finalproduct); i++)
{
printf("%f \t", B[i]);
}
printf("\n"); */
//clear temp3
free(temp3);
//linalg(A, B) --> x_X -------------------------------------------------------------------------------------------------------------------
/* double *A_final;
A_final = (double *)malloc(Y_rows*Y_rows*sizeof(double));
std::copy(A, A + (Y_rows*Y_rows), A_final);
double *B_final;
B_final = (double *)malloc(Y_rows*1*sizeof(double));
std::copy(B, B + (Y_rows*1), B_final); */
double * x_X;
x_X = (double *)malloc(Y_rows*1*sizeof(double));
const int nrhs = 1;
lin_alg_solve(x_X, A, B, Y_rows, nrhs);
/* double * X_final;
X_final = (double *)malloc(Y_rows*1*sizeof(double));
std::copy(x_X, x_X + (Y_rows*1), X_final); */
/* printf("linalg(A, B) --> x_X is \n");
for(int i = 0; i < (Y_rows*1); i++)
printf("%f \n",x_X[i]); */
// clear A, B
free(A);
free(B);
for(int i = 0; i < n_factors; i++)
{
X[u*n_factors + i] = x_X[i];
}
free(x_X);
}
void als_solve_Y(double * YT, int i_Y, double * X, double * Wi, int n_factors, double lambda, double * Qi, int X_rows, int X_cols, int W_rows, int W_cols)
{
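// Weighted ridge regression for column i_Y of Y: solves (X^T*diag(Wi)*X + lambda*I) y = X^T*diag(Wi)*Qi and stores y as row i_Y of YT.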
// create diagonal matrix of Wi --> Wi_diag ------------------------------------------------------------------------------------------
double * Wi_diag;
Wi_diag = (double *)malloc(W_rows * W_rows * sizeof(double));
create_diagonal_matrix(Wi_diag, Wi, W_rows);
/* printf("find diag(Wi) --> Wi_diag is \n");
for(int i=0; i<(W_rows*W_rows); i++)
{
printf("%f \t", Wi_diag[i]);
}
printf("\n"); */
//dot product of Wi_diag and X --> matrix1 ------------------------------------------------------------------------------------------
int nr_rows_finalproduct = X_cols;
int nr_cols_finalproduct = W_rows;
double *matrix1;
matrix1 = (double *)malloc(nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double));
mul(W_rows, W_rows, X_rows, X_cols, nr_rows_finalproduct, nr_cols_finalproduct, Wi_diag, X, matrix1);
/* printf("The dot product of Wi_diag and X --> matrix1 is \n");
for (int i = 0; i<(nr_rows_finalproduct * nr_cols_finalproduct); i++)
{
printf("%f \t", matrix1[i]);
}
printf("\n"); */
//transpose of X --> X_trans ---------------------------------------------------------------------------------------------------------
double *X_trans;
X_trans = (double *)malloc(X_cols * X_rows * sizeof(double));
calling_trans(X_rows, X_cols, X, X_trans);
/* printf("find X.T --> X_trans is \n");
for(int i = 0; i<(X_cols * X_rows); i++){
printf("%f \t", X_trans[i]);
}
printf("\n"); */
//dot product of X_trans and matrix1 --> matrix2 ------------------------------------------------------------------------------------------
nr_rows_finalproduct = X_cols;
nr_cols_finalproduct = X_cols;
double *matrix2;
matrix2 = (double *)malloc(nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double));
mul(X_cols, X_rows, W_rows, X_cols, nr_rows_finalproduct, nr_cols_finalproduct, X_trans, matrix1, matrix2);
/* printf("The dot product of X_trans and matrix1 --> matrix2 is \n");
for (int i = 0; i<(nr_rows_finalproduct * nr_cols_finalproduct); i++)
{
printf("%f \t", matrix2[i]);
}
printf("\n"); */
free(matrix1);
//find eye(n_factors) and multiply with lambda --> eye_lambda (matrix) --------------------------------------------------------------------------------
double *eye_lambda;
eye_lambda = (double *)malloc(n_factors * sizeof(double));
for(int i = 0; i< n_factors; i++){
eye_lambda[i] = lambda;
}
double * eye_lambda_matrix;
eye_lambda_matrix = (double *)malloc(n_factors * n_factors * sizeof(double));
create_diagonal_matrix(eye_lambda_matrix, eye_lambda, n_factors);
/* printf("find eye(n_factors) and multiply with lambda --> eye_lambda_matrix is \n");
for(int i = 0; i<(n_factors*n_factors); i++){
printf("%f \t", eye_lambda_matrix[i]);
}
printf("\n"); */
free(eye_lambda);
//addition of matrix2 and eye_lambda_matrix --> A ----------------------------------------------------------------------------------------------------
const int ARRAY_BYTES = nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double);
double * A;
A = (double *)malloc(nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double));
double * dX_input;
double * dY_input;
double * D_A;
cudaMalloc((void**) &dX_input, ARRAY_BYTES);
cudaMalloc((void**) &dY_input, ARRAY_BYTES);
cudaMalloc((void**) &D_A, ARRAY_BYTES);
cudaMemcpy(dX_input, matrix2, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(dY_input, eye_lambda_matrix, ARRAY_BYTES, cudaMemcpyHostToDevice);
Add<<<1, (nr_rows_finalproduct * nr_cols_finalproduct)>>>(D_A, dX_input, dY_input);
cudaMemcpy(A, D_A, ARRAY_BYTES, cudaMemcpyDeviceToHost);
/* printf("addition of matrix2, eye_lambda --> A is \n");
for (int i = 0; i<(nr_rows_finalproduct * nr_cols_finalproduct); i++)
{
printf("%f \t", A[i]);
}
printf("\n"); */
free(eye_lambda_matrix);
cudaFree(dX_input);
cudaFree(dY_input);
cudaFree(D_A);
free(matrix2);
//------------------------------------------
//dot product of Wi_diag and Qi --> matrix3 ------------------------------------------------------------------------------------------
nr_rows_finalproduct = 1;
nr_cols_finalproduct = W_rows;
double *matrix3;
matrix3 = (double *)malloc(nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double));
mul(W_rows, W_rows, W_rows, 1, nr_rows_finalproduct, nr_cols_finalproduct, Wi_diag, Qi, matrix3);
/* printf("The dot product of Wi_diag and Qi --> matrix3 is \n");
for (int i = 0; i<(nr_rows_finalproduct * nr_cols_finalproduct); i++)
{
printf("%f \t", matrix3[i]);
}
printf("\n"); */
free(Wi_diag);
//dot product of X_trans and matrix3 --> B ------------------------------------------------------------------------------------------
nr_rows_finalproduct = 1;
nr_cols_finalproduct = X_cols;
double *B;
B = (double *)malloc(nr_rows_finalproduct * nr_cols_finalproduct * sizeof(double));
mul(X_cols, X_rows, W_rows, 1, nr_rows_finalproduct, nr_cols_finalproduct, X_trans, matrix3, B);
/* printf("The dot product of X_trans and matrix3 --> B is \n");
for (int i = 0; i<(nr_rows_finalproduct * nr_cols_finalproduct); i++)
{
printf("%f \t", B[i]);
}
printf("\n"); */
free(X_trans);
free(matrix3);
//linalg(A, B) --> x_Y -----------------------------------------------------------------------------------------------------------
double * x_Y;
x_Y = (double *)malloc(X_cols*1*sizeof(double));
const int nrhs = 1;
lin_alg_solve(x_Y, A, B, X_cols, nrhs);
/* printf("linalg(A, B) --> x_Y is \n");
for(int i = 0; i < (X_cols*1); i++)
printf("%f \n",x_Y[i]); */
for(int i = 0; i < n_factors; i++)
{
YT[i_Y*n_factors + i] = x_Y[i];
}
free(A);
free(B);
free(x_Y);
}
__global__ void Subtraction(double * dX_out, double * dX_in, double * dY_in)
{
int idx = threadIdx.x;
dX_out[idx] = dX_in[idx] - dY_in[idx];
}
__global__ void element_multiplication(double * dX_out, double * dX_in, double * dY_in)
{
int idx = threadIdx.x;
dX_out[idx] = dX_in[idx] * dY_in[idx];
}
void rmse(double * squares, double * Q, double * X, double * Y, double * W, int Q_rows, int Q_cols, int n_factors)
{
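// Fills `squares` with the element-wise, observation-weighted squared reconstruction error: (W .* (Q - X*Y)).^2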
//dot product of X and Y --> Q_hat
int Q_hat_rows = Q_cols;
int Q_hat_cols = Q_rows;
double *Q_hat;
Q_hat = (double *)malloc(Q_hat_rows * Q_hat_cols * sizeof(double));
mul(Q_rows, n_factors, n_factors, Q_cols, Q_hat_rows, Q_hat_cols, X, Y, Q_hat);
/* printf("The dot product of X and Y --> Q_hat is \n");
for (int i = 0; i<(Q_hat_rows * Q_hat_cols); i++)
{
printf("%f \t", Q_hat[i]);
}
printf("\n"); */
//subtraction of Q_hat from Q --> sub
const int ARRAY_BYTES = Q_rows * Q_cols * sizeof(double);
double * sub;
sub = (double *)malloc(Q_rows * Q_cols * sizeof(double));
double * dX_input;
double * dY_input;
double * D_sub;
cudaMalloc((void**) &dX_input, ARRAY_BYTES);
cudaMalloc((void**) &dY_input, ARRAY_BYTES);
cudaMalloc((void**) &D_sub, ARRAY_BYTES);
cudaMemcpy(dX_input, Q, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(dY_input, Q_hat, ARRAY_BYTES, cudaMemcpyHostToDevice);
Subtraction<<<1, (Q_rows * Q_cols)>>>(D_sub, dX_input, dY_input);
cudaMemcpy(sub, D_sub, ARRAY_BYTES, cudaMemcpyDeviceToHost);
cudaFree(dX_input);
cudaFree(dY_input);
cudaFree(D_sub);
/* printf("subtraction of Q_hat from Q --> sub is \n");
for (int i = 0; i<(Q_rows * Q_cols); i++)
{
printf("%f \t", sub[i]);
}
printf("\n"); */
// element by element multiplication of W and sub --> mul_result
//const int ARRAY_BYTES = Q_rows * Q_cols * sizeof(double);
double * mul_result;
mul_result = (double *)malloc(Q_rows * Q_cols * sizeof(double));
double * D_mul;
cudaMalloc((void**) &dX_input, ARRAY_BYTES);
cudaMalloc((void**) &dY_input, ARRAY_BYTES);
cudaMalloc((void**) &D_mul, ARRAY_BYTES);
cudaMemcpy(dX_input, W, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(dY_input, sub, ARRAY_BYTES, cudaMemcpyHostToDevice);
element_multiplication<<<1, (Q_rows * Q_cols)>>>(D_mul, dX_input, dY_input);
cudaMemcpy(mul_result, D_mul, ARRAY_BYTES, cudaMemcpyDeviceToHost);
cudaFree(dX_input);
cudaFree(dY_input);
cudaFree(D_mul);
/* printf("element by element multiplication of W and sub --> mul_result is \n");
for (int i = 0; i<(Q_rows * Q_cols); i++)
{
printf("%f \t", mul_result[i]);
}
printf("\n"); */
// element by element multiplication of mul_result and mul_result --> squares
double * D_SoS;
cudaMalloc((void**) &dX_input, ARRAY_BYTES);
cudaMalloc((void**) &dY_input, ARRAY_BYTES);
cudaMalloc((void**) &D_SoS, ARRAY_BYTES);
cudaMemcpy(dX_input, mul_result, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(dY_input, mul_result, ARRAY_BYTES, cudaMemcpyHostToDevice);
element_multiplication<<<1, (Q_rows * Q_cols)>>>(D_SoS, dX_input, dY_input);
cudaMemcpy(squares, D_SoS, ARRAY_BYTES, cudaMemcpyDeviceToHost);
cudaFree(dX_input);
cudaFree(dY_input);
cudaFree(D_SoS);
/* printf("element by element multiplication of mul_result and mul_result --> squares is \n");
for (int i = 0; i<(Q_rows * Q_cols); i++)
{
printf("%f \t", squares[i]);
}
printf("\n"); */
}
int main()
{
int Q_rows = 4;
int Q_cols = 5;
double Q_temp[20] = {3, 4, 0, 2, 0, 0, 2, 1, 0, 0, 1, 0, 3, 4, 0, 0, 0, 0, 1, 3};
double *Q;
Q = (double *)malloc(Q_rows * Q_cols * sizeof(double));
memcpy(Q, Q_temp, sizeof(double)*20);
int n_factors = 3;
double lambda = 0.1;
//int n_iterations = 20;
int X_rows = 4;
int X_cols = n_factors;
double X_temp[12] = {1.04537429, 3.45278132, 3.47493422, 2.24801288, 4.88137731, 1.66288503, 4.81317032, 4.63570752, 1.36892613, 3.32203655, 3.31923711, 2.27048096};
double *X;
X = (double *)malloc(X_rows * X_cols * sizeof(double));
memcpy(X, X_temp, sizeof(double)*12);
//for(int i = 0; i<12;i++)
// printf("%.8f \t", X[i]);
printf("\n");
int Y_rows = n_factors;
int Y_cols = 5;
double Y_temp[15] = {1.59982314, 4.78360092, 3.45781337, 3.13286951, 0.50542705, 3.83681956, 2.88250821, 1.1667597 , 2.43170423, 4.06026517, 0.65686686, 2.94705632, 0.46822364, 1.98082364, 1.54905706};
double *Y;
Y = (double *)malloc(Y_rows * Y_cols * sizeof(double));
memcpy(Y, Y_temp, sizeof(double)*15);
int W_rows = Q_rows;
int W_cols = Q_cols;
double W_temp[20] = {1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1};
double *W;
W = (double *)malloc(W_rows * W_cols * sizeof(double));
memcpy(W, W_temp, sizeof(double)*20);
/* for (int i = 0; i<20; i++)
{
printf("%f \t", W[i]);
}
printf("\n");
*/
//Main computation starts here with n_iterations
for(int it = 0; it<20;it++)
{
//updating all rows of X starts here
double *Wu;
Wu = (double *)malloc(W_cols * sizeof(double));
//Wu[0] = 1; Wu[1] = 1; Wu[2] = 0; Wu[3] = 1; Wu[4] = 0;
double *Qu;
Qu = (double *)malloc(W_cols * sizeof(double));
//Qu[0] = 3; Qu[1] = 4; Qu[2] = 0; Qu[3] = 2; Qu[4] = 0;
//#pragma omp parallel for private(Wu, Qu)
for(int u = 0; u < W_rows; u++)
{
for(int k = 0; k < W_cols; k++)
{
Wu[k] = W[u*W_cols + k];
Qu[k] = Q[u*W_cols + k];
}
als_solve_X(X, u, Y, Wu, n_factors, lambda, Qu, Y_rows, Y_cols, W_rows, W_cols);
}
/* printf("X with updated row is \n");
for (int i = 0; i< 12; i++)
{
printf("%f \t", X[i]);
}
printf("\n"); */
//updating all columns of Y starts here
double *Wi;
Wi = (double *)malloc(W_rows * sizeof(double));
//Wi[0] = 1; Wi[1] = 0; Wi[2] = 1; Wi[3] = 0;
double *Qi;
Qi = (double *)malloc(W_rows * sizeof(double));
//Qi[0] = 3; Qi[1] = 0; Qi[2] = 1; Qi[3] = 0;
//take the transpose of W to get WT
double *WT;
WT = (double *)malloc(W_cols * W_rows * sizeof(double));
calling_trans(W_rows, W_cols, W, WT);
/* printf("find W.T --> WT is \n");
for(int i = 0; i<(W_cols * W_rows); i++){
printf("%f \t", WT[i]);
}
printf("\n"); */
//take the transpose of Q to get QT
double *QT;
QT = (double *)malloc(Q_cols * Q_rows * sizeof(double));
calling_trans(Q_rows, Q_cols, Q, QT);
//take the transpose of Y to get YT
double *YT;
YT = (double *)malloc(Y_cols * Y_rows * sizeof(double));
calling_trans(Y_rows, Y_cols, Y, YT);
//#pragma omp parallel for private(Wi, Qi)
for(int i_Y = 0; i_Y < W_cols; i_Y++)
{
for(int p = 0; p < W_rows; p++)
{
Wi[p] = WT[i_Y*W_rows + p];
Qi[p] = QT[i_Y*W_rows + p];
}
als_solve_Y(YT, i_Y, X, Wi, n_factors, lambda, Qi, X_rows, X_cols, W_rows, W_cols);
}
calling_trans(Y_cols, Y_rows, YT, Y);
//call rmse() function to get rmse values
double * Squares;
Squares = (double *)malloc(Q_rows * Q_cols * sizeof(double));
rmse(Squares, Q, X, Y, W, Q_rows, Q_cols, n_factors);
double error = 0;
for(int i = 0; i < (Q_rows * Q_cols); i++)
{
error = error + Squares[i];
}
printf("%f \n", error);
} //n-iterations end here
/* printf("X with updated row is \n");
for (int i = 0; i< 12; i++)
{
printf("%f \t", X[i]);
}
printf("\n");
printf("Y with updated column is \n");
for (int i = 0; i< 15; i++)
{
printf("%f \t", Y[i]);
}
printf("\n"); */
//dot product of X and Y starts here AFTER the end of n_iterations --> Q_hat
int Q_hat_rows = Q_cols;
int Q_hat_cols = Q_rows;
double *Q_hat;
Q_hat = (double *)malloc(Q_hat_rows * Q_hat_cols * sizeof(double));
mul(Q_rows, Y_rows, Y_rows, Q_cols, Q_hat_rows, Q_hat_cols, X, Y, Q_hat);
printf("The dot product of X and Y --> Q_hat is \n");
for (int i = 0; i<(Q_hat_rows * Q_hat_cols); i++)
{
printf("%f \t", Q_hat[i]);
}
printf("\n");
printf("Q is \n");
for (int i = 0; i<(Q_rows * Q_cols); i++)
{
printf("%f \t", Q[i]);
}
printf("\n");
// printing the recommendations
for(int i = 0; i < Q_rows; i++)
{
for(int j = 0; j < Q_cols; j++)
{
if((Q[i * Q_cols + j] == 0) && (Q_hat[i * Q_cols + j] > 1.5))
{
printf("user %d may like movie %d \n", i, j);
}
}
}
}
|
499e4ce43d0c9f83b466898d546fddf6c7c38a71.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <chrono>
using namespace std::chrono;
// 0,0 0,1 0,2
// 1,0 1,1 1,2
// => 0, 1, 2, 3, 4, 5
// => numberOfColumns * currentRow + currentColumn
__global__ void matrixmult(float* Cptr, float* Aptr, float* Bptr, int m, int n) {
// blockDim.x = number of threads in the current Block
// threadIdx.x = index of current thread
int Cidx = blockIdx.x * blockDim.x + threadIdx.x; // ^= n * i + k
int i = Cidx / n;
int k = Cidx - n * i;
if (n * m > Cidx) {
for (int j = 0; j < n; j++) {
Cptr[Cidx] += Aptr[n * i + j] * Bptr[n * j + k];
}
}
}
float* createRandomMatrix(float *matrix, int m, int n) {
matrix = new float[m * n];
for (int r = 0; r < m; r++) {
for (int c = 0; c < n; c++) {
matrix[n * r + c] = static_cast <float> (rand() % 10) / 1.0;
}
}
return matrix;
}
float* createEmptyMatrix(float* matrix, int m, int n) {
matrix = new float[m * n];
for (int r = 0; r < m; r++) {
for (int c = 0; c < n; c++) {
matrix[n * r + c] = 0.0;
}
}
return matrix;
}
void print(float* matrix, int m, int n) {
for (int r = 0; r < m; r++) {
for (int c = 0; c < n; c++) {
std::cout << matrix[n * r + c] << " ";
}
std::cout << "\n";
}
std::cout << "\n";
}
void deleteMatrix(float* matrix) {
delete[] matrix;
}
int main() {
int m = 1440;
int n = 1440;
int block_size = 512;
	// host matrix pointers; createRandomMatrix/createEmptyMatrix below allocate the actual arrays with new[]
	float* matrixA = nullptr;
	float* matrixB = nullptr;
	float* h_matrixC = nullptr;
float* d_matrixA;
float* d_matrixB;
float* d_matrixC;
/*lowerbound = 0;
upperbound = m;*/
matrixA = createRandomMatrix(matrixA, m, n);
matrixB = createRandomMatrix(matrixB, m, n);
h_matrixC = createEmptyMatrix(h_matrixC, m, n);
//Allocate space for device copies in device memory
hipMalloc(&d_matrixA, (m * n) * sizeof(float));
hipMalloc(&d_matrixB, (m * n) * sizeof(float));
hipMalloc(&d_matrixC, (m * n) * sizeof(float));
//hipMalloc(&d_lowerbound, sizeof(int));
//hipMalloc(&d_upperbound, sizeof(int));
//print(matrixA, m, n);
//print(matrixB, m, n);
hipMemcpy(d_matrixA, matrixA, (m * n) * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_matrixB, matrixB, (m * n) * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_matrixC, h_matrixC, (m * n) * sizeof(float), hipMemcpyHostToDevice);
//hipMemcpy(d_lowerbound, &lowerbound, sizeof(int), hipMemcpyHostToDevice);
//hipMemcpy(d_upperbound, &upperbound, sizeof(int), hipMemcpyHostToDevice);
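	// launch enough blocks so that every output element of C gets its own thread (round up)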
int Blocks = ((n*m) + block_size - 1) / block_size;
std::cout << "[+] Calculation started with " << (Blocks * block_size) << " Threads";
auto start = high_resolution_clock::now();
//Run Kernel on GPU
hipLaunchKernelGGL(( matrixmult) , dim3(Blocks), dim3(block_size) , 0, 0, d_matrixC, d_matrixA, d_matrixB, m, n);
//Wait for GPU to finish
hipDeviceSynchronize();
auto stop = high_resolution_clock::now();
hipMemcpy(h_matrixC, d_matrixC, (m * n) * sizeof(float), hipMemcpyDeviceToHost);
std::cout << "\n[+] Multithreaded calculation finished \n[+] Duration: " << duration<double>(stop - start).count() << " seconds";
/*print(h_matrixC, m, n);*/
//Free memory
hipFree(d_matrixA);
hipFree(d_matrixB);
hipFree(d_matrixC);
delete[] matrixA;
delete[] matrixB;
delete[] h_matrixC;
} | 499e4ce43d0c9f83b466898d546fddf6c7c38a71.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <chrono>
using namespace std::chrono;
// 0,0 0,1 0,2
// 1,0 1,1 1,2
// => 0, 1, 2, 3, 4, 5
// => numberOfColumns * currentRow + currentColumn
__global__ void matrixmult(float* Cptr, float* Aptr, float* Bptr, int m, int n) {
// blockDim.x = number of threads in the current Block
// threadIdx.x = index of current thread
int Cidx = blockIdx.x * blockDim.x + threadIdx.x; // ^= n * i + k
int i = Cidx / n;
int k = Cidx - n * i;
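	// i is the row and k the column of the C element this thread computes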
if (n * m > Cidx) {
for (int j = 0; j < n; j++) {
Cptr[Cidx] += Aptr[n * i + j] * Bptr[n * j + k];
}
}
}
float* createRandomMatrix(float *matrix, int m, int n) {
matrix = new float[m * n];
for (int r = 0; r < m; r++) {
for (int c = 0; c < n; c++) {
matrix[n * r + c] = static_cast <float> (rand() % 10) / 1.0;
}
}
return matrix;
}
float* createEmptyMatrix(float* matrix, int m, int n) {
matrix = new float[m * n];
for (int r = 0; r < m; r++) {
for (int c = 0; c < n; c++) {
matrix[n * r + c] = 0.0;
}
}
return matrix;
}
void print(float* matrix, int m, int n) {
for (int r = 0; r < m; r++) {
for (int c = 0; c < n; c++) {
std::cout << matrix[n * r + c] << " ";
}
std::cout << "\n";
}
std::cout << "\n";
}
void deleteMatrix(float* matrix) {
delete[] matrix;
}
int main() {
int m = 1440;
int n = 1440;
int block_size = 512;
	//initialize the float pointers; the host matrices are allocated with new[] in createRandomMatrix/createEmptyMatrix below
	float* matrixA = nullptr;
	float* matrixB = nullptr;
	float* h_matrixC = nullptr;
float* d_matrixA;
float* d_matrixB;
float* d_matrixC;
/*lowerbound = 0;
upperbound = m;*/
matrixA = createRandomMatrix(matrixA, m, n);
matrixB = createRandomMatrix(matrixB, m, n);
h_matrixC = createEmptyMatrix(h_matrixC, m, n);
//Allocate space for device copies in device memory
cudaMalloc(&d_matrixA, (m * n) * sizeof(float));
cudaMalloc(&d_matrixB, (m * n) * sizeof(float));
cudaMalloc(&d_matrixC, (m * n) * sizeof(float));
//cudaMalloc(&d_lowerbound, sizeof(int));
//cudaMalloc(&d_upperbound, sizeof(int));
//print(matrixA, m, n);
//print(matrixB, m, n);
cudaMemcpy(d_matrixA, matrixA, (m * n) * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_matrixB, matrixB, (m * n) * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_matrixC, h_matrixC, (m * n) * sizeof(float), cudaMemcpyHostToDevice);
//cudaMemcpy(d_lowerbound, &lowerbound, sizeof(int), cudaMemcpyHostToDevice);
//cudaMemcpy(d_upperbound, &upperbound, sizeof(int), cudaMemcpyHostToDevice);
int Blocks = ((n*m) + block_size - 1) / block_size;
std::cout << "[+] Calculation started with " << (Blocks * block_size) << " Threads";
auto start = high_resolution_clock::now();
//Run Kernel on GPU
matrixmult <<<Blocks, block_size >>> (d_matrixC, d_matrixA, d_matrixB, m, n);
//Wait for GPU to finish
cudaDeviceSynchronize();
auto stop = high_resolution_clock::now();
cudaMemcpy(h_matrixC, d_matrixC, (m * n) * sizeof(float), cudaMemcpyDeviceToHost);
std::cout << "\n[+] Multithreaded calculation finished \n[+] Duration: " << duration<double>(stop - start).count() << " seconds";
/*print(h_matrixC, m, n);*/
//Free memory
cudaFree(d_matrixA);
cudaFree(d_matrixB);
cudaFree(d_matrixC);
delete[] matrixA;
delete[] matrixB;
delete[] h_matrixC;
} |
dc809e9b6604776b6d6bc4ed500bdde3f74e6cdb.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <iomanip>
#include <hip/hip_runtime.h>
#include <gauge_field.h>
#include <quda_matrix.h>
#include <svd_quda.h>
#include <hisq_links_quda.h>
namespace quda{
#ifndef FL_UNITARIZE_PI
#define FL_UNITARIZE_PI 3.14159265358979323846
#endif
#ifndef FL_UNITARIZE_PI23
#define FL_UNITARIZE_PI23 FL_UNITARIZE_PI*2.0/3.0
#endif
__constant__ int INPUT_PADDING=0;
__constant__ int OUTPUT_PADDING=0;
__constant__ int DEV_MAX_ITER = 20;
static int HOST_MAX_ITER = 20;
__constant__ double DEV_FL_MAX_ERROR;
__constant__ double DEV_FL_UNITARIZE_EPS;
__constant__ bool DEV_FL_REUNIT_ALLOW_SVD;
__constant__ bool DEV_FL_REUNIT_SVD_ONLY;
__constant__ double DEV_FL_REUNIT_SVD_REL_ERROR;
__constant__ double DEV_FL_REUNIT_SVD_ABS_ERROR;
__constant__ bool DEV_FL_CHECK_UNITARIZATION;
static double HOST_FL_MAX_ERROR;
static double HOST_FL_UNITARIZE_EPS;
static bool HOST_FL_REUNIT_ALLOW_SVD;
static bool HOST_FL_REUNIT_SVD_ONLY;
static double HOST_FL_REUNIT_SVD_REL_ERROR;
static double HOST_FL_REUNIT_SVD_ABS_ERROR;
static bool HOST_FL_CHECK_UNITARIZATION;
void setUnitarizeLinksPadding(int input_padding_h, int output_padding_h)
{
hipMemcpyToSymbol(INPUT_PADDING, &input_padding_h, sizeof(int));
hipMemcpyToSymbol(OUTPUT_PADDING, &output_padding_h, sizeof(int));
return;
}
template<class Cmplx>
__device__ __host__
bool isUnitary(const Matrix<Cmplx,3>& matrix, double max_error)
{
const Matrix<Cmplx,3> identity = conj(matrix)*matrix;
for(int i=0; i<3; ++i){
if( fabs(identity(i,i).x - 1.0) > max_error || fabs(identity(i,i).y) > max_error) return false;
for(int j=i+1; j<3; ++j){
if( fabs(identity(i,j).x) > max_error || fabs(identity(i,j).y) > max_error
|| fabs(identity(j,i).x) > max_error || fabs(identity(j,i).y) > max_error ){
return false;
}
}
}
return true;
}
template<class Cmplx>
__device__ __host__
bool isUnitarizedLinkConsistent(const Matrix<Cmplx,3>& initial_matrix,
const Matrix<Cmplx,3>& unitary_matrix,
double max_error)
{
Matrix<Cmplx,3> temporary;
temporary = conj(initial_matrix)*unitary_matrix;
temporary = temporary*temporary - conj(initial_matrix)*initial_matrix;
for(int i=0; i<3; ++i){
for(int j=0; j<3; ++j){
if( fabs(temporary(i,j).x) > max_error || fabs(temporary(i,j).y) > max_error){
return false;
}
}
}
return true;
}
void setUnitarizeLinksConstants(double unitarize_eps_h, double max_error_h,
bool allow_svd_h, bool svd_only_h,
double svd_rel_error_h, double svd_abs_error_h,
bool check_unitarization_h)
{
// not_set is only initialised once
static bool not_set=true;
if(not_set){
hipMemcpyToSymbol(DEV_FL_UNITARIZE_EPS, &unitarize_eps_h, sizeof(double));
hipMemcpyToSymbol(DEV_FL_REUNIT_ALLOW_SVD, &allow_svd_h, sizeof(bool));
hipMemcpyToSymbol(DEV_FL_REUNIT_SVD_ONLY, &svd_only_h, sizeof(bool));
hipMemcpyToSymbol(DEV_FL_REUNIT_SVD_REL_ERROR, &svd_rel_error_h, sizeof(double));
hipMemcpyToSymbol(DEV_FL_REUNIT_SVD_ABS_ERROR, &svd_abs_error_h, sizeof(double));
hipMemcpyToSymbol(DEV_FL_MAX_ERROR, &max_error_h, sizeof(double));
hipMemcpyToSymbol(DEV_FL_CHECK_UNITARIZATION, &check_unitarization_h, sizeof(bool));
HOST_FL_UNITARIZE_EPS = unitarize_eps_h;
HOST_FL_REUNIT_ALLOW_SVD = allow_svd_h;
HOST_FL_REUNIT_SVD_ONLY = svd_only_h;
HOST_FL_REUNIT_SVD_REL_ERROR = svd_rel_error_h;
HOST_FL_REUNIT_SVD_ABS_ERROR = svd_abs_error_h;
HOST_FL_MAX_ERROR = max_error_h;
HOST_FL_CHECK_UNITARIZATION = check_unitarization_h;
not_set = false;
}
checkCudaError();
return;
}
template<class T>
__device__ __host__
T getAbsMin(const T* const array, int size){
T min = fabs(array[0]);
for(int i=1; i<size; ++i){
T abs_val = fabs(array[i]);
if((abs_val) < min){ min = abs_val; }
}
return min;
}
template<class Real>
__device__ __host__
inline bool checkAbsoluteError(Real a, Real b, Real epsilon)
{
if( fabs(a-b) < epsilon) return true;
return false;
}
template<class Real>
__device__ __host__
inline bool checkRelativeError(Real a, Real b, Real epsilon)
{
if( fabs((a-b)/b) < epsilon ) return true;
return false;
}
// Compute the reciprocal square root of the matrix q
// Also modify q if the eigenvalues are dangerously small.
template<class Cmplx>
__device__ __host__
bool reciprocalRoot(const Matrix<Cmplx,3>& q, Matrix<Cmplx,3>* res){
Matrix<Cmplx,3> qsq, tempq;
typename RealTypeId<Cmplx>::Type c[3];
typename RealTypeId<Cmplx>::Type g[3];
qsq = q*q;
tempq = qsq*q;
c[0] = getTrace(q).x;
c[1] = getTrace(qsq).x/2.0;
c[2] = getTrace(tempq).x/3.0;
g[0] = g[1] = g[2] = c[0]/3.;
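    // solve the characteristic cubic of q for its eigenvalues g[i] using the trigonometric (Cardano) method below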
typename RealTypeId<Cmplx>::Type r,s,theta;
s = c[1]/3. - c[0]*c[0]/18;
#ifdef __CUDA_ARCH__
#define FL_UNITARIZE_EPS DEV_FL_UNITARIZE_EPS
#else
#define FL_UNITARIZE_EPS HOST_FL_UNITARIZE_EPS
#endif
#ifdef __CUDA_ARCH__
#define FL_REUNIT_SVD_REL_ERROR DEV_FL_REUNIT_SVD_REL_ERROR
#define FL_REUNIT_SVD_ABS_ERROR DEV_FL_REUNIT_SVD_ABS_ERROR
#else // cpu
#define FL_REUNIT_SVD_REL_ERROR HOST_FL_REUNIT_SVD_REL_ERROR
#define FL_REUNIT_SVD_ABS_ERROR HOST_FL_REUNIT_SVD_ABS_ERROR
#endif
typename RealTypeId<Cmplx>::Type cosTheta;
if(fabs(s) >= FL_UNITARIZE_EPS){
const typename RealTypeId<Cmplx>::Type sqrt_s = sqrt(s);
r = c[2]/2. - (c[0]/3.)*(c[1] - c[0]*c[0]/9.);
cosTheta = r/(sqrt_s*sqrt_s*sqrt_s);
if(fabs(cosTheta) >= 1.0){
if( r > 0 ){
theta = 0.0;
}else{
theta = FL_UNITARIZE_PI;
}
}else{
theta = acos(cosTheta);
}
g[0] = c[0]/3 + 2*sqrt_s*cos( theta/3 );
g[1] = c[0]/3 + 2*sqrt_s*cos( theta/3 + FL_UNITARIZE_PI23 );
g[2] = c[0]/3 + 2*sqrt_s*cos( theta/3 + 2*FL_UNITARIZE_PI23 );
}
// Check the eigenvalues, if the determinant does not match the product of the eigenvalues
// return false. Then call SVD instead.
typename RealTypeId<Cmplx>::Type det = getDeterminant(q).x;
if( fabs(det) < FL_REUNIT_SVD_ABS_ERROR ){
return false;
}
if( checkRelativeError(g[0]*g[1]*g[2],det,FL_REUNIT_SVD_REL_ERROR) == false ) return false;
// At this point we have finished with the c's
// use these to store sqrt(g)
for(int i=0; i<3; ++i) c[i] = sqrt(g[i]);
// done with the g's, use these to store u, v, w
g[0] = c[0]+c[1]+c[2];
g[1] = c[0]*c[1] + c[0]*c[2] + c[1]*c[2];
g[2] = c[0]*c[1]*c[2];
const typename RealTypeId<Cmplx>::Type & denominator = g[2]*(g[0]*g[1]-g[2]);
c[0] = (g[0]*g[1]*g[1] - g[2]*(g[0]*g[0]+g[1]))/denominator;
c[1] = (-g[0]*g[0]*g[0] - g[2] + 2.*g[0]*g[1])/denominator;
c[2] = g[0]/denominator;
tempq = c[1]*q + c[2]*qsq;
// Add a real scalar
tempq(0,0).x += c[0];
tempq(1,1).x += c[0];
tempq(2,2).x += c[0];
*res = tempq;
return true;
}
template<class Cmplx>
__host__ __device__
bool unitarizeLinkMILC(const Matrix<Cmplx,3>& in, Matrix<Cmplx,3>* const result)
{
Matrix<Cmplx,3> u;
#ifdef __CUDA_ARCH__
#define FL_REUNIT_SVD_ONLY DEV_FL_REUNIT_SVD_ONLY
#define FL_REUNIT_ALLOW_SVD DEV_FL_REUNIT_ALLOW_SVD
#else
#define FL_REUNIT_SVD_ONLY HOST_FL_REUNIT_SVD_ONLY
#define FL_REUNIT_ALLOW_SVD HOST_FL_REUNIT_ALLOW_SVD
#endif
if( !FL_REUNIT_SVD_ONLY ){
if( reciprocalRoot<Cmplx>(conj(in)*in,&u) ){
*result = in*u;
return true;
}
}
    // If we've got this far, then the Cayley-Hamilton unitarization
// has failed. If SVD is not allowed, the unitarization has failed.
if( !FL_REUNIT_ALLOW_SVD ) return false;
Matrix<Cmplx,3> v;
typename RealTypeId<Cmplx>::Type singular_values[3];
computeSVD<Cmplx>(in, u, v, singular_values); // should pass pointers to u, v I guess
*result = u*conj(v);
return true;
} // unitarizeMILC
template<class Cmplx>
__host__ __device__
bool unitarizeLinkSVD(const Matrix<Cmplx,3>& in, Matrix<Cmplx,3>* const result)
{
Matrix<Cmplx,3> u, v;
typename RealTypeId<Cmplx>::Type singular_values[3];
computeSVD<Cmplx>(in, u, v, singular_values); // should pass pointers to u,v I guess
*result = u*conj(v);
#ifdef __CUDA_ARCH__
#define FL_MAX_ERROR DEV_FL_MAX_ERROR
#else
#define FL_MAX_ERROR HOST_FL_MAX_ERROR
#endif
if(isUnitary(*result,FL_MAX_ERROR)==false)
{
#if (!defined(__CUDA_ARCH__) || (__COMPUTE_CAPABILITY__>=200))
printf("ERROR: Link unitarity test failed\n");
printf("TOLERANCE: %g\n", FL_MAX_ERROR);
#endif
return false;
}
return true;
}
#undef FL_MAX_ERROR
template<class Cmplx>
__host__ __device__
bool unitarizeLinkNewton(const Matrix<Cmplx,3>& in, Matrix<Cmplx,3>* const result)
{
Matrix<Cmplx,3> u, uinv;
u = in;
#ifdef __CUDA_ARCH__
#define MAX_ITER DEV_MAX_ITER
#else
#define MAX_ITER HOST_MAX_ITER
#endif
for(int i=0; i<MAX_ITER; ++i){
computeMatrixInverse(u, &uinv);
u = 0.5*(u + conj(uinv));
}
#undef MAX_ITER
if(isUnitarizedLinkConsistent(in,u,0.0000001)==false)
{
#if (!defined(__CUDA_ARCH__) || (__COMPUTE_CAPABILITY__>=200))
printf("ERROR: Unitarized link is not consistent with incoming link\n");
#endif
return false;
}
*result = u;
return true;
}
template<class Cmplx>
__global__ void getUnitarizedField(const Cmplx* inlink_even, const Cmplx* inlink_odd,
Cmplx* outlink_even, Cmplx* outlink_odd,
int* num_failures, const int threads)
{
int mem_idx = blockIdx.x*blockDim.x + threadIdx.x;
if (mem_idx >= threads) return;
const Cmplx* inlink;
Cmplx* outlink;
inlink = inlink_even;
outlink = outlink_even;
if(mem_idx >= Vh){
mem_idx = mem_idx - Vh;
inlink = inlink_odd;
outlink = outlink_odd;
}
// Unitarization is always done in double precision
Matrix<double2,3> v, result;
for(int dir=0; dir<4; ++dir){
loadLinkVariableFromArray(inlink, dir, mem_idx, Vh+INPUT_PADDING, &v);
unitarizeLinkMILC(v, &result);
#ifdef __CUDA_ARCH__
#define FL_MAX_ERROR DEV_FL_MAX_ERROR
#define FL_CHECK_UNITARIZATION DEV_FL_CHECK_UNITARIZATION
#else
#define FL_MAX_ERROR HOST_FL_MAX_ERROR
#define FL_CHECK_UNITARIZATION HOST_FL_CHECK_UNITARIZATION
#endif
if(FL_CHECK_UNITARIZATION){
if(isUnitary(result,FL_MAX_ERROR) == false)
{
#ifdef __CUDA_ARCH__
atomicAdd(num_failures, 1);
#else
(*num_failures)++;
#endif
}
}
writeLinkVariableToArray(result, dir, mem_idx, Vh+OUTPUT_PADDING, outlink);
}
return;
}
class UnitarizeLinksCuda : public Tunable {
private:
const cudaGaugeField &inField;
cudaGaugeField &outField;
int *fails;
int sharedBytesPerThread() const { return 0; }
int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool advanceGridDim(TuneParam ¶m) const { return false; }
// generalize Tunable::advanceBlockDim() to also set gridDim, with extra checking to ensure that gridDim isn't too large for the device
bool advanceBlockDim(TuneParam ¶m) const
{
const unsigned int max_threads = deviceProp.maxThreadsDim[0];
const unsigned int max_blocks = deviceProp.maxGridSize[0];
const unsigned int max_shared = 16384; // FIXME: use deviceProp.sharedMemPerBlock;
const int step = deviceProp.warpSize;
const int threads = inField.Volume();
bool ret;
param.block.x += step;
if (param.block.x > max_threads || sharedBytesPerThread()*param.block.x > max_shared) {
param.block = dim3((threads+max_blocks-1)/max_blocks, 1, 1); // ensure the blockDim is large enough, given the limit on gridDim
param.block.x = ((param.block.x+step-1) / step) * step; // round up to the nearest "step"
if (param.block.x > max_threads) errorQuda("Local lattice volume is too large for device");
ret = false;
} else {
ret = true;
}
param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1);
return ret;
}
public:
UnitarizeLinksCuda(const cudaGaugeField& inField, cudaGaugeField& outField, int* fails) :
inField(inField), outField(outField), fails(fails) { ; }
virtual ~UnitarizeLinksCuda() { ; }
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if(inField.Precision() == QUDA_SINGLE_PRECISION){
hipLaunchKernelGGL(( getUnitarizedField), dim3(tp.grid),dim3(tp.block), 0, 0, (float2*)inField.Even_p(), (float2*)inField.Odd_p(),
(float2*)outField.Even_p(), (float2*)outField.Odd_p(),
fails, inField.Volume());
}else if(inField.Precision() == QUDA_DOUBLE_PRECISION){
hipLaunchKernelGGL(( getUnitarizedField), dim3(tp.grid),dim3(tp.block), 0, 0, (double2*)inField.Even_p(), (double2*)inField.Odd_p(),
(double2*)outField.Even_p(), (double2*)outField.Odd_p(),
fails, inField.Volume());
} else {
errorQuda("UnitarizeLinks not implemented for precision %d", inField.Precision());
}
}
void preTune() { ; }
void postTune() { hipMemset(fails, 0, sizeof(int)); } // reset fails counter
virtual void initTuneParam(TuneParam ¶m) const
{
const unsigned int max_threads = deviceProp.maxThreadsDim[0];
const unsigned int max_blocks = deviceProp.maxGridSize[0];
const int threads = inField.Volume();
const int step = deviceProp.warpSize;
param.block = dim3((threads+max_blocks-1)/max_blocks, 1, 1); // ensure the blockDim is large enough, given the limit on gridDim
param.block.x = ((param.block.x+step-1) / step) * step; // round up to the nearest "step"
if (param.block.x > max_threads) errorQuda("Local lattice volume is too large for device");
param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1);
param.shared_bytes = sharedBytesPerThread()*param.block.x > sharedBytesPerBlock(param) ?
sharedBytesPerThread()*param.block.x : sharedBytesPerBlock(param);
}
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam ¶m) const {
initTuneParam(param);
}
long long flops() const { return 0; } // FIXME: add flops counter
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << inField.X()[0] << "x";
vol << inField.X()[1] << "x";
vol << inField.X()[2] << "x";
vol << inField.X()[3] << "x";
aux << "threads=" << inField.Volume() << ",prec=" << inField.Precision();
aux << "stride=" << inField.Stride();
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
}; // UnitarizeLinksCuda
void unitarizeLinksCuda(const QudaGaugeParam& param,
cudaGaugeField& inField,
cudaGaugeField* outField,
int* fails) {
UnitarizeLinksCuda unitarizeLinks(inField, *outField, fails);
unitarizeLinks.apply(0);
}
void unitarizeLinksCPU(const QudaGaugeParam& param, cpuGaugeField& infield, cpuGaugeField* outfield)
{
int num_failures = 0;
Matrix<double2,3> inlink, outlink;
for(int i=0; i<infield.Volume(); ++i){
for(int dir=0; dir<4; ++dir){
if(param.cpu_prec == QUDA_SINGLE_PRECISION){
copyArrayToLink(&inlink, ((float*)(infield.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
if( unitarizeLinkNewton<double2>(inlink, &outlink) == false ) num_failures++;
copyLinkToArray(((float*)(outfield->Gauge_p()) + (i*4 + dir)*18), outlink);
}else if(param.cpu_prec == QUDA_DOUBLE_PRECISION){
copyArrayToLink(&inlink, ((double*)(infield.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
if( unitarizeLinkNewton<double2>(inlink, &outlink) == false ) num_failures++;
copyLinkToArray(((double*)(outfield->Gauge_p()) + (i*4 + dir)*18), outlink);
} // precision?
} // dir
} // loop over volume
return;
}
// CPU function which checks that the gauge field is unitary
bool isUnitary(const QudaGaugeParam& param, cpuGaugeField& field, double max_error)
{
Matrix<double2,3> link, identity;
for(int i=0; i<field.Volume(); ++i){
for(int dir=0; dir<4; ++dir){
if(param.cpu_prec == QUDA_SINGLE_PRECISION){
copyArrayToLink(&link, ((float*)(field.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
}else if(param.cpu_prec == QUDA_DOUBLE_PRECISION){
copyArrayToLink(&link, ((double*)(field.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
}else{
errorQuda("Unsupported precision\n");
}
if(isUnitary(link,max_error) == false){
printf("Unitarity failure\n");
printf("site index = %d,\t direction = %d\n", i, dir);
printLink(link);
identity = conj(link)*link;
printLink(identity);
return false;
}
} // dir
} // i
return true;
} // is unitary
} // namespace quda
| dc809e9b6604776b6d6bc4ed500bdde3f74e6cdb.cu | #include <cstdlib>
#include <cstdio>
#include <iostream>
#include <iomanip>
#include <cuda.h>
#include <gauge_field.h>
#include <quda_matrix.h>
#include <svd_quda.h>
#include <hisq_links_quda.h>
namespace quda{
#ifndef FL_UNITARIZE_PI
#define FL_UNITARIZE_PI 3.14159265358979323846
#endif
#ifndef FL_UNITARIZE_PI23
#define FL_UNITARIZE_PI23 FL_UNITARIZE_PI*2.0/3.0
#endif
__constant__ int INPUT_PADDING=0;
__constant__ int OUTPUT_PADDING=0;
__constant__ int DEV_MAX_ITER = 20;
static int HOST_MAX_ITER = 20;
__constant__ double DEV_FL_MAX_ERROR;
__constant__ double DEV_FL_UNITARIZE_EPS;
__constant__ bool DEV_FL_REUNIT_ALLOW_SVD;
__constant__ bool DEV_FL_REUNIT_SVD_ONLY;
__constant__ double DEV_FL_REUNIT_SVD_REL_ERROR;
__constant__ double DEV_FL_REUNIT_SVD_ABS_ERROR;
__constant__ bool DEV_FL_CHECK_UNITARIZATION;
static double HOST_FL_MAX_ERROR;
static double HOST_FL_UNITARIZE_EPS;
static bool HOST_FL_REUNIT_ALLOW_SVD;
static bool HOST_FL_REUNIT_SVD_ONLY;
static double HOST_FL_REUNIT_SVD_REL_ERROR;
static double HOST_FL_REUNIT_SVD_ABS_ERROR;
static bool HOST_FL_CHECK_UNITARIZATION;
void setUnitarizeLinksPadding(int input_padding_h, int output_padding_h)
{
cudaMemcpyToSymbol(INPUT_PADDING, &input_padding_h, sizeof(int));
cudaMemcpyToSymbol(OUTPUT_PADDING, &output_padding_h, sizeof(int));
return;
}
template<class Cmplx>
__device__ __host__
bool isUnitary(const Matrix<Cmplx,3>& matrix, double max_error)
{
const Matrix<Cmplx,3> identity = conj(matrix)*matrix;
for(int i=0; i<3; ++i){
if( fabs(identity(i,i).x - 1.0) > max_error || fabs(identity(i,i).y) > max_error) return false;
for(int j=i+1; j<3; ++j){
if( fabs(identity(i,j).x) > max_error || fabs(identity(i,j).y) > max_error
|| fabs(identity(j,i).x) > max_error || fabs(identity(j,i).y) > max_error ){
return false;
}
}
}
return true;
}
template<class Cmplx>
__device__ __host__
bool isUnitarizedLinkConsistent(const Matrix<Cmplx,3>& initial_matrix,
const Matrix<Cmplx,3>& unitary_matrix,
double max_error)
{
Matrix<Cmplx,3> temporary;
temporary = conj(initial_matrix)*unitary_matrix;
temporary = temporary*temporary - conj(initial_matrix)*initial_matrix;
for(int i=0; i<3; ++i){
for(int j=0; j<3; ++j){
if( fabs(temporary(i,j).x) > max_error || fabs(temporary(i,j).y) > max_error){
return false;
}
}
}
return true;
}
void setUnitarizeLinksConstants(double unitarize_eps_h, double max_error_h,
bool allow_svd_h, bool svd_only_h,
double svd_rel_error_h, double svd_abs_error_h,
bool check_unitarization_h)
{
// not_set is only initialised once
static bool not_set=true;
if(not_set){
cudaMemcpyToSymbol(DEV_FL_UNITARIZE_EPS, &unitarize_eps_h, sizeof(double));
cudaMemcpyToSymbol(DEV_FL_REUNIT_ALLOW_SVD, &allow_svd_h, sizeof(bool));
cudaMemcpyToSymbol(DEV_FL_REUNIT_SVD_ONLY, &svd_only_h, sizeof(bool));
cudaMemcpyToSymbol(DEV_FL_REUNIT_SVD_REL_ERROR, &svd_rel_error_h, sizeof(double));
cudaMemcpyToSymbol(DEV_FL_REUNIT_SVD_ABS_ERROR, &svd_abs_error_h, sizeof(double));
cudaMemcpyToSymbol(DEV_FL_MAX_ERROR, &max_error_h, sizeof(double));
cudaMemcpyToSymbol(DEV_FL_CHECK_UNITARIZATION, &check_unitarization_h, sizeof(bool));
HOST_FL_UNITARIZE_EPS = unitarize_eps_h;
HOST_FL_REUNIT_ALLOW_SVD = allow_svd_h;
HOST_FL_REUNIT_SVD_ONLY = svd_only_h;
HOST_FL_REUNIT_SVD_REL_ERROR = svd_rel_error_h;
HOST_FL_REUNIT_SVD_ABS_ERROR = svd_abs_error_h;
HOST_FL_MAX_ERROR = max_error_h;
HOST_FL_CHECK_UNITARIZATION = check_unitarization_h;
not_set = false;
}
checkCudaError();
return;
}
template<class T>
__device__ __host__
T getAbsMin(const T* const array, int size){
T min = fabs(array[0]);
for(int i=1; i<size; ++i){
T abs_val = fabs(array[i]);
if((abs_val) < min){ min = abs_val; }
}
return min;
}
template<class Real>
__device__ __host__
inline bool checkAbsoluteError(Real a, Real b, Real epsilon)
{
if( fabs(a-b) < epsilon) return true;
return false;
}
template<class Real>
__device__ __host__
inline bool checkRelativeError(Real a, Real b, Real epsilon)
{
if( fabs((a-b)/b) < epsilon ) return true;
return false;
}
// Compute the reciprocal square root of the matrix q
// Also modify q if the eigenvalues are dangerously small.
template<class Cmplx>
__device__ __host__
bool reciprocalRoot(const Matrix<Cmplx,3>& q, Matrix<Cmplx,3>* res){
Matrix<Cmplx,3> qsq, tempq;
typename RealTypeId<Cmplx>::Type c[3];
typename RealTypeId<Cmplx>::Type g[3];
qsq = q*q;
tempq = qsq*q;
c[0] = getTrace(q).x;
c[1] = getTrace(qsq).x/2.0;
c[2] = getTrace(tempq).x/3.0;
g[0] = g[1] = g[2] = c[0]/3.;
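    // solve the characteristic cubic of q for its eigenvalues g[i] using the trigonometric (Cardano) method below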
typename RealTypeId<Cmplx>::Type r,s,theta;
s = c[1]/3. - c[0]*c[0]/18;
#ifdef __CUDA_ARCH__
#define FL_UNITARIZE_EPS DEV_FL_UNITARIZE_EPS
#else
#define FL_UNITARIZE_EPS HOST_FL_UNITARIZE_EPS
#endif
#ifdef __CUDA_ARCH__
#define FL_REUNIT_SVD_REL_ERROR DEV_FL_REUNIT_SVD_REL_ERROR
#define FL_REUNIT_SVD_ABS_ERROR DEV_FL_REUNIT_SVD_ABS_ERROR
#else // cpu
#define FL_REUNIT_SVD_REL_ERROR HOST_FL_REUNIT_SVD_REL_ERROR
#define FL_REUNIT_SVD_ABS_ERROR HOST_FL_REUNIT_SVD_ABS_ERROR
#endif
typename RealTypeId<Cmplx>::Type cosTheta;
if(fabs(s) >= FL_UNITARIZE_EPS){
const typename RealTypeId<Cmplx>::Type sqrt_s = sqrt(s);
r = c[2]/2. - (c[0]/3.)*(c[1] - c[0]*c[0]/9.);
cosTheta = r/(sqrt_s*sqrt_s*sqrt_s);
if(fabs(cosTheta) >= 1.0){
if( r > 0 ){
theta = 0.0;
}else{
theta = FL_UNITARIZE_PI;
}
}else{
theta = acos(cosTheta);
}
g[0] = c[0]/3 + 2*sqrt_s*cos( theta/3 );
g[1] = c[0]/3 + 2*sqrt_s*cos( theta/3 + FL_UNITARIZE_PI23 );
g[2] = c[0]/3 + 2*sqrt_s*cos( theta/3 + 2*FL_UNITARIZE_PI23 );
}
// Check the eigenvalues, if the determinant does not match the product of the eigenvalues
// return false. Then call SVD instead.
typename RealTypeId<Cmplx>::Type det = getDeterminant(q).x;
if( fabs(det) < FL_REUNIT_SVD_ABS_ERROR ){
return false;
}
if( checkRelativeError(g[0]*g[1]*g[2],det,FL_REUNIT_SVD_REL_ERROR) == false ) return false;
// At this point we have finished with the c's
// use these to store sqrt(g)
for(int i=0; i<3; ++i) c[i] = sqrt(g[i]);
// done with the g's, use these to store u, v, w
g[0] = c[0]+c[1]+c[2];
g[1] = c[0]*c[1] + c[0]*c[2] + c[1]*c[2];
g[2] = c[0]*c[1]*c[2];
const typename RealTypeId<Cmplx>::Type & denominator = g[2]*(g[0]*g[1]-g[2]);
c[0] = (g[0]*g[1]*g[1] - g[2]*(g[0]*g[0]+g[1]))/denominator;
c[1] = (-g[0]*g[0]*g[0] - g[2] + 2.*g[0]*g[1])/denominator;
c[2] = g[0]/denominator;
tempq = c[1]*q + c[2]*qsq;
// Add a real scalar
tempq(0,0).x += c[0];
tempq(1,1).x += c[0];
tempq(2,2).x += c[0];
*res = tempq;
return true;
}
template<class Cmplx>
__host__ __device__
bool unitarizeLinkMILC(const Matrix<Cmplx,3>& in, Matrix<Cmplx,3>* const result)
{
Matrix<Cmplx,3> u;
#ifdef __CUDA_ARCH__
#define FL_REUNIT_SVD_ONLY DEV_FL_REUNIT_SVD_ONLY
#define FL_REUNIT_ALLOW_SVD DEV_FL_REUNIT_ALLOW_SVD
#else
#define FL_REUNIT_SVD_ONLY HOST_FL_REUNIT_SVD_ONLY
#define FL_REUNIT_ALLOW_SVD HOST_FL_REUNIT_ALLOW_SVD
#endif
if( !FL_REUNIT_SVD_ONLY ){
if( reciprocalRoot<Cmplx>(conj(in)*in,&u) ){
*result = in*u;
return true;
}
}
    // If we've got this far, then the Cayley-Hamilton unitarization
// has failed. If SVD is not allowed, the unitarization has failed.
if( !FL_REUNIT_ALLOW_SVD ) return false;
Matrix<Cmplx,3> v;
typename RealTypeId<Cmplx>::Type singular_values[3];
computeSVD<Cmplx>(in, u, v, singular_values); // should pass pointers to u, v I guess
*result = u*conj(v);
return true;
} // unitarizeMILC
template<class Cmplx>
__host__ __device__
bool unitarizeLinkSVD(const Matrix<Cmplx,3>& in, Matrix<Cmplx,3>* const result)
{
Matrix<Cmplx,3> u, v;
typename RealTypeId<Cmplx>::Type singular_values[3];
computeSVD<Cmplx>(in, u, v, singular_values); // should pass pointers to u,v I guess
*result = u*conj(v);
#ifdef __CUDA_ARCH__
#define FL_MAX_ERROR DEV_FL_MAX_ERROR
#else
#define FL_MAX_ERROR HOST_FL_MAX_ERROR
#endif
if(isUnitary(*result,FL_MAX_ERROR)==false)
{
#if (!defined(__CUDA_ARCH__) || (__COMPUTE_CAPABILITY__>=200))
printf("ERROR: Link unitarity test failed\n");
printf("TOLERANCE: %g\n", FL_MAX_ERROR);
#endif
return false;
}
return true;
}
#undef FL_MAX_ERROR
template<class Cmplx>
__host__ __device__
bool unitarizeLinkNewton(const Matrix<Cmplx,3>& in, Matrix<Cmplx,3>* const result)
{
Matrix<Cmplx,3> u, uinv;
u = in;
#ifdef __CUDA_ARCH__
#define MAX_ITER DEV_MAX_ITER
#else
#define MAX_ITER HOST_MAX_ITER
#endif
for(int i=0; i<MAX_ITER; ++i){
computeMatrixInverse(u, &uinv);
u = 0.5*(u + conj(uinv));
}
#undef MAX_ITER
if(isUnitarizedLinkConsistent(in,u,0.0000001)==false)
{
#if (!defined(__CUDA_ARCH__) || (__COMPUTE_CAPABILITY__>=200))
printf("ERROR: Unitarized link is not consistent with incoming link\n");
#endif
return false;
}
*result = u;
return true;
}
template<class Cmplx>
__global__ void getUnitarizedField(const Cmplx* inlink_even, const Cmplx* inlink_odd,
Cmplx* outlink_even, Cmplx* outlink_odd,
int* num_failures, const int threads)
{
int mem_idx = blockIdx.x*blockDim.x + threadIdx.x;
if (mem_idx >= threads) return;
const Cmplx* inlink;
Cmplx* outlink;
inlink = inlink_even;
outlink = outlink_even;
if(mem_idx >= Vh){
mem_idx = mem_idx - Vh;
inlink = inlink_odd;
outlink = outlink_odd;
}
// Unitarization is always done in double precision
Matrix<double2,3> v, result;
for(int dir=0; dir<4; ++dir){
loadLinkVariableFromArray(inlink, dir, mem_idx, Vh+INPUT_PADDING, &v);
unitarizeLinkMILC(v, &result);
#ifdef __CUDA_ARCH__
#define FL_MAX_ERROR DEV_FL_MAX_ERROR
#define FL_CHECK_UNITARIZATION DEV_FL_CHECK_UNITARIZATION
#else
#define FL_MAX_ERROR HOST_FL_MAX_ERROR
#define FL_CHECK_UNITARIZATION HOST_FL_CHECK_UNITARIZATION
#endif
if(FL_CHECK_UNITARIZATION){
if(isUnitary(result,FL_MAX_ERROR) == false)
{
#ifdef __CUDA_ARCH__
atomicAdd(num_failures, 1);
#else
(*num_failures)++;
#endif
}
}
writeLinkVariableToArray(result, dir, mem_idx, Vh+OUTPUT_PADDING, outlink);
}
return;
}
class UnitarizeLinksCuda : public Tunable {
private:
const cudaGaugeField &inField;
cudaGaugeField &outField;
int *fails;
int sharedBytesPerThread() const { return 0; }
int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool advanceGridDim(TuneParam ¶m) const { return false; }
// generalize Tunable::advanceBlockDim() to also set gridDim, with extra checking to ensure that gridDim isn't too large for the device
bool advanceBlockDim(TuneParam ¶m) const
{
const unsigned int max_threads = deviceProp.maxThreadsDim[0];
const unsigned int max_blocks = deviceProp.maxGridSize[0];
const unsigned int max_shared = 16384; // FIXME: use deviceProp.sharedMemPerBlock;
const int step = deviceProp.warpSize;
const int threads = inField.Volume();
bool ret;
param.block.x += step;
if (param.block.x > max_threads || sharedBytesPerThread()*param.block.x > max_shared) {
param.block = dim3((threads+max_blocks-1)/max_blocks, 1, 1); // ensure the blockDim is large enough, given the limit on gridDim
param.block.x = ((param.block.x+step-1) / step) * step; // round up to the nearest "step"
if (param.block.x > max_threads) errorQuda("Local lattice volume is too large for device");
ret = false;
} else {
ret = true;
}
param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1);
return ret;
}
public:
UnitarizeLinksCuda(const cudaGaugeField& inField, cudaGaugeField& outField, int* fails) :
inField(inField), outField(outField), fails(fails) { ; }
virtual ~UnitarizeLinksCuda() { ; }
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if(inField.Precision() == QUDA_SINGLE_PRECISION){
getUnitarizedField<<<tp.grid,tp.block>>>((float2*)inField.Even_p(), (float2*)inField.Odd_p(),
(float2*)outField.Even_p(), (float2*)outField.Odd_p(),
fails, inField.Volume());
}else if(inField.Precision() == QUDA_DOUBLE_PRECISION){
getUnitarizedField<<<tp.grid,tp.block>>>((double2*)inField.Even_p(), (double2*)inField.Odd_p(),
(double2*)outField.Even_p(), (double2*)outField.Odd_p(),
fails, inField.Volume());
} else {
errorQuda("UnitarizeLinks not implemented for precision %d", inField.Precision());
}
}
void preTune() { ; }
void postTune() { cudaMemset(fails, 0, sizeof(int)); } // reset fails counter
virtual void initTuneParam(TuneParam ¶m) const
{
const unsigned int max_threads = deviceProp.maxThreadsDim[0];
const unsigned int max_blocks = deviceProp.maxGridSize[0];
const int threads = inField.Volume();
const int step = deviceProp.warpSize;
param.block = dim3((threads+max_blocks-1)/max_blocks, 1, 1); // ensure the blockDim is large enough, given the limit on gridDim
param.block.x = ((param.block.x+step-1) / step) * step; // round up to the nearest "step"
if (param.block.x > max_threads) errorQuda("Local lattice volume is too large for device");
param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1);
param.shared_bytes = sharedBytesPerThread()*param.block.x > sharedBytesPerBlock(param) ?
sharedBytesPerThread()*param.block.x : sharedBytesPerBlock(param);
}
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam ¶m) const {
initTuneParam(param);
}
long long flops() const { return 0; } // FIXME: add flops counter
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << inField.X()[0] << "x";
vol << inField.X()[1] << "x";
vol << inField.X()[2] << "x";
vol << inField.X()[3] << "x";
aux << "threads=" << inField.Volume() << ",prec=" << inField.Precision();
aux << "stride=" << inField.Stride();
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
}; // UnitarizeLinksCuda
void unitarizeLinksCuda(const QudaGaugeParam& param,
cudaGaugeField& inField,
cudaGaugeField* outField,
int* fails) {
UnitarizeLinksCuda unitarizeLinks(inField, *outField, fails);
unitarizeLinks.apply(0);
}
void unitarizeLinksCPU(const QudaGaugeParam& param, cpuGaugeField& infield, cpuGaugeField* outfield)
{
int num_failures = 0;
Matrix<double2,3> inlink, outlink;
for(int i=0; i<infield.Volume(); ++i){
for(int dir=0; dir<4; ++dir){
if(param.cpu_prec == QUDA_SINGLE_PRECISION){
copyArrayToLink(&inlink, ((float*)(infield.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
if( unitarizeLinkNewton<double2>(inlink, &outlink) == false ) num_failures++;
copyLinkToArray(((float*)(outfield->Gauge_p()) + (i*4 + dir)*18), outlink);
}else if(param.cpu_prec == QUDA_DOUBLE_PRECISION){
copyArrayToLink(&inlink, ((double*)(infield.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
if( unitarizeLinkNewton<double2>(inlink, &outlink) == false ) num_failures++;
copyLinkToArray(((double*)(outfield->Gauge_p()) + (i*4 + dir)*18), outlink);
} // precision?
} // dir
} // loop over volume
return;
}
// CPU function which checks that the gauge field is unitary
bool isUnitary(const QudaGaugeParam& param, cpuGaugeField& field, double max_error)
{
Matrix<double2,3> link, identity;
for(int i=0; i<field.Volume(); ++i){
for(int dir=0; dir<4; ++dir){
if(param.cpu_prec == QUDA_SINGLE_PRECISION){
copyArrayToLink(&link, ((float*)(field.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
}else if(param.cpu_prec == QUDA_DOUBLE_PRECISION){
copyArrayToLink(&link, ((double*)(field.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
}else{
errorQuda("Unsupported precision\n");
}
if(isUnitary(link,max_error) == false){
printf("Unitarity failure\n");
printf("site index = %d,\t direction = %d\n", i, dir);
printLink(link);
identity = conj(link)*link;
printLink(identity);
return false;
}
} // dir
} // i
return true;
} // is unitary
} // namespace quda
|
31f1e22738311b34c1cacd3296298bb08278c9d3.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef _DELITE_CUDA_
#define _DELITE_CUDA_
#include "DeliteCuda.h"
using namespace std;
list<void*>* lastAlloc = new list<void*>();
queue<FreeItem>* freeList = new queue<FreeItem>();
map<void*,list<void*>*>* cudaMemoryMap = new map<void*,list<void*>*>();
void freeCudaMemory(FreeItem item) {
list< pair<void*,bool> >::iterator iter;
for (iter = item.keys->begin(); iter != item.keys->end(); iter++) {
//cout << "object ref: " << (long) *iter << endl;
if(cudaMemoryMap->find((*iter).first) != cudaMemoryMap->end()) {
list<void*>* freePtrList = cudaMemoryMap->find((*iter).first)->second;
list<void*>::iterator iter2;
for (iter2 = freePtrList->begin(); iter2 != freePtrList->end(); iter2++) {
void* freePtr = *iter2;
hipFree(freePtr);
//if (hipFree(freePtr) != hipSuccess)
// cout << "bad free pointer: " << (long) freePtr << endl;
//else
//cout << "freed successfully: " << (long) freePtr << endl;
}
cudaMemoryMap->erase((*iter).first);
delete freePtrList;
if(!((*iter).second)) free((*iter).first);
}
}
delete item.keys;
}
void DeliteCudaMalloc(void** ptr, size_t size) {
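	// first reclaim device buffers whose recorded event has already completed, so their memory can be reused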
while (freeList->size() != 0) {
FreeItem item = freeList->front();
if (hipEventQuery(item.event) != hipSuccess) {
break;
}
freeList->pop();
hipEventDestroy(item.event);
freeCudaMemory(item);
}
while (hipMalloc(ptr, size) != hipSuccess) {
if (freeList->size() == 0) {
cout << "FATAL: Insufficient device memory" << endl;
exit(-1);
}
FreeItem item = freeList->front();
freeList->pop();
while (hipEventQuery(item.event) != hipSuccess) {
hipEventSynchronize(item.event);
}
hipEventDestroy(item.event);
freeCudaMemory(item);
}
lastAlloc->push_back(*ptr);
}
/*
void DeliteCudaMalloc(void** ptr, size_t size) {
while (freeList->size() > 0) {
FreeItem item = freeList->front();
freeList->pop();
while (hipEventQuery(item.event) != hipSuccess) {
hipEventSynchronize(item.event);
}
hipEventDestroy(item.event);
list<void*>::iterator iter;
for (iter = item.keys->begin(); iter != item.keys->end(); iter++) {
//cout << "object ref: " << (long) *iter << endl;
list<void*>* freePtrList = cudaMemoryMap->find(*iter)->second;
list<void*>::iterator iter2;
for (iter2 = freePtrList->begin(); iter2 != freePtrList->end(); iter2++) {
void* freePtr = *iter2;
if (hipFree(freePtr) != hipSuccess)
cout << "bad free pointer: " << (long) freePtr << endl;
else
cout << "freed successfully: " << (long) freePtr << endl;
}
cudaMemoryMap->erase(*iter);
delete freePtrList;
free(*iter);
}
delete item.keys;
}
if (hipMalloc(ptr, size) != hipSuccess) {
cout << "FATAL: cuda malloc failed unexpectedly" << endl;
exit(-1);
}
else
cout << "allocated successfully: " << (long) *ptr << endl;
lastAlloc->push_back(*ptr);
} */
char* bufferStart = 0;
size_t bufferSize = 5368709120/4;
char* bufferEnd;
char* bufferCurrent;
void hostInit() {
hipHostMalloc(&bufferStart, bufferSize, hipHostMallocDefault);
bufferEnd = bufferStart + bufferSize;
bufferCurrent = bufferStart;
}
void DeliteCudaMallocHost(void** ptr, size_t size) {
if (bufferStart == 0) hostInit();
if ((bufferCurrent + size) > bufferEnd)
bufferCurrent = bufferStart;
*ptr = bufferCurrent;
bufferCurrent += size;
}
void DeliteCudaMemcpyHtoDAsync(void* dptr, void* sptr, size_t size) {
hipMemcpyAsync(dptr, sptr, size, hipMemcpyHostToDevice, h2dStream);
}
void DeliteCudaMemcpyDtoHAsync(void* dptr, void* sptr, size_t size) {
hipMemcpyAsync(dptr, sptr, size, hipMemcpyDeviceToHost, d2hStream);
hipStreamSynchronize(d2hStream);
}
void DeliteCudaMemcpyDtoDAsync(void *dptr, void* sptr, size_t size) {
hipMemcpyAsync(dptr, sptr, size, hipMemcpyDeviceToDevice, kernelStream);
}
void DeliteCudaMemset(void *ptr, int value, size_t count) {
hipMemset(ptr,value,count);
}
#endif
| 31f1e22738311b34c1cacd3296298bb08278c9d3.cu | #ifndef _DELITE_CUDA_
#define _DELITE_CUDA_
#include "DeliteCuda.h"
using namespace std;
list<void*>* lastAlloc = new list<void*>();
queue<FreeItem>* freeList = new queue<FreeItem>();
map<void*,list<void*>*>* cudaMemoryMap = new map<void*,list<void*>*>();
void freeCudaMemory(FreeItem item) {
list< pair<void*,bool> >::iterator iter;
for (iter = item.keys->begin(); iter != item.keys->end(); iter++) {
//cout << "object ref: " << (long) *iter << endl;
if(cudaMemoryMap->find((*iter).first) != cudaMemoryMap->end()) {
list<void*>* freePtrList = cudaMemoryMap->find((*iter).first)->second;
list<void*>::iterator iter2;
for (iter2 = freePtrList->begin(); iter2 != freePtrList->end(); iter2++) {
void* freePtr = *iter2;
cudaFree(freePtr);
//if (cudaFree(freePtr) != cudaSuccess)
// cout << "bad free pointer: " << (long) freePtr << endl;
//else
//cout << "freed successfully: " << (long) freePtr << endl;
}
cudaMemoryMap->erase((*iter).first);
delete freePtrList;
if(!((*iter).second)) free((*iter).first);
}
}
delete item.keys;
}
void DeliteCudaMalloc(void** ptr, size_t size) {
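	// first reclaim device buffers whose recorded event has already completed, so their memory can be reused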
while (freeList->size() != 0) {
FreeItem item = freeList->front();
if (cudaEventQuery(item.event) != cudaSuccess) {
break;
}
freeList->pop();
cudaEventDestroy(item.event);
freeCudaMemory(item);
}
while (cudaMalloc(ptr, size) != cudaSuccess) {
if (freeList->size() == 0) {
cout << "FATAL: Insufficient device memory" << endl;
exit(-1);
}
FreeItem item = freeList->front();
freeList->pop();
while (cudaEventQuery(item.event) != cudaSuccess) {
cudaEventSynchronize(item.event);
}
cudaEventDestroy(item.event);
freeCudaMemory(item);
}
lastAlloc->push_back(*ptr);
}
/*
void DeliteCudaMalloc(void** ptr, size_t size) {
while (freeList->size() > 0) {
FreeItem item = freeList->front();
freeList->pop();
while (cudaEventQuery(item.event) != cudaSuccess) {
cudaEventSynchronize(item.event);
}
cudaEventDestroy(item.event);
list<void*>::iterator iter;
for (iter = item.keys->begin(); iter != item.keys->end(); iter++) {
//cout << "object ref: " << (long) *iter << endl;
list<void*>* freePtrList = cudaMemoryMap->find(*iter)->second;
list<void*>::iterator iter2;
for (iter2 = freePtrList->begin(); iter2 != freePtrList->end(); iter2++) {
void* freePtr = *iter2;
if (cudaFree(freePtr) != cudaSuccess)
cout << "bad free pointer: " << (long) freePtr << endl;
else
cout << "freed successfully: " << (long) freePtr << endl;
}
cudaMemoryMap->erase(*iter);
delete freePtrList;
free(*iter);
}
delete item.keys;
}
if (cudaMalloc(ptr, size) != cudaSuccess) {
cout << "FATAL: cuda malloc failed unexpectedly" << endl;
exit(-1);
}
else
cout << "allocated successfully: " << (long) *ptr << endl;
lastAlloc->push_back(*ptr);
} */
char* bufferStart = 0;
size_t bufferSize = 5368709120/4;
char* bufferEnd;
char* bufferCurrent;
void hostInit() {
cudaHostAlloc(&bufferStart, bufferSize, cudaHostAllocDefault);
bufferEnd = bufferStart + bufferSize;
bufferCurrent = bufferStart;
}
void DeliteCudaMallocHost(void** ptr, size_t size) {
if (bufferStart == 0) hostInit();
if ((bufferCurrent + size) > bufferEnd)
bufferCurrent = bufferStart;
*ptr = bufferCurrent;
bufferCurrent += size;
}
void DeliteCudaMemcpyHtoDAsync(void* dptr, void* sptr, size_t size) {
cudaMemcpyAsync(dptr, sptr, size, cudaMemcpyHostToDevice, h2dStream);
}
void DeliteCudaMemcpyDtoHAsync(void* dptr, void* sptr, size_t size) {
cudaMemcpyAsync(dptr, sptr, size, cudaMemcpyDeviceToHost, d2hStream);
cudaStreamSynchronize(d2hStream);
}
void DeliteCudaMemcpyDtoDAsync(void *dptr, void* sptr, size_t size) {
cudaMemcpyAsync(dptr, sptr, size, cudaMemcpyDeviceToDevice, kernelStream);
}
void DeliteCudaMemset(void *ptr, int value, size_t count) {
cudaMemset(ptr,value,count);
}
#endif
|
cfcdd781bb780bc857cadbb5cf579e74b93364a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@author Azzam Haidar
@author Tingxing Dong
@generated from zgemv_batched.cu normal z -> s, Sat Nov 15 19:53:59 2014
*/
#include "common_magma.h"
#define sgemv_bs 32
extern __shared__ float shared_data[];
__global__ void
kernel_sgemvn_batched(
int m, int n, float alpha,
float **dA_array, int lda,
float **x_array, int incx,
float beta, float **y_array, int incy)
{
float *A = dA_array[blockIdx.x];
float *x = x_array[blockIdx.x];
float *y = y_array[blockIdx.x];
int tx = threadIdx.x;
float res = MAGMA_S_ZERO;
float *buff = (float*)shared_data;
if(tx < n)
{
buff[tx] = x[tx*incx];
}
__syncthreads();
if(tx < m )
{
for(int j=0; j < n ; j++)
{
res += A[tx]*buff[j];
A += lda;
}
y[tx*incy] = alpha * res + y[tx*incy] * beta;
}
}
/*
Matrix Non-transpose Vector Multiplication
y := alpha*A*x + beta*y,
*/
extern "C"
void magmablas_sgemvn_batched(
int m, int n,
float alpha, float **dA_array, int lda,
float **x_array, int incx,
float beta, float **y_array, int incy,
int batchCount)
{
if( m > 512 || n > 512)
{
fprintf( stderr, "m=%d, n=%d, sgemv_batched nontranspose assume row && column lower than %d. Plz call magmablas_sgemv instead", m, n, 512);
return ;
}
dim3 grid(batchCount, 1, 1);
dim3 threads(max(m,n), 1, 1);
hipLaunchKernelGGL(( kernel_sgemvn_batched), dim3(grid), dim3(threads), n * sizeof(float) , 0, m, n, alpha, dA_array, lda, x_array, incx,
beta, y_array, incy);
}
__global__ void
kernel_sgemvt_batched(
int m, int n, int m1, float alpha,
float **dA_array, int lda,
float **x_array, int incx,
float beta, float **y_array, int incy)
{
float *A_ptr = dA_array[blockIdx.x];
float *x_ptr = x_array[blockIdx.x];
float *y_ptr = y_array[blockIdx.x];
int tx = threadIdx.x;
float res = MAGMA_S_ZERO;
if(tx<m)
{
A_ptr += lda * blockIdx.y + tx;
x_ptr += tx * incx;
}
__shared__ float sdata[sgemv_bs];
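    // one thread block per (batch, column) pair: blockIdx.x picks the matrix, blockIdx.y the column of A;
    // the sgemv_bs threads accumulate strided partial dot products and reduce them in shared memory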
for(int i=0; i<m1; i+= sgemv_bs)
{
res += A_ptr[i] * x_ptr[i*incx];
}
if(m > m1)
{
if( tx + m1 < m )
{
res += A_ptr[m1] * x_ptr[m1*incx];
}
else
{
res = res;
}
}
sdata[tx] = res;
__syncthreads();
for(int s=blockDim.x/2; s>32;s>>=1)
{
if(tx<s)
{
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
if(sgemv_bs > 32)
{
if(tx<32)
{
sdata[tx] += sdata[tx+32];
}
}
if(tx == 0)
{
for(int i=1;i<32;i++)
{
sdata[tx] += sdata[tx + i];
}
y_ptr[blockIdx.y * incy] = sdata[0] * alpha + beta * y_ptr[blockIdx.y*incy];
}
}
/*
Matrix Transpose Vector Multiplication
y := alpha* A**T *x + beta*y,
*/
extern "C"
void magmablas_sgemvt_batched(
int m, int n,
float alpha, float **dA_array, int lda,
float **x_array, int incx,
float beta, float **y_array, int incy,
int batchCount)
{
dim3 grid(batchCount, n, 1);
dim3 threads(sgemv_bs, 1, 1);
int m1 = (m / sgemv_bs) * sgemv_bs;
hipLaunchKernelGGL(( kernel_sgemvt_batched) , dim3(grid), dim3(threads) , 0, 0, m, n, m1, alpha, dA_array, lda, x_array, incx, beta, y_array, incy);
}
#if defined(PRECISION_z) || defined (PRECISION_c)
__global__ void
kernel_sgemvc_batched(
int m, int n, int m1, float alpha,
float **dA_array, int lda,
float **x_array, int incx,
float beta, float **y_array, int incy)
{
float *A_ptr = dA_array[blockIdx.x];
float *x_ptr = x_array[blockIdx.x];
float *y_ptr = y_array[blockIdx.x];
int tx = threadIdx.x;
float res = MAGMA_S_ZERO;
if(tx<m)
{
A_ptr += lda * blockIdx.y + tx;
x_ptr += tx * incx;
}
__shared__ float sdata[sgemv_bs];
for(int i=0; i<m1; i+= sgemv_bs)
{
res += MAGMA_S_CNJG (A_ptr[i]) * x_ptr[i*incx];
}
if(m > m1)
{
if( tx + m1 < m )
{
res += MAGMA_S_CNJG(A_ptr[m1]) * x_ptr[m1*incx];
}
else
{
res = res;
}
}
sdata[tx] = res;
__syncthreads();
for(int s=blockDim.x/2; s>32;s>>=1)
{
if(tx<s)
{
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
if(sgemv_bs > 32)
{
if(tx<32)
{
sdata[tx] += sdata[tx+32];
}
}
if(tx == 0)
{
for(int i=1;i<32;i++)
{
sdata[tx] += sdata[tx + i];
}
y_ptr[blockIdx.y * incy] = sdata[0] * alpha + beta * y_ptr[blockIdx.y*incy];
}
}
/*
Matrix Conjugate Transpose Vector Multiplication
y := alpha* A**H *x + beta*y,
*/
extern "C"
void magmablas_sgemvc_batched(
int m, int n,
float alpha, float **dA_array, int lda,
float **x_array, int incx,
float beta, float **y_array, int incy,
int batchCount)
{
dim3 grid(batchCount, n, 1);
dim3 threads(sgemv_bs, 1, 1);
int m1 = (m / sgemv_bs) * sgemv_bs;
hipLaunchKernelGGL(( kernel_sgemvc_batched) , dim3(grid), dim3(threads) , 0, 0, m, n, m1, alpha, dA_array, lda, x_array, incx, beta, y_array, incy);
}
#endif // defined(PRECISION_z) || defined (PRECISION_c)
/**
Purpose
-------
This routine computes Y = alpha opt(A) x + beta y, on the GPU, where
A = dA_array[i],x = x_array[i] and y = y_array[i], i=[0,batchCount-1].
This is a batched version.
@param[in]
trans CHARACTER*1.
On entry, TRANS specifies the form of op( A ) to be used in
the matrix multiplication as follows:
= 'N': op( A ) = A.
= 'T': op( A ) = A**T.
= 'C': op( A ) = A**H.
@param[in]
m INTEGER.
On entry, M specifies the number of rows of the matrix opt(A).
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix opt(A)
@param[in]
alpha REAL.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA_array A = dA_array[i]
A: REAL array of dimension ( LDA, n ) on the GPU.
@param[in]
lda INTEGER.
LDA specifies the leading dimension of A.
@param[in]
x_array x = x_array[i]
x: REAL array of dimension.
n if trans == MagmaNoTrans.
m if trans == MagmaTrans or MagmaConjTrans.
@param[in]
incx INTEGER.
            incx specifies the increment for the elements of x.
incx must not be zero.
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta.
@param[out]
y_array y = y_array[i]:
On exit y = alpha opt(A) x + beta y.
y: REAL array of dimension.
m if trans == MagmaNoTrans.
n if trans == MagmaTrans or MagmaConjTrans.
@param[in]
incy INTEGER.
            incy specifies the increment for the elements of y.
incy must not be zero.
@param[in]
batchCount INTEGER
number of pointers contained in dA_array, x_array and y_array.
@ingroup magma_sblas2
******************************************************************* */
extern "C"
void magmablas_sgemv_batched(
magma_trans_t trans, magma_int_t m, magma_int_t n,
float alpha,
magmaFloat_ptr dA_array[], magma_int_t ldda,
magmaFloat_ptr dx_array[], magma_int_t incx,
float beta,
magmaFloat_ptr dy_array[], magma_int_t incy,
magma_int_t batchCount)
{
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if(m==0 || n ==0 ) return;
if ( trans == MagmaNoTrans ) {
magmablas_sgemvn_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
}
else if ( trans == MagmaTrans ) {
magmablas_sgemvt_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
}
else if ( trans == MagmaConjTrans ) {
#if defined(PRECISION_z) || defined (PRECISION_c)
magmablas_sgemvc_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
#else
magmablas_sgemvt_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
#endif
}
else {
fprintf( stderr, "trans = %c is invalid\n", lapacke_trans_const(trans) );
}
}
#undef sgemv_bs
| cfcdd781bb780bc857cadbb5cf579e74b93364a3.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@author Azzam Haidar
@author Tingxing Dong
@generated from zgemv_batched.cu normal z -> s, Sat Nov 15 19:53:59 2014
*/
#include "common_magma.h"
#define sgemv_bs 32
extern __shared__ float shared_data[];
__global__ void
kernel_sgemvn_batched(
int m, int n, float alpha,
float **dA_array, int lda,
float **x_array, int incx,
float beta, float **y_array, int incy)
{
float *A = dA_array[blockIdx.x];
float *x = x_array[blockIdx.x];
float *y = y_array[blockIdx.x];
int tx = threadIdx.x;
float res = MAGMA_S_ZERO;
float *buff = (float*)shared_data;
if(tx < n)
{
buff[tx] = x[tx*incx];
}
__syncthreads();
if(tx < m )
{
for(int j=0; j < n ; j++)
{
res += A[tx]*buff[j];
A += lda;
}
y[tx*incy] = alpha * res + y[tx*incy] * beta;
}
}
/*
Matrix Non-transpose Vector Multiplication
y := alpha*A*x + beta*y,
*/
extern "C"
void magmablas_sgemvn_batched(
int m, int n,
float alpha, float **dA_array, int lda,
float **x_array, int incx,
float beta, float **y_array, int incy,
int batchCount)
{
if( m > 512 || n > 512)
{
fprintf( stderr, "m=%d, n=%d, sgemv_batched nontranspose assume row && column lower than %d. Plz call magmablas_sgemv instead", m, n, 512);
return ;
}
dim3 grid(batchCount, 1, 1);
dim3 threads(max(m,n), 1, 1);
kernel_sgemvn_batched<<< grid, threads, n * sizeof(float) >>>( m, n, alpha, dA_array, lda, x_array, incx,
beta, y_array, incy);
}
__global__ void
kernel_sgemvt_batched(
int m, int n, int m1, float alpha,
float **dA_array, int lda,
float **x_array, int incx,
float beta, float **y_array, int incy)
{
float *A_ptr = dA_array[blockIdx.x];
float *x_ptr = x_array[blockIdx.x];
float *y_ptr = y_array[blockIdx.x];
int tx = threadIdx.x;
float res = MAGMA_S_ZERO;
if(tx<m)
{
A_ptr += lda * blockIdx.y + tx;
x_ptr += tx * incx;
}
__shared__ float sdata[sgemv_bs];
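    // one thread block per (batch, column) pair: blockIdx.x picks the matrix, blockIdx.y the column of A;
    // the sgemv_bs threads accumulate strided partial dot products and reduce them in shared memory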
for(int i=0; i<m1; i+= sgemv_bs)
{
res += A_ptr[i] * x_ptr[i*incx];
}
if(m > m1)
{
if( tx + m1 < m )
{
res += A_ptr[m1] * x_ptr[m1*incx];
}
else
{
res = res;
}
}
sdata[tx] = res;
__syncthreads();
for(int s=blockDim.x/2; s>32;s>>=1)
{
if(tx<s)
{
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
if(sgemv_bs > 32)
{
if(tx<32)
{
sdata[tx] += sdata[tx+32];
}
}
if(tx == 0)
{
for(int i=1;i<32;i++)
{
sdata[tx] += sdata[tx + i];
}
y_ptr[blockIdx.y * incy] = sdata[0] * alpha + beta * y_ptr[blockIdx.y*incy];
}
}
/*
Matrix Transpose Vector Multiplication
y := alpha* A**T *x + beta*y,
*/
extern "C"
void magmablas_sgemvt_batched(
int m, int n,
float alpha, float **dA_array, int lda,
float **x_array, int incx,
float beta, float **y_array, int incy,
int batchCount)
{
dim3 grid(batchCount, n, 1);
dim3 threads(sgemv_bs, 1, 1);
int m1 = (m / sgemv_bs) * sgemv_bs;
kernel_sgemvt_batched <<< grid, threads >>>(m, n, m1, alpha, dA_array, lda, x_array, incx, beta, y_array, incy);
}
#if defined(PRECISION_z) || defined (PRECISION_c)
__global__ void
kernel_sgemvc_batched(
int m, int n, int m1, float alpha,
float **dA_array, int lda,
float **x_array, int incx,
float beta, float **y_array, int incy)
{
float *A_ptr = dA_array[blockIdx.x];
float *x_ptr = x_array[blockIdx.x];
float *y_ptr = y_array[blockIdx.x];
int tx = threadIdx.x;
float res = MAGMA_S_ZERO;
if(tx<m)
{
A_ptr += lda * blockIdx.y + tx;
x_ptr += tx * incx;
}
__shared__ float sdata[sgemv_bs];
for(int i=0; i<m1; i+= sgemv_bs)
{
res += MAGMA_S_CNJG (A_ptr[i]) * x_ptr[i*incx];
}
if(m > m1)
{
if( tx + m1 < m )
{
res += MAGMA_S_CNJG(A_ptr[m1]) * x_ptr[m1*incx];
}
else
{
res = res;
}
}
sdata[tx] = res;
__syncthreads();
for(int s=blockDim.x/2; s>32;s>>=1)
{
if(tx<s)
{
sdata[tx] += sdata[tx+s];
}
__syncthreads();
}
if(sgemv_bs > 32)
{
if(tx<32)
{
sdata[tx] += sdata[tx+32];
}
}
if(tx == 0)
{
for(int i=1;i<32;i++)
{
sdata[tx] += sdata[tx + i];
}
y_ptr[blockIdx.y * incy] = sdata[0] * alpha + beta * y_ptr[blockIdx.y*incy];
}
}
/*
Matrix Conjugate Transpose Vector Multiplication
y := alpha* A**H *x + beta*y,
*/
extern "C"
void magmablas_sgemvc_batched(
int m, int n,
float alpha, float **dA_array, int lda,
float **x_array, int incx,
float beta, float **y_array, int incy,
int batchCount)
{
dim3 grid(batchCount, n, 1);
dim3 threads(sgemv_bs, 1, 1);
int m1 = (m / sgemv_bs) * sgemv_bs;
kernel_sgemvc_batched <<< grid, threads >>>(m, n, m1, alpha, dA_array, lda, x_array, incx, beta, y_array, incy);
}
#endif // defined(PRECISION_z) || defined (PRECISION_c)
/**
Purpose
-------
This routine computes Y = alpha opt(A) x + beta y, on the GPU, where
A = dA_array[i],x = x_array[i] and y = y_array[i], i=[0,batchCount-1].
This is a batched version.
@param[in]
trans CHARACTER*1.
On entry, TRANS specifies the form of op( A ) to be used in
the matrix multiplication as follows:
= 'N': op( A ) = A.
= 'T': op( A ) = A**T.
= 'C': op( A ) = A**H.
@param[in]
m INTEGER.
On entry, M specifies the number of rows of the matrix opt(A).
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix opt(A)
@param[in]
alpha REAL.
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA_array A = dA_array[i]
A: REAL array of dimension ( LDA, n ) on the GPU.
@param[in]
lda INTEGER.
LDA specifies the leading dimension of A.
@param[in]
x_array x = x_array[i]
x: REAL array of dimension.
n if trans == MagmaNoTrans.
m if trans == MagmaTrans or MagmaConjTrans.
@param[in]
incx INTEGER.
            incx specifies the increment for the elements of x.
incx must not be zero.
@param[in]
beta REAL.
On entry, BETA specifies the scalar beta.
@param[out]
y_array y = y_array[i]:
On exit y = alpha opt(A) x + beta y.
y: REAL array of dimension.
m if trans == MagmaNoTrans.
n if trans == MagmaTrans or MagmaConjTrans.
@param[in]
incy INTEGER.
incy specifies the increment for the elments of y.
incy must not be zero.
@param[in]
batchCount INTEGER
number of pointers contained in dA_array, x_array and y_array.
@ingroup magma_sblas2
******************************************************************* */
extern "C"
void magmablas_sgemv_batched(
magma_trans_t trans, magma_int_t m, magma_int_t n,
float alpha,
magmaFloat_ptr dA_array[], magma_int_t ldda,
magmaFloat_ptr dx_array[], magma_int_t incx,
float beta,
magmaFloat_ptr dy_array[], magma_int_t incy,
magma_int_t batchCount)
{
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if(m==0 || n ==0 ) return;
if ( trans == MagmaNoTrans ) {
magmablas_sgemvn_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
}
else if ( trans == MagmaTrans ) {
magmablas_sgemvt_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
}
else if ( trans == MagmaConjTrans ) {
#if defined(PRECISION_z) || defined (PRECISION_c)
magmablas_sgemvc_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
#else
magmablas_sgemvt_batched(m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount);
#endif
}
else {
fprintf( stderr, "trans = %c is invalid\n", lapacke_trans_const(trans) );
}
}
#undef sgemv_bs
|
fbf593016d7fab616e4de2680c827ae6c68c73a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/cvm_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
template <typename T>
__global__ void CvmComputeKernel(const bool use_cvm, const int64_t item_width,
const T* X, T* Y, int64_t numel) {
CUDA_KERNEL_LOOP(i, numel) {
if (use_cvm) {
if (i % item_width == 0) {
Y[i] = log(X[i] + 1);
} else if (i % item_width == 1) {
Y[i] = log(X[i] + 1) - log(X[i - 1] + 1);
} else {
Y[i] = X[i];
}
} else {
Y[i] = X[i / (item_width - 2) * item_width + i % (item_width - 2) + 2];
}
}
}
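/*
    Editor's note with a small worked example (values are hypothetical):
    with item_width = 4 and one input row of X laid out as
        [show, click, f0, f1]
    the kernel above produces, when use_cvm is true,
        [log(show+1), log(click+1) - log(show+1), f0, f1]
    and, when use_cvm is false, the two CVM slots are dropped so the
    corresponding output row is just [f0, f1].
*/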
template <typename T>
__global__ void CvmGradComputeKernel(const bool use_cvm,
const int64_t item_width, const T* CVM,
const T* DY, T* DX, bool has_lod,
const size_t* lod, size_t lod_size,
int64_t numel) {
CUDA_KERNEL_LOOP(i, numel) {
int offset = i % item_width;
if (offset <= 1) {
int cvm_id = i / item_width;
if (has_lod) {
int low = 1;
int high = lod_size - 1;
while (low < high) {
int mid = (low + high) / 2;
if (cvm_id < lod[mid])
high = mid;
else
low = mid + 1;
}
cvm_id = low - 1;
}
DX[i] = CVM[2 * cvm_id + offset];
} else {
if (use_cvm) {
DX[i] = DY[i];
} else {
DX[i] = DY[i / item_width * (item_width - 2) + i % item_width - 2];
}
}
}
}
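/*
    Editor's note on the binary search over `lod` in the gradient kernel above
    (example values are hypothetical): with lod = {0, 3, 5} and lod_size = 3,
    rows 0..2 belong to sequence 0 and rows 3..4 to sequence 1. For cvm_id = 4
    the search ends with low = 2, so cvm_id becomes 1 and the gradient of the
    two CVM slots is read from CVM[2*1 + offset], i.e. from the per-sequence
    CVM entry rather than a per-row one.
*/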
template <typename T>
class CVMCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const auto* x = context.Input<LoDTensor>("X");
const T* x_data = x->data<T>();
auto batch_size = x->dims()[0];
auto numel = x->numel();
auto item_size = numel / batch_size;
auto use_cvm = context.Attr<bool>("use_cvm");
auto* y = context.Output<LoDTensor>("Y");
T* y_data = y->mutable_data<T>(context.GetPlace());
// Input X may or may not carry LoD information; both cases are handled below.
auto stream =
context.template device_context<platform::CUDADeviceContext>().stream();
if (x->NumLevels() == 0) {
hipLaunchKernelGGL(( CvmComputeKernel), (numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
use_cvm, item_size, x_data, y_data, y->numel());
} else {
auto lod = x->lod()[0];
PADDLE_ENFORCE_EQ(
batch_size, lod[lod.size() - 1],
platform::errors::PreconditionNotMet(
"Input(X)'s dim[0] must be equal to last element of lod"));
hipLaunchKernelGGL(( CvmComputeKernel), (numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
use_cvm, item_size, x_data, y_data, y->numel());
}
}
};
template <typename T>
class CVMGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* dx = context.Output<LoDTensor>(framework::GradVarName("X"));
T* dx_data = dx->mutable_data<T>(context.GetPlace());
const Tensor* cvm = context.Input<Tensor>("CVM");
const T* cvm_data = cvm->data<T>();
const auto* dOut =
context.Input<framework::LoDTensor>(framework::GradVarName("Y"));
const T* dout_data = dOut->data<T>();
auto use_cvm = context.Attr<bool>("use_cvm");
auto offset = 2;
auto batch_size = dx->dims()[0];
auto dx_numel = dx->numel();
auto item_size = dx_numel / batch_size;
// Input X may or may not carry LoD information; both cases are handled below.
auto stream =
context.template device_context<platform::CUDADeviceContext>().stream();
if (dx->NumLevels() == 0) {
hipLaunchKernelGGL(( CvmGradComputeKernel), (dx_numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
use_cvm, item_size, cvm_data, dout_data, dx_data, false, NULL, 0,
dx_numel);
} else {
auto lod = dx->lod()[0];
PADDLE_ENFORCE_EQ(
batch_size, lod[lod.size() - 1],
platform::errors::PreconditionNotMet(
"Output(X@GRAD)'s dim[0] must be equal to last element of lod"));
paddle::framework::MixVector<size_t> mixv_lod(&lod);
hipLaunchKernelGGL(( CvmGradComputeKernel), (dx_numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
use_cvm, item_size, cvm_data, dout_data, dx_data, true,
mixv_lod.CUDAData(context.GetPlace()), lod.size(), dx_numel);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(cvm, ops::CVMCUDAKernel<float>,
ops::CVMCUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(cvm_grad, ops::CVMGradCUDAKernel<float>,
ops::CVMGradCUDAKernel<double>);
| fbf593016d7fab616e4de2680c827ae6c68c73a7.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/cvm_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
template <typename T>
__global__ void CvmComputeKernel(const bool use_cvm, const int64_t item_width,
const T* X, T* Y, int64_t numel) {
CUDA_KERNEL_LOOP(i, numel) {
if (use_cvm) {
if (i % item_width == 0) {
Y[i] = log(X[i] + 1);
} else if (i % item_width == 1) {
Y[i] = log(X[i] + 1) - log(X[i - 1] + 1);
} else {
Y[i] = X[i];
}
} else {
Y[i] = X[i / (item_width - 2) * item_width + i % (item_width - 2) + 2];
}
}
}
template <typename T>
__global__ void CvmGradComputeKernel(const bool use_cvm,
const int64_t item_width, const T* CVM,
const T* DY, T* DX, bool has_lod,
const size_t* lod, size_t lod_size,
int64_t numel) {
CUDA_KERNEL_LOOP(i, numel) {
int offset = i % item_width;
if (offset <= 1) {
int cvm_id = i / item_width;
if (has_lod) {
int low = 1;
int high = lod_size - 1;
while (low < high) {
int mid = (low + high) / 2;
if (cvm_id < lod[mid])
high = mid;
else
low = mid + 1;
}
cvm_id = low - 1;
}
DX[i] = CVM[2 * cvm_id + offset];
} else {
if (use_cvm) {
DX[i] = DY[i];
} else {
DX[i] = DY[i / item_width * (item_width - 2) + i % item_width - 2];
}
}
}
}
template <typename T>
class CVMCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const auto* x = context.Input<LoDTensor>("X");
const T* x_data = x->data<T>();
auto batch_size = x->dims()[0];
auto numel = x->numel();
auto item_size = numel / batch_size;
auto use_cvm = context.Attr<bool>("use_cvm");
auto* y = context.Output<LoDTensor>("Y");
T* y_data = y->mutable_data<T>(context.GetPlace());
// Input X may or may not carry LoD information; both cases are handled below.
auto stream =
context.template device_context<platform::CUDADeviceContext>().stream();
if (x->NumLevels() == 0) {
CvmComputeKernel<<<(numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
use_cvm, item_size, x_data, y_data, y->numel());
} else {
auto lod = x->lod()[0];
PADDLE_ENFORCE_EQ(
batch_size, lod[lod.size() - 1],
platform::errors::PreconditionNotMet(
"Input(X)'s dim[0] must be equal to last element of lod"));
CvmComputeKernel<<<(numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
use_cvm, item_size, x_data, y_data, y->numel());
}
}
};
template <typename T>
class CVMGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* dx = context.Output<LoDTensor>(framework::GradVarName("X"));
T* dx_data = dx->mutable_data<T>(context.GetPlace());
const Tensor* cvm = context.Input<Tensor>("CVM");
const T* cvm_data = cvm->data<T>();
const auto* dOut =
context.Input<framework::LoDTensor>(framework::GradVarName("Y"));
const T* dout_data = dOut->data<T>();
auto use_cvm = context.Attr<bool>("use_cvm");
auto offset = 2;
auto batch_size = dx->dims()[0];
auto dx_numel = dx->numel();
auto item_size = dx_numel / batch_size;
// Input X may or may not carry LoD information; both cases are handled below.
auto stream =
context.template device_context<platform::CUDADeviceContext>().stream();
if (dx->NumLevels() == 0) {
CvmGradComputeKernel<<<(dx_numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
use_cvm, item_size, cvm_data, dout_data, dx_data, false, NULL, 0,
dx_numel);
} else {
auto lod = dx->lod()[0];
PADDLE_ENFORCE_EQ(
batch_size, lod[lod.size() - 1],
platform::errors::PreconditionNotMet(
"Output(X@GRAD)'s dim[0] must be equal to last element of lod"));
paddle::framework::MixVector<size_t> mixv_lod(&lod);
CvmGradComputeKernel<<<(dx_numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
use_cvm, item_size, cvm_data, dout_data, dx_data, true,
mixv_lod.CUDAData(context.GetPlace()), lod.size(), dx_numel);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(cvm, ops::CVMCUDAKernel<float>,
ops::CVMCUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(cvm_grad, ops::CVMGradCUDAKernel<float>,
ops::CVMGradCUDAKernel<double>);
|
0226248fb28a642cdf1cd8757b5622fd00c1397e.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuda_common.h"
#include "cuda_op.h"
int main(int argc, char** argv)
{
if (argc < 2) {
printf("Usage: %s GPU_ID\n", argv[0]);
return -1;
}
const int gpu_id = atoi(argv[1]);
SetGPUID(gpu_id);
const size_t
n = 1 << 30,
BLOCK_SIZE = 1 << 10,
WARP_SIZE = 1 << 5,
REDUCE_SIZE = (n + WARP_SIZE - 1) / WARP_SIZE;
thrust::device_vector<unsigned> src(n, 1), tmp(REDUCE_SIZE);
const unsigned char opDesc[4][128] = {
"======thrust::reduce=======",
"======shared_sum_kernel=======",
"======warp_primitive_sum_kernel=======",
"======hipcub::BlockReduce reduce_sum_cub======="};
for (int op = 0; op < 4; ++op) {
unsigned sum;
hipEvent_t beg, end;
hipEventCreate(&beg);
hipEventCreate(&end);
hipEventRecord(beg, 0);
if (op == 0) {
sum = thrust::reduce(src.begin(), src.begin() + n);
}
if (op == 1) {
cudaCallReduceSUMSharedMem(n, thrust::raw_pointer_cast(src.data()), thrust::raw_pointer_cast(tmp.data()));
sum = thrust::reduce(tmp.begin(), tmp.begin() + (n + BLOCK_SIZE - 1) / BLOCK_SIZE);
}
if (op == 2) {
cudaCallReduceSUMWarpCom(n, thrust::raw_pointer_cast(src.data()), thrust::raw_pointer_cast(tmp.data()));
sum = thrust::reduce(tmp.begin(), tmp.begin() + (n + WARP_SIZE - 1) / WARP_SIZE);
}
if (op == 3) {
cubCallReduceSUM(n, thrust::raw_pointer_cast(src.data()), thrust::raw_pointer_cast(tmp.data()));
sum = thrust::reduce(tmp.begin(), tmp.begin() + (n + BLOCK_SIZE - 1) / BLOCK_SIZE);
}
hipEventRecord(end, 0);
hipEventSynchronize(beg);
hipEventSynchronize(end);
float elapsed_time;
hipEventElapsedTime(
&elapsed_time,
beg,
end);
std::cout << opDesc[op] << std::endl;
std::cout << sum << ": " << elapsed_time << " ms elapsed." << std::endl;
std::cout << std::endl;
// printf("%u : %fms elapsed.\n", sum, elapsed_time);
}
return 0;
}
| 0226248fb28a642cdf1cd8757b5622fd00c1397e.cu | #include "cuda_common.h"
#include "cuda_op.h"
int main(int argc, char** argv)
{
if (argc < 2) {
printf("Usage: %s GPU_ID\n", argv[0]);
return -1;
}
const int gpu_id = atoi(argv[1]);
SetGPUID(gpu_id);
const size_t
n = 1 << 30,
BLOCK_SIZE = 1 << 10,
WARP_SIZE = 1 << 5,
REDUCE_SIZE = (n + WARP_SIZE - 1) / WARP_SIZE;
thrust::device_vector<unsigned> src(n, 1), tmp(REDUCE_SIZE);
const unsigned char opDesc[4][128] = {
"======thrust::reduce=======",
"======shared_sum_kernel=======",
"======warp_primitive_sum_kernel=======",
"======cub::BlockReduce reduce_sum_cub======="};
for (int op = 0; op < 4; ++op) {
unsigned sum;
cudaEvent_t beg, end;
cudaEventCreate(&beg);
cudaEventCreate(&end);
cudaEventRecord(beg, 0);
if (op == 0) {
sum = thrust::reduce(src.begin(), src.begin() + n);
}
if (op == 1) {
cudaCallReduceSUMSharedMem(n, thrust::raw_pointer_cast(src.data()), thrust::raw_pointer_cast(tmp.data()));
sum = thrust::reduce(tmp.begin(), tmp.begin() + (n + BLOCK_SIZE - 1) / BLOCK_SIZE);
}
if (op == 2) {
cudaCallReduceSUMWarpCom(n, thrust::raw_pointer_cast(src.data()), thrust::raw_pointer_cast(tmp.data()));
sum = thrust::reduce(tmp.begin(), tmp.begin() + (n + WARP_SIZE - 1) / WARP_SIZE);
}
if (op == 3) {
cubCallReduceSUM(n, thrust::raw_pointer_cast(src.data()), thrust::raw_pointer_cast(tmp.data()));
sum = thrust::reduce(tmp.begin(), tmp.begin() + (n + BLOCK_SIZE - 1) / BLOCK_SIZE);
}
cudaEventRecord(end, 0);
cudaEventSynchronize(beg);
cudaEventSynchronize(end);
float elapsed_time;
cudaEventElapsedTime(
&elapsed_time,
beg,
end);
std::cout << opDesc[op] << std::endl;
std::cout << sum << ": " << elapsed_time << " ms elapsed." << std::endl;
std::cout << std::endl;
// printf("%u : %fms elapsed.\n", sum, elapsed_time);
}
return 0;
}
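/*
    Editor's illustrative sketch (assumption): the kernels benchmarked above
    live in cuda_op.h / cuda_common.h, which are not shown here. A
    warp-primitive sum kernel such as the one labelled
    "warp_primitive_sum_kernel" typically looks like the following, with each
    warp reducing 32 values via __shfl_down_sync and lane 0 writing one
    partial sum per warp; a second pass (or thrust::reduce, as in main above)
    finishes the reduction.
*/
__global__ void warp_sum_sketch(const unsigned* in, unsigned* out, size_t n)
{
    size_t i = blockIdx.x * (size_t)blockDim.x + threadIdx.x;
    unsigned v = (i < n) ? in[i] : 0u;
    // tree reduction within the warp using register shuffles
    for (int offset = 16; offset > 0; offset >>= 1)
        v += __shfl_down_sync(0xffffffffu, v, offset);
    if ((threadIdx.x & 31) == 0)
        out[i / 32] = v;  // one partial sum per warp
}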
|
03fde295ecaefa5675d953ab07f566bd500d1120.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <math.h>
#include <iostream>
#include <string>
#include <vector>
#include <fstream>
#include <sstream>
#include<random>
using namespace std;
#define X_trn(x, y) X_trn[x * size_train + y] // 196 * 964
#define X_tst(x, y) X_tst[x * size_test + y] // 196 * 414
#define Y_trn(x, y) Y_trn[x * size_train + y] // 1 * 964
#define Y_tst(x, y) Y_tst[x * size_test + y] // 1 * 414
#define X(x, y) X[x * size_batch + y] // 196 * 964
#define Y(x, y) Y[x * size_batch + y] // 1 * 414
#define W1(x, y) W1[x * size_input + y] // 20 * 196
#define b1(x, y) b1[x * 1 + y] // 20 * 1
#define W2(x, y) W2[x * size_hidden + y] // 2 * 20
#define b2(x, y) b2[x * 1 + y] // 2 * 1
#define dW1(x, y) dW1[x * size_input + y] // 20 * 196
#define db1(x, y) db1[x * 1 + y] // 20 * 1
#define dW2(x, y) dW2[x * size_hidden + y] // 2 * 20
#define db2(x, y) db2[x * 1 + y] // 2 * 1
#define Z1(x, y) Z1[x * size_batch + y] // 20 * 964
#define A1(x, y) A1[x * size_batch + y] // 20 * 964
#define Z2(x, y) Z2[x * size_batch + y] // 2 * 964
#define A2(x, y) A2[x * size_batch + y] // 2 * 964
#define dZ1(x, y) dZ1[x * size_batch + y] // 20 * 964
#define dA1(x, y) dA1[x * size_batch + y] // 20 * 964
#define dZ2(x, y) dZ2[x * size_batch + y] // 2 * 964
#define dA2(x, y) dA2[x * size_batch + y] // 2 * 964
#define max_index(x, y) max_index[y] // 1 * 964
#define dev_X_trn(x, y) dev_X_trn[x * size_train + y] // 196 * 964
#define dev_X_tst(x, y) dev_X_tst[x * size_test + y] // 196 * 414
#define dev_Y_trn(x, y) dev_Y_trn[x * size_train + y] // 1 * 964
#define dev_Y_tst(x, y) dev_Y_tst[x * size_test + y] // 1 * 414
#define dev_X(x, y) dev_X[x * size_batch + y] // 196 * 964
#define dev_Y(x, y) dev_Y[x * size_batch + y] // 1 * 414
#define dev_W1(x, y) dev_W1[x * size_input + y] // 20 * 196
#define dev_b1(x, y) dev_b1[x * 1 + y] // 20 * 1
#define dev_W2(x, y) dev_W2[x * size_hidden + y] // 2 * 20
#define dev_b2(x, y) dev_b2[x * 1 + y] // 2 * 1
#define dev_dW1(x, y) dev_dW1[x * size_input + y] // 20 * 196
#define dev_db1(x, y) dev_db1[x * 1 + y] // 20 * 1
#define dev_dW2(x, y) dev_dW2[x * size_hidden + y] // 2 * 20
#define dev_db2(x, y) dev_db2[x * 1 + y] // 2 * 1
#define dev_Z1(x, y) dev_Z1[x * size_batch + y] // 20 * 964
#define dev_A1(x, y) dev_A1[x * size_batch + y] // 20 * 964
#define dev_Z2(x, y) dev_Z2[x * size_batch + y] // 2 * 964
#define dev_A2(x, y) dev_A2[x * size_batch + y] // 2 * 964
#define dev_dZ1(x, y) dev_dZ1[x * size_batch + y] // 20 * 964
#define dev_dA1(x, y) dev_dA1[x * size_batch + y] // 20 * 964
#define dev_dZ2(x, y) dev_dZ2[x * size_batch + y] // 2 * 964
#define dev_dA2(x, y) dev_dA2[x * size_batch + y] // 2 * 964
#define dev_max_index(x, y) dev_max_index[y] // 1 * 964
#define size_train 964
#define size_test 414
#define size_input 196
#define size_hidden 20
#define size_output 2
#define size_X size_input*size_batch
#define size_Y size_batch
#define size_W1 size_hidden*size_input
#define size_b1 size_hidden*1
#define size_W2 size_output*size_hidden
#define size_b2 size_output*1
#define size_dW1 size_hidden*size_input
#define size_db1 size_hidden*1
#define size_dW2 size_output*size_hidden
#define size_db2 size_output*1
#define size_Z1 size_hidden*size_batch
#define size_A1 size_hidden*size_batch
#define size_Z2 size_output*size_batch
#define size_A2 size_output*size_batch
#define size_dZ1 size_hidden*size_batch
#define size_dA1 size_hidden*size_batch
#define size_dZ2 size_output*size_batch
#define size_dA2 size_output*size_batch
#define size_max_index 1*size_batch
#define size_dev_max_index 1*size_batch
int size_batch = 0;
int *Y_trn, *Y_tst, *max_index, *dev_Y, *dev_max_index;
double *X_trn, *X_tst, *X, *W1, *b1, *W2, *b2, *dW1, *db1, *dW2, *db2, *Z1, *A1, *Z2, *A2, *dZ1, *dA1, *dZ2, *dA2;
double *dev_X, *dev_W1, *dev_b1, *dev_W2, *dev_b2, *dev_dW1, *dev_db1, *dev_dW2, *dev_db2, *dev_Z1, *dev_A1, *dev_Z2, *dev_A2, *dev_dZ1, *dev_dA1, *dev_dZ2, *dev_dA2;
void read_X(string data_path, double* array)
{
ifstream inFile(data_path);
string row;
int p;
p = 0;
string value;
while (getline(inFile, row)){
stringstream col(row);
while (getline(col, value, ',')){
array[p] = stod(value);
p++;
}
}
}
void read_Y(string data_path, int* array)
{
ifstream inFile(data_path);
string row;
int p;
p = 0;
string value;
while (getline(inFile, row)){
stringstream col(row);
while (getline(col, value, ',')){
array[p] = stod(value);
p++;
}
}
}
/* Allocate the host arrays and read the training / test data from the csv files */
void read_data()
{
X_trn = (double *) malloc(size_input*size_train * sizeof(double)); // 196*964
Y_trn = (int *) malloc(size_train * sizeof(int)); // 1*964
X_tst = (double *) malloc(size_input*size_test * sizeof(double)); // 196*414
Y_tst = (int *) malloc(size_test * sizeof(int)); // 1*414
string X_trn_path = "X_trn.csv"; // Define the names of the csv files
string Y_trn_path = "Y_trn.csv";
string X_tst_path = "X_tst.csv";
string Y_tst_path = "Y_tst.csv";
read_X(X_trn_path, X_trn); //Execution
read_Y(Y_trn_path, Y_trn);
read_X(X_tst_path, X_tst);
read_Y(Y_tst_path, Y_tst);
}
/* init W b */
void initialize_Wb() {
W1 = (double *) malloc(size_W1*sizeof(double)); // 20*196
b1 = (double *) malloc(size_b1*sizeof(double)); // 20*1
W2 = (double *) malloc(size_W2*sizeof(double)); // 2*20
b2 = (double *) malloc(size_b2*sizeof(double)); // 2*1
dW1 = (double *) malloc(size_dW1*sizeof(double)); // 20*196
db1 = (double *) malloc(size_db1*sizeof(double)); // 20*1
dW2 = (double *) malloc(size_dW2*sizeof(double)); // 2*20
db2 = (double *) malloc(size_db2*sizeof(double)); // 2*1
default_random_engine e;
uniform_real_distribution<double> u(-1,1);
for (int i = 0; i < size_W1; i++) {
W1[i] = u(e);
}
for (int i = 0; i < size_W2; i++) {
W2[i] = u(e);
}
for (int i = 0; i < size_b1; i++) {
b1[i] = 0;
}
for (int i = 0; i < size_b2; i++) {
b2[i] = 0;
}
}
/* init Z and A in the host */
void initialize_ZA(int size_batch)
{
Z1 = (double *) malloc(size_Z1*sizeof(double)); // 20*964
A1 = (double *) malloc(size_A1*sizeof(double)); // 20*964
Z2 = (double *) malloc(size_Z2*sizeof(double)); // 2*964
A2 = (double *) malloc(size_A2*sizeof(double)); // 2*964
dZ1 = (double *) malloc(size_dZ1*sizeof(double)); // 20*964
dA1 = (double *) malloc(size_dA1*sizeof(double)); // 20*964
dZ2 = (double *) malloc(size_dZ2*sizeof(double)); // 2*964
dA2 = (double *) malloc(size_dA2*sizeof(double)); // 2*964
max_index = (int *) malloc(size_max_index*sizeof(int)); // 1*964
}
/* init Z and A in the device */
void initialize_dev_ZA(int size_batch)
{
hipMalloc((void**)&dev_X, size_X * sizeof(double));
hipMalloc((void**)&dev_Y, size_Y * sizeof(int));
hipMalloc((void**)&dev_max_index, size_dev_max_index * sizeof(int));
hipMalloc((void**)&dev_Z1, size_Z1 * sizeof(double));
hipMalloc((void**)&dev_A1, size_A1 * sizeof(double));
hipMalloc((void**)&dev_Z2, size_Z2 * sizeof(double));
hipMalloc((void**)&dev_A2, size_A2 * sizeof(double));
}
/* free Z and A in the device */
void free_dev_ZA()
{
hipFree(dev_X);
hipFree(dev_Y);
hipFree(dev_max_index);
hipFree(dev_Z1);
hipFree(dev_A1);
hipFree(dev_Z2);
hipFree(dev_A2);
}
/* init W and b in the device */
void initialize_dev_Wb()
{
hipMalloc((void**)&dev_W1, size_W1 * sizeof(double));
hipMalloc((void**)&dev_b1, size_b1 * sizeof(double));
hipMalloc((void**)&dev_W2, size_W2 * sizeof(double));
hipMalloc((void**)&dev_b2, size_b2 * sizeof(double));
hipMalloc((void**)&dev_dW1, size_dW1 * sizeof(double));
hipMalloc((void**)&dev_db1, size_db1 * sizeof(double));
hipMalloc((void**)&dev_dW2, size_dW2 * sizeof(double));
hipMalloc((void**)&dev_db2, size_db2 * sizeof(double));
}
/* free W and b in the device */
void free_dev_Wb()
{
hipFree(dev_W1);
hipFree(dev_b1);
hipFree(dev_W2);
hipFree(dev_b2);
hipFree(dev_dW1);
hipFree(dev_db1);
hipFree(dev_dW2);
hipFree(dev_db2);
}
/* init dZ and dA in the device */
void initialize_dev_dZA(int size_batch)
{
hipMalloc((void**)&dev_dZ1, size_dZ1 * sizeof(double));
hipMalloc((void**)&dev_dA1, size_dA1 * sizeof(double));
hipMalloc((void**)&dev_dZ2, size_dZ2 * sizeof(double));
hipMalloc((void**)&dev_dA2, size_dA2 * sizeof(double));
}
/* free dZ and dA in the device */
void free_dev_dZA()
{
hipFree(dev_dZ1);
hipFree(dev_dA1);
hipFree(dev_dZ2);
hipFree(dev_dA2);
}
__global__ void HiddenLayer_Sigmoid(double* dev_X, double* dev_W1, double* dev_b1, double* dev_A1, double* dev_Z1, int K, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= max_row || j >= max_col)
return;
double partial = 0.0;
for (int k = 0; k < K; k++)
partial += dev_W1(i,k) * dev_X(k,j);
dev_Z1(i,j) = partial + dev_b1(i,0);
dev_A1(i,j) = 1 / (1 + exp(0 - dev_Z1(i,j)));
}
__global__ void HiddenLayer_ReLU(double* dev_X, double* dev_W1, double* dev_b1, double* dev_A1, double* dev_Z1, int K, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= max_row || j >= max_col)
return;
double partial = 0.0;
for (int k = 0; k < K; k++)
partial += dev_W1(i,k) * dev_X(k,j);
dev_Z1(i,j) = partial + dev_b1(i,0);
dev_A1(i,j) = dev_Z1(i,j) * (dev_Z1(i,j) > 0);
}
__global__ void OutputLayer(double* dev_A1, double* dev_W2, double* dev_b2, double* dev_Z2, int K, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= max_row || j >= max_col)
return;
double partial = 0.0;
for (int k = 0; k < K; k++)
partial += dev_W2(i,k) * dev_A1(k,j);
dev_Z2(i,j) = partial + dev_b2(i,0);
}
// One thread per column: compute the predicted label in max_index (1 when the
// first row of Z2 is the largest, 0 otherwise, matching the encoding of Y) and
// turn the logits into softmax probabilities. The column maximum is only used
// for that prediction; it is not subtracted before exp(), so this softmax is
// not numerically stabilised.
__global__ void Softmax(double* dev_Z2, double* dev_A2, int* dev_max_index, int size_batch, int max_row, int max_col)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(j >= max_col)
return;
double max = dev_Z2(0, j), sum = 0;
dev_max_index[j] = 1;
for (int i = 1; i < max_row; i++) {
if (dev_Z2(i, j) > max){
max = dev_Z2(i, j);
dev_max_index[j] = 0;
}
}
for (int i = 0; i < max_row; i++)
sum += exp(dev_Z2(i, j));
for (int i = 0; i < max_row; i++)
dev_A2(i, j) = exp(dev_Z2(i, j)) / sum;
}
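// Note on the gradient computed by Back_dZ2 below: for a two-class softmax
// with cross-entropy loss and target y in {0,1} (row 0 = positive class),
// dL/dz0 = A2(0,j) - y and dL/dz1 = y - A2(0,j), since A2(0,j) + A2(1,j) = 1.
// The division by size_batch folds the 1/N of the averaged loss into dZ2, so
// the later dW/db kernels are plain matrix products without extra rescaling.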
__global__ void Back_dZ2 (double* dev_A2, int* dev_Y_trn, double* dev_dZ2, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y; // y == row; x == col
int j = threadIdx.x + blockIdx.x * blockDim.x; // column of Z2
if(i >= max_row || j >= max_col)
return;
dev_dZ2(0, j) = (dev_A2(0, j) - dev_Y_trn(0, j)) / size_batch;
dev_dZ2(1, j) = (dev_Y_trn(0, j) - dev_A2(0, j)) / size_batch;
}
// dW1(20*196) = dZ1(20*964) * X(196*964)
// dW2(2*20) = dZ2(2*964) * A1(20*964)
__global__ void Back_dW (double* dev_A, double* dev_dZ, double* dev_dW, int size_batch, int W_col, int max_row, int max_col)
{
int k;
double tmp = 0.0;
int i = threadIdx.y + blockIdx.y * blockDim.y; // i/y -> row
int j = threadIdx.x + blockIdx.x * blockDim.x; // j/x -> col
if(i >= max_row || j >= max_col)
return;
for (k = 0; k < size_batch; k++)
tmp += dev_dZ[i*size_batch+k] * dev_A[j*size_batch+k];
dev_dW[i*W_col+j] = tmp;
}
// db1(20*1) is from dZ1(20*964)
// db2(2*1) is from dZ1(2*964)
__global__ void Back_db(double* dev_dZ, double* dev_db, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y; // i/y -> row
if(i >= max_row)
return;
double tmp = 0;
for(int j = 0; j < max_col; j++) {
tmp += dev_dZ[i*size_batch+j];
}
dev_db[i*1+0] = tmp;
}
__global__ void Back_dA1 (double* dev_W2, double* dev_dZ2, double* dev_dA1, int size_batch, int K, int max_row, int max_col)
{
// dA1(20*964) = dZ2(2*964) * W2(2*20)
int k;
double partial = 0.0;
int i = threadIdx.y + blockIdx.y * blockDim.y; // i/y -> row
int j = threadIdx.x + blockIdx.x * blockDim.x; // j/x -> col
if(i >= max_row || j >= max_col)
return;
for (k = 0; k < K; k++)
partial += dev_W2(k,i) * dev_dZ2(k,j);
dev_dA1(i,j) = partial;
}
__global__ void Back_dZ1_Sigmoid (double* dev_dA1, double* dev_A1, double* dev_Z1, double* dev_dZ1, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y; // y == row; x == col
int j = threadIdx.x + blockIdx.x * blockDim.x; // column of Z2
if(i >= max_row || j >= max_col)
return;
dev_dZ1(i, j) = dev_dA1(i, j) * dev_A1(i, j) * (1-dev_A1(i, j)); // dZ1 = dA1*A1*(1-A1)
}
__global__ void Back_dZ1_ReLU (double* dev_dA1, double* dev_A1, double* dev_Z1, double* dev_dZ1, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y; // y == row; x == col
int j = threadIdx.x + blockIdx.x * blockDim.x; // column of Z2
if(i >= max_row || j >= max_col)
return;
if(dev_Z1(i, j) < 0)
dev_dZ1(i, j) = 0;
else
dev_dZ1(i, j) = dev_dA1(i, j); //dZ1 = dA1*Z1_mask
}
__global__ void update_Wb(double* dev_dWb, double* dev_Wb, int col, double learn_rate, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y; // y == row; x == col
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= max_row || j >= max_col)
return;
dev_Wb[i*col+j] = dev_Wb[i*col+j] - learn_rate * dev_dWb[i*col+j];
}
/* forward to calculate A Z preY */
void forward(double* X, int* Y, string type, int acti_type, int block_size){
if(type == "train"){
size_batch = size_train;
}
else{
size_batch = size_test;
}
// init Z and A in the host
initialize_ZA(size_batch);
// init X Y W b Z A in the device
initialize_dev_ZA(size_batch);
dim3 dimBlock(block_size, block_size);
// hidden layer and activation function to get Z1 and A1
dim3 dimGrid1((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_hidden+ dimBlock.y - 1)/ dimBlock.y);
hipMemcpy(dev_W1, W1, size_W1 * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_b1, b1, size_b1 * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_X, X, size_X * sizeof(double), hipMemcpyHostToDevice);
if(acti_type == 1)
hipLaunchKernelGGL(( HiddenLayer_Sigmoid), dim3(dimGrid1), dim3(dimBlock), 0, 0, dev_X, dev_W1, dev_b1, dev_A1, dev_Z1, size_input, size_batch, size_hidden, size_batch);
else if(acti_type == 2)
hipLaunchKernelGGL(( HiddenLayer_ReLU), dim3(dimGrid1), dim3(dimBlock), 0, 0, dev_X, dev_W1, dev_b1, dev_A1, dev_Z1, size_input, size_batch, size_hidden, size_batch);
hipMemcpy(Z1, dev_Z1, size_Z1 * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(A1, dev_A1, size_A1 * sizeof(double), hipMemcpyDeviceToHost);
// output layer to get Z2
dim3 dimGrid2((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
hipMemcpy(dev_W2, W2, size_W2 * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_b2, b2, size_b2 * sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( OutputLayer), dim3(dimGrid2), dim3(dimBlock), 0, 0, dev_A1, dev_W2, dev_b2, dev_Z2, size_hidden, size_batch, size_output, size_batch);
hipMemcpy(Z2, dev_Z2, size_Z2 * sizeof(double), hipMemcpyDeviceToHost);
// softmax layer to get A2 and max_index
dim3 dimGrid3((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
hipLaunchKernelGGL(( Softmax), dim3(dimGrid3), dim3(dimBlock), 0, 0, dev_Z2, dev_A2, dev_max_index, size_batch, size_output, size_batch);
hipMemcpy(A2, dev_A2, size_A2 * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(max_index, dev_max_index, size_max_index * sizeof(int), hipMemcpyDeviceToHost);
free_dev_ZA();
}
/* average binary cross-entropy loss over the batch: mean of -y*log(A2[0]) - (1-y)*log(A2[1]) */
double cross_entropy_loss(int* Y, double* A2, int col)
{
double loss = 0;
for(int c = 0; c < col; c++) {
loss += -log(A2(0, c)) * Y(0, c) - log(A2(1, c)) * (1-Y(0, c));
}
return loss/col;
}
/* backward to calculate dW db */
void backprop(double* X, int* Y, int acti_type, int block_size) { // type = 1 is Sigmoid
size_batch = size_train;
initialize_dev_ZA(size_batch);
dim3 dimBlock(block_size, block_size);
// get dZ2
dim3 dimGrid1((size_batch + dimBlock.x - 1)/ dimBlock.x, (1 + dimBlock.y - 1)/ dimBlock.y);
hipMemcpy(dev_A2, A2, size_A2 * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_Y, Y, size_Y * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_dZ2, dZ2, size_dZ2 * sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Back_dZ2), dim3(dimGrid1), dim3(dimBlock), 0, 0, dev_A2, dev_Y, dev_dZ2, size_batch, 1, size_batch);
hipMemcpy(dZ2, dev_dZ2, size_dZ2 * sizeof(double), hipMemcpyDeviceToHost);
// get dw2
dim3 dimGrid2((size_hidden + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
hipMemcpy(dev_A1, A1, size_A1 * sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Back_dW), dim3(dimGrid2), dim3(dimBlock), 0, 0, dev_A1, dev_dZ2, dev_dW2, size_batch, size_hidden, size_output, size_hidden);
hipMemcpy(dW2, dev_dW2, size_dW2 * sizeof(double), hipMemcpyDeviceToHost);
// get db2
dim3 dimGrid3((1 + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
hipLaunchKernelGGL(( Back_db), dim3(dimGrid3), dim3(dimBlock), 0, 0, dev_dZ2, dev_db2, size_batch, size_output, size_batch);
hipMemcpy(db2, dev_db2, size_db2 * sizeof(double), hipMemcpyDeviceToHost);
// get dA1
dim3 dimGrid4((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
hipMemcpy(dev_W2, W2, size_W2 * sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Back_dA1), dim3(dimGrid4), dim3(dimBlock), 0, 0, dev_W2, dev_dZ2, dev_dA1, size_batch, size_output, size_hidden, size_batch);
hipMemcpy(dA1, dev_dA1, size_dA1 * sizeof(double), hipMemcpyDeviceToHost);
// get dZ1
dim3 dimGrid5((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
hipMemcpy(dev_A1, A1, size_A1 * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_Z1, Z1, size_Z1 * sizeof(double), hipMemcpyHostToDevice);
if(acti_type == 1)
hipLaunchKernelGGL(( Back_dZ1_Sigmoid), dim3(dimGrid5), dim3(dimBlock), 0, 0, dev_dA1, dev_A1, dev_Z1, dev_dZ1, size_batch, size_hidden, size_batch);
else if(acti_type == 2)
hipLaunchKernelGGL(( Back_dZ1_ReLU), dim3(dimGrid5), dim3(dimBlock), 0, 0, dev_dA1, dev_A1, dev_Z1, dev_dZ1, size_batch, size_hidden, size_batch);
hipMemcpy(dZ1, dev_dZ1, size_dZ1 * sizeof(double), hipMemcpyDeviceToHost);
// get dW1
dim3 dimGrid6((size_input + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
hipMemcpy(dev_X, X, size_X * sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Back_dW), dim3(dimGrid6), dim3(dimBlock), 0, 0, dev_X, dev_dZ1, dev_dW1, size_batch, size_input, size_hidden, size_input);
hipMemcpy(dW1, dev_dW1, size_dW1 * sizeof(double), hipMemcpyDeviceToHost);
// get b1
dim3 dimGrid7((1 + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
hipLaunchKernelGGL(( Back_db), dim3(dimGrid7), dim3(dimBlock), 0, 0, dev_dZ1, dev_db1, size_batch, size_hidden, size_batch);
hipMemcpy(db1, dev_db1, size_db1 * sizeof(double), hipMemcpyDeviceToHost);
// free ZA on device
free_dev_ZA();
}
/* update W b */
void updateParameter(double learn_rate, int block_size)
{
dim3 dimBlock(block_size, block_size);
// update w1
hipMemcpy(dev_dW1, dW1, size_dW1 * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_W1, W1, size_W1 * sizeof(double), hipMemcpyHostToDevice);
dim3 dimGrid1((size_input + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
hipLaunchKernelGGL(( update_Wb), dim3(dimGrid1), dim3(dimBlock), 0, 0, dev_dW1, dev_W1, size_input, learn_rate, size_hidden, size_input);
hipMemcpy(W1, dev_W1, size_W1 * sizeof(double), hipMemcpyDeviceToHost);
// update b1
hipMemcpy(dev_db1, db1, size_db1 * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_b1, b1, size_b1 * sizeof(double), hipMemcpyHostToDevice);
dim3 dimGrid2((1 + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
hipLaunchKernelGGL(( update_Wb), dim3(dimGrid2), dim3(dimBlock), 0, 0, dev_db1, dev_b1, 1, learn_rate, size_hidden, 1);
hipMemcpy(b1, dev_b1, size_b1 * sizeof(double), hipMemcpyDeviceToHost);
// update w2
hipMemcpy(dev_dW2, dW2, size_dW2 * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_W2, W2, size_W2 * sizeof(double), hipMemcpyHostToDevice);
dim3 dimGrid3((size_hidden + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
hipLaunchKernelGGL(( update_Wb), dim3(dimGrid3), dim3(dimBlock), 0, 0, dev_dW2, dev_W2, size_hidden, learn_rate, size_output, size_hidden);
hipMemcpy(W2, dev_W2, size_W2 * sizeof(double), hipMemcpyDeviceToHost);
// update b2
hipMemcpy(dev_db2, db2, size_db2 * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_b2, b2, size_b2 * sizeof(double), hipMemcpyHostToDevice);
dim3 dimGrid4((1 + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
hipLaunchKernelGGL(( update_Wb), dim3(dimGrid4), dim3(dimBlock), 0, 0, dev_db2, dev_b2, 1, learn_rate, size_output, 1);
hipMemcpy(b2, dev_b2, size_b2 * sizeof(double), hipMemcpyDeviceToHost);
}
double accuracy(int* max_index, int* Y, int size_batch)
{
int i;
double count = 0;
for(i = 0; i < size_batch; i++) {
if(Y(0, i) == max_index(0, i))
count += 1;
}
return count/double(size_batch);
}
void train(double* X_trn, int* Y_trn, int acti_type, int block_size)
{
forward(X_trn, Y_trn, "train", acti_type, block_size);
backprop(X_trn, Y_trn, acti_type, block_size); // 1 Sigmoid 2 ReLU
updateParameter(0.01, block_size);
}
double test(double* X, int* Y, string type, int acti_type, int block_size)
{
forward(X, Y, type, acti_type, block_size);
if(type == "train")
return accuracy(max_index, Y, size_train);
else
return accuracy(max_index, Y, size_test);
}
int main(int argc, char *argv[])
{
int block_size;
int epochs = 20000;
int acti_type;
double acc_trn, acc_tst;
if ( argc < 3 ){
printf(" Usage: first argument: size of CUDA block \n");
printf(" second argument: activation type (1 = Sigmoid, 2 = ReLU) \n");
return -1;
} else if ( argc > 3 ) {
printf("\n Too many arguments. \n");
return -1;
} else {
block_size = atoi(argv[1]);
acti_type = atoi(argv[2]);
}
initialize_Wb();
initialize_dev_Wb();
initialize_dev_dZA(size_train);
read_data();
float elapsed_time = 0.0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
for(int e = 0; e < epochs; e++) {
train(X_trn, Y_trn, acti_type, block_size);
// double loss = cross_entropy_loss(Y_trn, A2, size_train);
// printf("%f\n", loss);
// printf("the %d epoch, the training loss is: %f \n", e, loss);
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time, start, stop);
printf( "Elapsed Time: %.4e msec. \n", elapsed_time );
acc_trn = test(X_trn, Y_trn, "train", acti_type, block_size);
acc_tst = test(X_tst, Y_tst, "test", acti_type, block_size);
printf("the training accuracy is: %f, the test accuracy is: %f\n", acc_trn, acc_tst);
free_dev_Wb();
free_dev_dZA();
} | 03fde295ecaefa5675d953ab07f566bd500d1120.cu | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <cuda_runtime.h>
#include <time.h>
#include <math.h>
#include <iostream>
#include <string>
#include <vector>
#include <fstream>
#include <sstream>
#include<random>
using namespace std;
#define X_trn(x, y) X_trn[x * size_train + y] // 196 * 964
#define X_tst(x, y) X_tst[x * size_test + y] // 196 * 414
#define Y_trn(x, y) Y_trn[x * size_train + y] // 1 * 964
#define Y_tst(x, y) Y_tst[x * size_test + y] // 1 * 414
#define X(x, y) X[x * size_batch + y] // 196 * 964
#define Y(x, y) Y[x * size_batch + y] // 1 * 414
#define W1(x, y) W1[x * size_input + y] // 20 * 196
#define b1(x, y) b1[x * 1 + y] // 20 * 1
#define W2(x, y) W2[x * size_hidden + y] // 2 * 20
#define b2(x, y) b2[x * 1 + y] // 2 * 1
#define dW1(x, y) dW1[x * size_input + y] // 20 * 196
#define db1(x, y) db1[x * 1 + y] // 20 * 1
#define dW2(x, y) dW2[x * size_hidden + y] // 2 * 20
#define db2(x, y) db2[x * 1 + y] // 2 * 1
#define Z1(x, y) Z1[x * size_batch + y] // 20 * 964
#define A1(x, y) A1[x * size_batch + y] // 20 * 964
#define Z2(x, y) Z2[x * size_batch + y] // 2 * 964
#define A2(x, y) A2[x * size_batch + y] // 2 * 964
#define dZ1(x, y) dZ1[x * size_batch + y] // 20 * 964
#define dA1(x, y) dA1[x * size_batch + y] // 20 * 964
#define dZ2(x, y) dZ2[x * size_batch + y] // 2 * 964
#define dA2(x, y) dA2[x * size_batch + y] // 2 * 964
#define max_index(x, y) max_index[y] // 1 * 964
#define dev_X_trn(x, y) dev_X_trn[x * size_train + y] // 196 * 964
#define dev_X_tst(x, y) dev_X_tst[x * size_test + y] // 196 * 414
#define dev_Y_trn(x, y) dev_Y_trn[x * size_train + y] // 1 * 964
#define dev_Y_tst(x, y) dev_Y_tst[x * size_test + y] // 1 * 414
#define dev_X(x, y) dev_X[x * size_batch + y] // 196 * 964
#define dev_Y(x, y) dev_Y[x * size_batch + y] // 1 * 414
#define dev_W1(x, y) dev_W1[x * size_input + y] // 20 * 196
#define dev_b1(x, y) dev_b1[x * 1 + y] // 20 * 1
#define dev_W2(x, y) dev_W2[x * size_hidden + y] // 2 * 20
#define dev_b2(x, y) dev_b2[x * 1 + y] // 2 * 1
#define dev_dW1(x, y) dev_dW1[x * size_input + y] // 20 * 196
#define dev_db1(x, y) dev_db1[x * 1 + y] // 20 * 1
#define dev_dW2(x, y) dev_dW2[x * size_hidden + y] // 2 * 20
#define dev_db2(x, y) dev_db2[x * 1 + y] // 2 * 1
#define dev_Z1(x, y) dev_Z1[x * size_batch + y] // 20 * 964
#define dev_A1(x, y) dev_A1[x * size_batch + y] // 20 * 964
#define dev_Z2(x, y) dev_Z2[x * size_batch + y] // 2 * 964
#define dev_A2(x, y) dev_A2[x * size_batch + y] // 2 * 964
#define dev_dZ1(x, y) dev_dZ1[x * size_batch + y] // 20 * 964
#define dev_dA1(x, y) dev_dA1[x * size_batch + y] // 20 * 964
#define dev_dZ2(x, y) dev_dZ2[x * size_batch + y] // 2 * 964
#define dev_dA2(x, y) dev_dA2[x * size_batch + y] // 2 * 964
#define dev_max_index(x, y) dev_max_index[y] // 1 * 964
#define size_train 964
#define size_test 414
#define size_input 196
#define size_hidden 20
#define size_output 2
#define size_X size_input*size_batch
#define size_Y size_batch
#define size_W1 size_hidden*size_input
#define size_b1 size_hidden*1
#define size_W2 size_output*size_hidden
#define size_b2 size_output*1
#define size_dW1 size_hidden*size_input
#define size_db1 size_hidden*1
#define size_dW2 size_output*size_hidden
#define size_db2 size_output*1
#define size_Z1 size_hidden*size_batch
#define size_A1 size_hidden*size_batch
#define size_Z2 size_output*size_batch
#define size_A2 size_output*size_batch
#define size_dZ1 size_hidden*size_batch
#define size_dA1 size_hidden*size_batch
#define size_dZ2 size_output*size_batch
#define size_dA2 size_output*size_batch
#define size_max_index 1*size_batch
#define size_dev_max_index 1*size_batch
int size_batch = 0;
int *Y_trn, *Y_tst, *max_index, *dev_Y, *dev_max_index;
double *X_trn, *X_tst, *X, *W1, *b1, *W2, *b2, *dW1, *db1, *dW2, *db2, *Z1, *A1, *Z2, *A2, *dZ1, *dA1, *dZ2, *dA2;
double *dev_X, *dev_W1, *dev_b1, *dev_W2, *dev_b2, *dev_dW1, *dev_db1, *dev_dW2, *dev_db2, *dev_Z1, *dev_A1, *dev_Z2, *dev_A2, *dev_dZ1, *dev_dA1, *dev_dZ2, *dev_dA2;
void read_X(string data_path, double* array)
{
ifstream inFile(data_path);
string row;
int p;
p = 0;
string value;
while (getline(inFile, row)){
stringstream col(row);
while (getline(col, value, ',')){
array[p] = stod(value);
p++;
}
}
}
void read_Y(string data_path, int* array)
{
ifstream inFile(data_path);
string row;
int p;
p = 0;
string value;
while (getline(inFile, row)){
stringstream col(row);
while (getline(col, value, ',')){
array[p] = stod(value);
p++;
}
}
}
/* Allocate the host arrays and read the training / test data from the csv files */
void read_data()
{
X_trn = (double *) malloc(size_input*size_train * sizeof(double)); // 196*964
Y_trn = (int *) malloc(size_train * sizeof(int)); // 1*964
X_tst = (double *) malloc(size_input*size_test * sizeof(double)); // 196*414
Y_tst = (int *) malloc(size_test * sizeof(int)); // 1*414
string X_trn_path = "X_trn.csv"; // Define the names of the csv files
string Y_trn_path = "Y_trn.csv";
string X_tst_path = "X_tst.csv";
string Y_tst_path = "Y_tst.csv";
read_X(X_trn_path, X_trn); //Execution
read_Y(Y_trn_path, Y_trn);
read_X(X_tst_path, X_tst);
read_Y(Y_tst_path, Y_tst);
}
/* init W b */
void initialize_Wb() {
W1 = (double *) malloc(size_W1*sizeof(double)); // 20*196
b1 = (double *) malloc(size_b1*sizeof(double)); // 20*1
W2 = (double *) malloc(size_W2*sizeof(double)); // 2*20
b2 = (double *) malloc(size_b2*sizeof(double)); // 2*1
dW1 = (double *) malloc(size_dW1*sizeof(double)); // 20*196
db1 = (double *) malloc(size_db1*sizeof(double)); // 20*1
dW2 = (double *) malloc(size_dW2*sizeof(double)); // 2*20
db2 = (double *) malloc(size_db2*sizeof(double)); // 2*1
default_random_engine e;
uniform_real_distribution<double> u(-1,1);
for (int i = 0; i < size_W1; i++) {
W1[i] = u(e);
}
for (int i = 0; i < size_W2; i++) {
W2[i] = u(e);
}
for (int i = 0; i < size_b1; i++) {
b1[i] = 0;
}
for (int i = 0; i < size_b2; i++) {
b2[i] = 0;
}
}
/* init Z and A in the host */
void initialize_ZA(int size_batch)
{
Z1 = (double *) malloc(size_Z1*sizeof(double)); // 20*964
A1 = (double *) malloc(size_A1*sizeof(double)); // 20*964
Z2 = (double *) malloc(size_Z2*sizeof(double)); // 2*964
A2 = (double *) malloc(size_A2*sizeof(double)); // 2*964
dZ1 = (double *) malloc(size_dZ1*sizeof(double)); // 20*964
dA1 = (double *) malloc(size_dA1*sizeof(double)); // 20*964
dZ2 = (double *) malloc(size_dZ2*sizeof(double)); // 2*964
dA2 = (double *) malloc(size_dA2*sizeof(double)); // 2*964
max_index = (int *) malloc(size_max_index*sizeof(int)); // 1*964
}
/* init Z and A in the device */
void initialize_dev_ZA(int size_batch)
{
cudaMalloc((void**)&dev_X, size_X * sizeof(double));
cudaMalloc((void**)&dev_Y, size_Y * sizeof(int));
cudaMalloc((void**)&dev_max_index, size_dev_max_index * sizeof(int));
cudaMalloc((void**)&dev_Z1, size_Z1 * sizeof(double));
cudaMalloc((void**)&dev_A1, size_A1 * sizeof(double));
cudaMalloc((void**)&dev_Z2, size_Z2 * sizeof(double));
cudaMalloc((void**)&dev_A2, size_A2 * sizeof(double));
}
/* free Z and A in the device */
void free_dev_ZA()
{
cudaFree(dev_X);
cudaFree(dev_Y);
cudaFree(dev_max_index);
cudaFree(dev_Z1);
cudaFree(dev_A1);
cudaFree(dev_Z2);
cudaFree(dev_A2);
}
/* init W and b in the device */
void initialize_dev_Wb()
{
cudaMalloc((void**)&dev_W1, size_W1 * sizeof(double));
cudaMalloc((void**)&dev_b1, size_b1 * sizeof(double));
cudaMalloc((void**)&dev_W2, size_W2 * sizeof(double));
cudaMalloc((void**)&dev_b2, size_b2 * sizeof(double));
cudaMalloc((void**)&dev_dW1, size_dW1 * sizeof(double));
cudaMalloc((void**)&dev_db1, size_db1 * sizeof(double));
cudaMalloc((void**)&dev_dW2, size_dW2 * sizeof(double));
cudaMalloc((void**)&dev_db2, size_db2 * sizeof(double));
}
/* free W and b in the device */
void free_dev_Wb()
{
cudaFree(dev_W1);
cudaFree(dev_b1);
cudaFree(dev_W2);
cudaFree(dev_b2);
cudaFree(dev_dW1);
cudaFree(dev_db1);
cudaFree(dev_dW2);
cudaFree(dev_db2);
}
/* init dZ and dA in the device */
void initialize_dev_dZA(int size_batch)
{
cudaMalloc((void**)&dev_dZ1, size_dZ1 * sizeof(double));
cudaMalloc((void**)&dev_dA1, size_dA1 * sizeof(double));
cudaMalloc((void**)&dev_dZ2, size_dZ2 * sizeof(double));
cudaMalloc((void**)&dev_dA2, size_dA2 * sizeof(double));
}
/* free dZ and dA in the device */
void free_dev_dZA()
{
cudaFree(dev_dZ1);
cudaFree(dev_dA1);
cudaFree(dev_dZ2);
cudaFree(dev_dA2);
}
__global__ void HiddenLayer_Sigmoid(double* dev_X, double* dev_W1, double* dev_b1, double* dev_A1, double* dev_Z1, int K, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= max_row || j >= max_col)
return;
double partial = 0.0;
for (int k = 0; k < K; k++)
partial += dev_W1(i,k) * dev_X(k,j);
dev_Z1(i,j) = partial + dev_b1(i,0);
dev_A1(i,j) = 1 / (1 + exp(0 - dev_Z1(i,j)));
}
__global__ void HiddenLayer_ReLU(double* dev_X, double* dev_W1, double* dev_b1, double* dev_A1, double* dev_Z1, int K, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= max_row || j >= max_col)
return;
double partial = 0.0;
for (int k = 0; k < K; k++)
partial += dev_W1(i,k) * dev_X(k,j);
dev_Z1(i,j) = partial + dev_b1(i,0);
dev_A1(i,j) = dev_Z1(i,j) * (dev_Z1(i,j) > 0);
}
__global__ void OutputLayer(double* dev_A1, double* dev_W2, double* dev_b2, double* dev_Z2, int K, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y;
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= max_row || j >= max_col)
return;
double partial = 0.0;
for (int k = 0; k < K; k++)
partial += dev_W2(i,k) * dev_A1(k,j);
dev_Z2(i,j) = partial + dev_b2(i,0);
}
// One thread per column: compute the predicted label in max_index (1 when the
// first row of Z2 is the largest, 0 otherwise, matching the encoding of Y) and
// turn the logits into softmax probabilities. The column maximum is only used
// for that prediction; it is not subtracted before exp(), so this softmax is
// not numerically stabilised.
__global__ void Softmax(double* dev_Z2, double* dev_A2, int* dev_max_index, int size_batch, int max_row, int max_col)
{
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(j >= max_col)
return;
double max = dev_Z2(0, j), sum = 0;
dev_max_index[j] = 1;
for (int i = 1; i < max_row; i++) {
if (dev_Z2(i, j) > max){
max = dev_Z2(i, j);
dev_max_index[j] = 0;
}
}
for (int i = 0; i < max_row; i++)
sum += exp(dev_Z2(i, j));
for (int i = 0; i < max_row; i++)
dev_A2(i, j) = exp(dev_Z2(i, j)) / sum;
}
__global__ void Back_dZ2 (double* dev_A2, int* dev_Y_trn, double* dev_dZ2, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y; // y == row; x == col
int j = threadIdx.x + blockIdx.x * blockDim.x; // column of Z2
if(i >= max_row || j >= max_col)
return;
dev_dZ2(0, j) = (dev_A2(0, j) - dev_Y_trn(0, j)) / size_batch;
dev_dZ2(1, j) = (dev_Y_trn(0, j) - dev_A2(0, j)) / size_batch;
}
// dW1(20*196) = dZ1(20*964) * X(196*964)
// dW2(2*20) = dZ2(2*964) * A1(20*964)
__global__ void Back_dW (double* dev_A, double* dev_dZ, double* dev_dW, int size_batch, int W_col, int max_row, int max_col)
{
int k;
double tmp = 0.0;
int i = threadIdx.y + blockIdx.y * blockDim.y; // i/y -> row
int j = threadIdx.x + blockIdx.x * blockDim.x; // j/x -> col
if(i >= max_row || j >= max_col)
return;
for (k = 0; k < size_batch; k++)
tmp += dev_dZ[i*size_batch+k] * dev_A[j*size_batch+k];
dev_dW[i*W_col+j] = tmp;
}
// db1(20*1) is from dZ1(20*964)
// db2(2*1) is from dZ1(2*964)
__global__ void Back_db(double* dev_dZ, double* dev_db, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y; // i/y -> row
if(i >= max_row)
return;
double tmp = 0;
for(int j = 0; j < max_col; j++) {
tmp += dev_dZ[i*size_batch+j];
}
dev_db[i*1+0] = tmp;
}
__global__ void Back_dA1 (double* dev_W2, double* dev_dZ2, double* dev_dA1, int size_batch, int K, int max_row, int max_col)
{
// dA1(20*964) = dZ2(2*964) * W2(2*20)
int k;
double partial = 0.0;
int i = threadIdx.y + blockIdx.y * blockDim.y; // i/y -> row
int j = threadIdx.x + blockIdx.x * blockDim.x; // j/x -> col
if(i >= max_row || j >= max_col)
return;
for (k = 0; k < K; k++)
partial += dev_W2(k,i) * dev_dZ2(k,j);
dev_dA1(i,j) = partial;
}
__global__ void Back_dZ1_Sigmoid (double* dev_dA1, double* dev_A1, double* dev_Z1, double* dev_dZ1, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y; // y == row; x == col
int j = threadIdx.x + blockIdx.x * blockDim.x; // column of Z2
if(i >= max_row || j >= max_col)
return;
dev_dZ1(i, j) = dev_dA1(i, j) * dev_A1(i, j) * (1-dev_A1(i, j)); // dZ1 = dA1*A1*(1-A1)
}
__global__ void Back_dZ1_ReLU (double* dev_dA1, double* dev_A1, double* dev_Z1, double* dev_dZ1, int size_batch, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y; // y == row; x == col
int j = threadIdx.x + blockIdx.x * blockDim.x; // column of Z2
if(i >= max_row || j >= max_col)
return;
if(dev_Z1(i, j) < 0)
dev_dZ1(i, j) = 0;
else
dev_dZ1(i, j) = dev_dA1(i, j); //dZ1 = dA1*Z1_mask
}
__global__ void update_Wb(double* dev_dWb, double* dev_Wb, int col, double learn_rate, int max_row, int max_col)
{
int i = threadIdx.y + blockIdx.y * blockDim.y; // y == row; x == col
int j = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= max_row || j >= max_col)
return;
dev_Wb[i*col+j] = dev_Wb[i*col+j] - learn_rate * dev_dWb[i*col+j];
}
/* forward to calculate A Z preY */
void forward(double* X, int* Y, string type, int acti_type, int block_size){
if(type == "train"){
size_batch = size_train;
}
else{
size_batch = size_test;
}
// init Z and A in the host
initialize_ZA(size_batch);
// init X Y W b Z A in the device
initialize_dev_ZA(size_batch);
dim3 dimBlock(block_size, block_size);
// hidden layer and activation function to get Z1 and A1
dim3 dimGrid1((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_hidden+ dimBlock.y - 1)/ dimBlock.y);
cudaMemcpy(dev_W1, W1, size_W1 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b1, b1, size_b1 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_X, X, size_X * sizeof(double), cudaMemcpyHostToDevice);
if(acti_type == 1)
HiddenLayer_Sigmoid<<<dimGrid1, dimBlock>>>(dev_X, dev_W1, dev_b1, dev_A1, dev_Z1, size_input, size_batch, size_hidden, size_batch);
else if(acti_type == 2)
HiddenLayer_ReLU<<<dimGrid1, dimBlock>>>(dev_X, dev_W1, dev_b1, dev_A1, dev_Z1, size_input, size_batch, size_hidden, size_batch);
cudaMemcpy(Z1, dev_Z1, size_Z1 * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(A1, dev_A1, size_A1 * sizeof(double), cudaMemcpyDeviceToHost);
// output layer to get Z2
dim3 dimGrid2((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
cudaMemcpy(dev_W2, W2, size_W2 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b2, b2, size_b2 * sizeof(double), cudaMemcpyHostToDevice);
OutputLayer<<<dimGrid2, dimBlock>>>(dev_A1, dev_W2, dev_b2, dev_Z2, size_hidden, size_batch, size_output, size_batch);
cudaMemcpy(Z2, dev_Z2, size_Z2 * sizeof(double), cudaMemcpyDeviceToHost);
// softmax layer to get A2 and max_index
dim3 dimGrid3((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
Softmax<<<dimGrid3, dimBlock>>>(dev_Z2, dev_A2, dev_max_index, size_batch, size_output, size_batch);
cudaMemcpy(A2, dev_A2, size_A2 * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(max_index, dev_max_index, size_max_index * sizeof(int), cudaMemcpyDeviceToHost);
free_dev_ZA();
}
/* average binary cross-entropy loss over the batch: mean of -y*log(A2[0]) - (1-y)*log(A2[1]) */
double cross_entropy_loss(int* Y, double* A2, int col)
{
double loss = 0;
for(int c = 0; c < col; c++) {
loss += -log(A2(0, c)) * Y(0, c) - log(A2(1, c)) * (1-Y(0, c));
}
return loss/col;
}
/* backward to calculate dW db */
void backprop(double* X, int* Y, int acti_type, int block_size) { // type = 1 is Sigmoid
size_batch = size_train;
initialize_dev_ZA(size_batch);
dim3 dimBlock(block_size, block_size);
// get dZ2
dim3 dimGrid1((size_batch + dimBlock.x - 1)/ dimBlock.x, (1 + dimBlock.y - 1)/ dimBlock.y);
cudaMemcpy(dev_A2, A2, size_A2 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_Y, Y, size_Y * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_dZ2, dZ2, size_dZ2 * sizeof(double), cudaMemcpyHostToDevice);
Back_dZ2<<<dimGrid1, dimBlock>>>(dev_A2, dev_Y, dev_dZ2, size_batch, 1, size_batch);
cudaMemcpy(dZ2, dev_dZ2, size_dZ2 * sizeof(double), cudaMemcpyDeviceToHost);
// get dw2
dim3 dimGrid2((size_hidden + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
cudaMemcpy(dev_A1, A1, size_A1 * sizeof(double), cudaMemcpyHostToDevice);
Back_dW<<<dimGrid2, dimBlock>>>(dev_A1, dev_dZ2, dev_dW2, size_batch, size_hidden, size_output, size_hidden);
cudaMemcpy(dW2, dev_dW2, size_dW2 * sizeof(double), cudaMemcpyDeviceToHost);
// get db2
dim3 dimGrid3((1 + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
Back_db<<<dimGrid3, dimBlock>>>(dev_dZ2, dev_db2, size_batch, size_output, size_batch);
cudaMemcpy(db2, dev_db2, size_db2 * sizeof(double), cudaMemcpyDeviceToHost);
// get dA1
dim3 dimGrid4((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
cudaMemcpy(dev_W2, W2, size_W2 * sizeof(double), cudaMemcpyHostToDevice);
Back_dA1<<<dimGrid4, dimBlock>>> (dev_W2, dev_dZ2, dev_dA1, size_batch, size_output, size_hidden, size_batch);
cudaMemcpy(dA1, dev_dA1, size_dA1 * sizeof(double), cudaMemcpyDeviceToHost);
// get dZ1
dim3 dimGrid5((size_batch + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
cudaMemcpy(dev_A1, A1, size_A1 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_Z1, Z1, size_Z1 * sizeof(double), cudaMemcpyHostToDevice);
if(acti_type == 1)
Back_dZ1_Sigmoid<<<dimGrid5, dimBlock>>>(dev_dA1, dev_A1, dev_Z1, dev_dZ1, size_batch, size_hidden, size_batch);
else if(acti_type == 2)
Back_dZ1_ReLU<<<dimGrid5, dimBlock>>>(dev_dA1, dev_A1, dev_Z1, dev_dZ1, size_batch, size_hidden, size_batch);
cudaMemcpy(dZ1, dev_dZ1, size_dZ1 * sizeof(double), cudaMemcpyDeviceToHost);
// get dW1
dim3 dimGrid6((size_input + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
cudaMemcpy(dev_X, X, size_X * sizeof(double), cudaMemcpyHostToDevice);
Back_dW<<<dimGrid6, dimBlock>>>(dev_X, dev_dZ1, dev_dW1, size_batch, size_input, size_hidden, size_input);
cudaMemcpy(dW1, dev_dW1, size_dW1 * sizeof(double), cudaMemcpyDeviceToHost);
// get b1
dim3 dimGrid7((1 + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
Back_db<<<dimGrid7, dimBlock>>>(dev_dZ1, dev_db1, size_batch, size_hidden, size_batch);
cudaMemcpy(db1, dev_db1, size_db1 * sizeof(double), cudaMemcpyDeviceToHost);
// free ZA on device
free_dev_ZA();
}
/* update W b */
void updateParameter(double learn_rate, int block_size)
{
dim3 dimBlock(block_size, block_size);
// update w1
cudaMemcpy(dev_dW1, dW1, size_dW1 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_W1, W1, size_W1 * sizeof(double), cudaMemcpyHostToDevice);
dim3 dimGrid1((size_input + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
update_Wb<<<dimGrid1, dimBlock>>>(dev_dW1, dev_W1, size_input, learn_rate, size_hidden, size_input);
cudaMemcpy(W1, dev_W1, size_W1 * sizeof(double), cudaMemcpyDeviceToHost);
// update b1
cudaMemcpy(dev_db1, db1, size_db1 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b1, b1, size_b1 * sizeof(double), cudaMemcpyHostToDevice);
dim3 dimGrid2((1 + dimBlock.x - 1)/ dimBlock.x, (size_hidden + dimBlock.y - 1)/ dimBlock.y);
update_Wb<<<dimGrid2, dimBlock>>>(dev_db1, dev_b1, 1, learn_rate, size_hidden, 1);
cudaMemcpy(b1, dev_b1, size_b1 * sizeof(double), cudaMemcpyDeviceToHost);
// update w2
cudaMemcpy(dev_dW2, dW2, size_dW2 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_W2, W2, size_W2 * sizeof(double), cudaMemcpyHostToDevice);
dim3 dimGrid3((size_hidden + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
update_Wb<<<dimGrid3, dimBlock>>>(dev_dW2, dev_W2, size_hidden, learn_rate, size_output, size_hidden);
cudaMemcpy(W2, dev_W2, size_W2 * sizeof(double), cudaMemcpyDeviceToHost);
// update b2
cudaMemcpy(dev_db2, db2, size_db2 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b2, b2, size_b2 * sizeof(double), cudaMemcpyHostToDevice);
dim3 dimGrid4((1 + dimBlock.x - 1)/ dimBlock.x, (size_output + dimBlock.y - 1)/ dimBlock.y);
update_Wb<<<dimGrid4, dimBlock>>>(dev_db2, dev_b2, 1, learn_rate, size_output, 1);
cudaMemcpy(b2, dev_b2, size_b2 * sizeof(double), cudaMemcpyDeviceToHost);
}
double accuracy(int* max_index, int* Y, int size_batch)
{
int i;
double count = 0;
for(i = 0; i < size_batch; i++) {
if(Y(0, i) == max_index(0, i))
count += 1;
}
return count/double(size_batch);
}
void train(double* X_trn, int* Y_trn, int acti_type, int block_size)
{
forward(X_trn, Y_trn, "train", acti_type, block_size);
backprop(X_trn, Y_trn, acti_type, block_size); // 1 Sigmoid 2 ReLU
updateParameter(0.01, block_size);
}
double test(double* X, int* Y, string type, int acti_type, int block_size)
{
forward(X, Y, type, acti_type, block_size);
if(type == "train")
return accuracy(max_index, Y, size_train);
else
return accuracy(max_index, Y, size_test);
}
int main(int argc, char *argv[])
{
int block_size;
int epochs = 20000;
int acti_type;
double acc_trn, acc_tst;
if ( argc < 3 ){
printf(" Usage: first argument: dimension of square matrix \n");
printf(" second argument: size of CUDA block \n");
return -1;
} else if ( argc > 3 ) {
printf("\n Too many arguments. \n");
return -1;
} else {
block_size = atoi(argv[1]);
acti_type = atoi(argv[2]);
}
initialize_Wb();
initialize_dev_Wb();
initialize_dev_dZA(size_train);
read_data();
float elapsed_time = 0.0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
for(int e = 0; e < epochs; e++) {
train(X_trn, Y_trn, acti_type, block_size);
// double loss = cross_entropy_loss(Y_trn, A2, size_train);
// printf("%f\n", loss);
// printf("the %d epoch, the training loss is: %f \n", e, loss);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
printf( "Elapsed Time: %.4e msec. \n", elapsed_time );
acc_trn = test(X_trn, Y_trn, "train", acti_type, block_size);
acc_tst = test(X_tst, Y_tst, "test", acti_type, block_size);
printf("the training accuracy is: %f, the test accuracy is: %f\n", acc_trn, acc_tst);
free_dev_Wb();
free_dev_dZA();
} |
5129d56ba341b6ce0d7b1668b373fd57e6693efd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Concurrent access to an atomic counter. Inter and Intra Region.
*/
#include <stdio.h>
// Macro for checking errors in CUDA API calls
#define cudaErrorCheck(call) \
do{ \
hipError_t cuErr = call; \
if(hipSuccess != cuErr){ \
printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(cuErr));\
exit(0); \
} \
}while(0)
// Grid dimension
#define B 100
// Block dimension
#define T 512
// Kernel
__global__ void count(int *d_countervar){
atomicAdd(d_countervar,1);
}
// Main program
int main(){
// Device pointer for counter variable
int *d_count;
// Allocation of host counter variable
int *countervar = (int *) malloc(sizeof(int));
// Initialization of the counter variable
countervar[0] = 0;
// Allocation of GPU memory
cudaErrorCheck( hipMalloc(&d_count, sizeof(int)));
// Copying the counter variable from the host to the device
cudaErrorCheck( hipMemcpy(d_count,countervar,sizeof(int),hipMemcpyHostToDevice));
//Launch Kernel
hipLaunchKernelGGL(( count), dim3(B),dim3(T), 0, 0, d_count);
// Check for errors in kernel launch (e.g. invalid execution configuration parameters)
cudaErrorCheck( hipGetLastError());
// Check for errors on the GPU after control is returned to CPU
cudaErrorCheck( hipDeviceSynchronize());
// Copying the counter variable from the device to the host
cudaErrorCheck( hipMemcpy(countervar,d_count,sizeof(int),hipMemcpyDeviceToHost));
// Verifying result
printf("counter: %i expected: %i \n ", countervar[0], T*B);
// Freeing GPU memory
cudaErrorCheck( hipFree(d_count));
// Freeing CPU memory
free(countervar);
return 0;
} | 5129d56ba341b6ce0d7b1668b373fd57e6693efd.cu | /*
Concurrent access to an atomic counter. Inter and Intra Region.
*/
#include <stdio.h>
// Macro for checking errors in CUDA API calls
#define cudaErrorCheck(call) \
do{ \
cudaError_t cuErr = call; \
if(cudaSuccess != cuErr){ \
printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErr));\
exit(0); \
} \
}while(0)
// Grid dimension
#define B 100
// Block dimension
#define T 512
// Kernel
__global__ void count(int *d_countervar){
atomicAdd(d_countervar,1);
}
// Main program
int main(){
// Device pointer for counter variable
int *d_count;
// Allocation of host counter variable
int *countervar = (int *) malloc(sizeof(int));
// Initialization of the counter variable
countervar[0] = 0;
// Allocation of GPU memory
cudaErrorCheck( cudaMalloc(&d_count, sizeof(int)));
// Copying the counter variable from the host to the device
cudaErrorCheck( cudaMemcpy(d_count,countervar,sizeof(int),cudaMemcpyHostToDevice));
//Launch Kernel
count<<<B,T>>>(d_count);
// Check for errors in kernel launch (e.g. invalid execution configuration parameters)
cudaErrorCheck( cudaGetLastError());
// Check for errors on the GPU after control is returned to CPU
cudaErrorCheck( cudaDeviceSynchronize());
// Copying the counter variable from the device to the host
cudaErrorCheck( cudaMemcpy(countervar,d_count,sizeof(int),cudaMemcpyDeviceToHost));
// Verifying result
printf("counter: %i expected: %i \n ", countervar[0], T*B);
// Freeing GPU memory
cudaErrorCheck( cudaFree(d_count));
// Freeing CPU memory
free(countervar);
return 0;
} |
aa9e199887e83e65ffcdc3b2b117b579b0b9a131.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "tensor/gpu_sparse_tensor.h"
#include "tensor/cpu_sparse_tensor.h"
#include "tensor/t_data.h"
#include "tensor/gpu_dense_tensor.h"
#include "tensor/gpu_reduce_kernel.h"
#include "util/mem_holder.h"
#include <cstring>
#include <cassert>
namespace gnn
{
template<typename Dtype>
TensorTemplate<GPU, CSR_SPARSE, Dtype>::TensorTemplate() : data(nullptr)
{
}
template<typename Dtype>
void TensorTemplate<GPU, CSR_SPARSE, Dtype>::Reshape(std::vector<size_t> l)
{
ASSERT(l.size() == 2, "only support sparse matrix");
this->shape.Reshape(l);
}
template<typename Dtype>
void TensorTemplate<GPU, CSR_SPARSE, Dtype>::ResizeSp(int newNNZ, int newNPtr)
{
if (this->data == nullptr)
this->data = std::make_shared< SparseData<GPU, Dtype> >();
if (newNNZ > data->nzCap || newNPtr > data->ptrCap)
{
if (newNNZ > data->nzCap)
data->nzCap = ::max(newNNZ, data->nzCap * 2);
if (newNPtr > data->ptrCap)
data->ptrCap = ::max(newNPtr, data->ptrCap * 2);
data = std::make_shared< SparseData<GPU, Dtype> >(data->nzCap, data->ptrCap);
}
data->nnz = newNNZ;
data->len_ptr = newNPtr;
}
template<typename Dtype>
MatType TensorTemplate<GPU, CSR_SPARSE, Dtype>::GetMatType()
{
return MatType::sparse;
}
template<typename Dtype>
MatMode TensorTemplate<GPU, CSR_SPARSE, Dtype>::GetMatMode()
{
return MatMode::gpu;
}
template<typename Dtype>
void TensorTemplate<GPU, CSR_SPARSE, Dtype>::CopyFrom(SpTensor<CPU, Dtype>& src)
{
this->shape = src.shape;
ResizeSp(src.data->nnz, src.data->len_ptr);
hipMemcpy(data->val, src.data->val, sizeof(Dtype) * src.data->nnz, hipMemcpyHostToDevice);
hipMemcpy(data->col_idx, src.data->col_idx, sizeof(int) * src.data->nnz, hipMemcpyHostToDevice);
hipMemcpy(data->row_ptr, src.data->row_ptr, sizeof(int) * src.data->len_ptr, hipMemcpyHostToDevice);
}
template<typename Dtype>
void TensorTemplate<GPU, CSR_SPARSE, Dtype>::CopyFrom(SpTensor<GPU, Dtype>& src)
{
this->shape = src.shape;
ResizeSp(src.data->nnz, src.data->len_ptr);
hipMemcpy(data->val, src.data->val, sizeof(Dtype) * src.data->nnz, hipMemcpyDeviceToDevice);
hipMemcpy(data->col_idx, src.data->col_idx, sizeof(int) * src.data->nnz, hipMemcpyDeviceToDevice);
hipMemcpy(data->row_ptr, src.data->row_ptr, sizeof(int) * src.data->len_ptr, hipMemcpyDeviceToDevice);
}
template<typename Dtype>
void TensorTemplate<GPU, CSR_SPARSE, Dtype>::ShallowCopy(SpTensor<GPU, Dtype>& src)
{
this->shape = src.shape;
this->data = src.data;
}
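// per-row argmax over a CSR matrix: each block scans one row, tracking the
// position of the largest value in shared memory; a tree reduction then picks
// the overall winner and thread 0 writes its column index to dst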
template<typename dstDtype, typename srcDtype>
__global__ void SparseMatColReduceKernel(dstDtype* dst, int* row_ptr, int* col_idx, srcDtype *val)
{
__shared__ dstDtype buffer[REDUCE_THREADS];
int i_start = row_ptr[blockIdx.x] + threadIdx.x;
int i_end = row_ptr[blockIdx.x + 1];
int i_step = blockDim.x;
if (i_start < i_end)
buffer[threadIdx.x] = i_start;
for (int i = i_start + i_step; i < i_end; i += i_step)
{
if (val[i] > val[buffer[threadIdx.x]])
buffer[threadIdx.x] = i;
}
__syncthreads();
int shift;
for (int i = REDUCE_THREAD_BITS - 1; i >= 0; --i)
{
shift = 1 << i;
if (threadIdx.x < shift && threadIdx.x + shift < row_ptr[blockIdx.x + 1] - row_ptr[blockIdx.x])
{
if (val[buffer[threadIdx.x]] < val[buffer[threadIdx.x + shift]])
buffer[threadIdx.x] = buffer[threadIdx.x + shift];
}
__syncthreads();
}
if (threadIdx.x == 0)
dst[blockIdx.x] = col_idx[buffer[0]];
}
template<typename Dtype>
void TensorTemplate<GPU, CSR_SPARSE, Dtype>::ArgMax(DTensor<GPU, int>& dst, uint axis)
{
ASSERT(axis == 0, "not supported for axis > 0 in GPU Sparse Tensor");
dst.Reshape({this->shape[0]});
dim3 blocks(this->shape[0]);
dim3 threads(REDUCE_THREADS);
hipLaunchKernelGGL(( SparseMatColReduceKernel), dim3(blocks), dim3(threads), 0, cudaStreamPerThread, dst.data->ptr, data->row_ptr, data->col_idx, data->val);
}
template class TensorTemplate<GPU, CSR_SPARSE, float>;
template class TensorTemplate<GPU, CSR_SPARSE, double>;
TensorTemplate<GPU, CSR_SPARSE, int>::TensorTemplate() : data(nullptr)
{
}
void TensorTemplate<GPU, CSR_SPARSE, int>::Reshape(std::vector<size_t> l)
{
}
MatType TensorTemplate<GPU, CSR_SPARSE, int>::GetMatType()
{
return MatType::sparse;
}
MatMode TensorTemplate<GPU, CSR_SPARSE, int>::GetMatMode()
{
return MatMode::gpu;
}
void TensorTemplate<GPU, CSR_SPARSE, int>::ShallowCopy(SpTensor<GPU, int>& src)
{
this->shape = src.shape;
this->data = src.data;
}
void TensorTemplate<GPU, CSR_SPARSE, int>::ResizeSp(int newNNZ, int newNPtr)
{
if (this->data == nullptr)
this->data = std::make_shared< SparseData<GPU, int> >();
if (newNNZ > data->nzCap || newNPtr > data->ptrCap)
{
if (newNNZ > data->nzCap)
data->nzCap = ::max(newNNZ, data->nzCap * 2);
if (newNPtr > data->ptrCap)
data->ptrCap = ::max(newNPtr, data->ptrCap * 2);
data = std::make_shared< SparseData<GPU, int> >(data->nzCap, data->ptrCap);
}
data->nnz = newNNZ;
data->len_ptr = newNPtr;
}
template class TensorTemplate<GPU, CSR_SPARSE, int>;
} | aa9e199887e83e65ffcdc3b2b117b579b0b9a131.cu | #include "tensor/gpu_sparse_tensor.h"
#include "tensor/cpu_sparse_tensor.h"
#include "tensor/t_data.h"
#include "tensor/gpu_dense_tensor.h"
#include "tensor/gpu_reduce_kernel.h"
#include "util/mem_holder.h"
#include <cstring>
#include <cassert>
namespace gnn
{
template<typename Dtype>
TensorTemplate<GPU, CSR_SPARSE, Dtype>::TensorTemplate() : data(nullptr)
{
}
template<typename Dtype>
void TensorTemplate<GPU, CSR_SPARSE, Dtype>::Reshape(std::vector<size_t> l)
{
ASSERT(l.size() == 2, "only support sparse matrix");
this->shape.Reshape(l);
}
template<typename Dtype>
void TensorTemplate<GPU, CSR_SPARSE, Dtype>::ResizeSp(int newNNZ, int newNPtr)
{
if (this->data == nullptr)
this->data = std::make_shared< SparseData<GPU, Dtype> >();
if (newNNZ > data->nzCap || newNPtr > data->ptrCap)
{
if (newNNZ > data->nzCap)
data->nzCap = std::max(newNNZ, data->nzCap * 2);
if (newNPtr > data->ptrCap)
data->ptrCap = std::max(newNPtr, data->ptrCap * 2);
data = std::make_shared< SparseData<GPU, Dtype> >(data->nzCap, data->ptrCap);
}
data->nnz = newNNZ;
data->len_ptr = newNPtr;
}
template<typename Dtype>
MatType TensorTemplate<GPU, CSR_SPARSE, Dtype>::GetMatType()
{
return MatType::sparse;
}
template<typename Dtype>
MatMode TensorTemplate<GPU, CSR_SPARSE, Dtype>::GetMatMode()
{
return MatMode::gpu;
}
template<typename Dtype>
void TensorTemplate<GPU, CSR_SPARSE, Dtype>::CopyFrom(SpTensor<CPU, Dtype>& src)
{
this->shape = src.shape;
ResizeSp(src.data->nnz, src.data->len_ptr);
cudaMemcpy(data->val, src.data->val, sizeof(Dtype) * src.data->nnz, cudaMemcpyHostToDevice);
cudaMemcpy(data->col_idx, src.data->col_idx, sizeof(int) * src.data->nnz, cudaMemcpyHostToDevice);
cudaMemcpy(data->row_ptr, src.data->row_ptr, sizeof(int) * src.data->len_ptr, cudaMemcpyHostToDevice);
}
template<typename Dtype>
void TensorTemplate<GPU, CSR_SPARSE, Dtype>::CopyFrom(SpTensor<GPU, Dtype>& src)
{
this->shape = src.shape;
ResizeSp(src.data->nnz, src.data->len_ptr);
cudaMemcpy(data->val, src.data->val, sizeof(Dtype) * src.data->nnz, cudaMemcpyDeviceToDevice);
cudaMemcpy(data->col_idx, src.data->col_idx, sizeof(int) * src.data->nnz, cudaMemcpyDeviceToDevice);
cudaMemcpy(data->row_ptr, src.data->row_ptr, sizeof(int) * src.data->len_ptr, cudaMemcpyDeviceToDevice);
}
template<typename Dtype>
void TensorTemplate<GPU, CSR_SPARSE, Dtype>::ShallowCopy(SpTensor<GPU, Dtype>& src)
{
this->shape = src.shape;
this->data = src.data;
}
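// per-row argmax over a CSR matrix: each block scans one row, tracking the
// position of the largest value in shared memory; a tree reduction then picks
// the overall winner and thread 0 writes its column index to dst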
template<typename dstDtype, typename srcDtype>
__global__ void SparseMatColReduceKernel(dstDtype* dst, int* row_ptr, int* col_idx, srcDtype *val)
{
__shared__ dstDtype buffer[REDUCE_THREADS];
int i_start = row_ptr[blockIdx.x] + threadIdx.x;
int i_end = row_ptr[blockIdx.x + 1];
int i_step = blockDim.x;
if (i_start < i_end)
buffer[threadIdx.x] = i_start;
for (int i = i_start + i_step; i < i_end; i += i_step)
{
if (val[i] > val[buffer[threadIdx.x]])
buffer[threadIdx.x] = i;
}
__syncthreads();
int shift;
for (int i = REDUCE_THREAD_BITS - 1; i >= 0; --i)
{
shift = 1 << i;
if (threadIdx.x < shift && threadIdx.x + shift < row_ptr[blockIdx.x + 1] - row_ptr[blockIdx.x])
{
if (val[buffer[threadIdx.x]] < val[buffer[threadIdx.x + shift]])
buffer[threadIdx.x] = buffer[threadIdx.x + shift];
}
__syncthreads();
}
if (threadIdx.x == 0)
dst[blockIdx.x] = col_idx[buffer[0]];
}
template<typename Dtype>
void TensorTemplate<GPU, CSR_SPARSE, Dtype>::ArgMax(DTensor<GPU, int>& dst, uint axis)
{
ASSERT(axis == 0, "not supported for axis > 0 in GPU Sparse Tensor");
dst.Reshape({this->shape[0]});
dim3 blocks(this->shape[0]);
dim3 threads(REDUCE_THREADS);
SparseMatColReduceKernel<<<blocks, threads, 0, cudaStreamPerThread>>> (dst.data->ptr, data->row_ptr, data->col_idx, data->val);
}
template class TensorTemplate<GPU, CSR_SPARSE, float>;
template class TensorTemplate<GPU, CSR_SPARSE, double>;
TensorTemplate<GPU, CSR_SPARSE, int>::TensorTemplate() : data(nullptr)
{
}
void TensorTemplate<GPU, CSR_SPARSE, int>::Reshape(std::vector<size_t> l)
{
}
MatType TensorTemplate<GPU, CSR_SPARSE, int>::GetMatType()
{
return MatType::sparse;
}
MatMode TensorTemplate<GPU, CSR_SPARSE, int>::GetMatMode()
{
return MatMode::gpu;
}
void TensorTemplate<GPU, CSR_SPARSE, int>::ShallowCopy(SpTensor<GPU, int>& src)
{
this->shape = src.shape;
this->data = src.data;
}
void TensorTemplate<GPU, CSR_SPARSE, int>::ResizeSp(int newNNZ, int newNPtr)
{
if (this->data == nullptr)
this->data = std::make_shared< SparseData<GPU, int> >();
if (newNNZ > data->nzCap || newNPtr > data->ptrCap)
{
if (newNNZ > data->nzCap)
data->nzCap = std::max(newNNZ, data->nzCap * 2);
if (newNPtr > data->ptrCap)
data->ptrCap = std::max(newNPtr, data->ptrCap * 2);
data = std::make_shared< SparseData<GPU, int> >(data->nzCap, data->ptrCap);
}
data->nnz = newNNZ;
data->len_ptr = newNPtr;
}
template class TensorTemplate<GPU, CSR_SPARSE, int>;
} |
f84623fac19650d3b9f8e44978625343d57612d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudaBRT.h"
#include <iostream>
#define DEFAULT_THREAD_PER_BLOCK 1024
/*check error code of hipMalloc and print out if needed*/
#define safe_cuda(CODE)\
{\
hipError_t err = CODE;\
if(err != hipSuccess) {\
std::cout<<"CUDA error:"<<hipGetErrorString(err)<<std::endl;\
}\
}
/**
 * allocate memory on the GPU and copy data from the CPU to the GPU.
*/
inline void copyFromCPUtoGPU(void** dst, void* src, int size)
{
hipMalloc(dst, size);
safe_cuda(hipMemcpy(*dst, src, size, hipMemcpyHostToDevice));
}
/**
 * allocate memory on the CPU and copy data from the GPU to the CPU.
*/
inline void copyFromGPUtoCPU(void** dst, void* src, int size)
{
*dst = malloc(size);
safe_cuda(hipMemcpy(*dst, src, size, hipMemcpyDeviceToHost));
}
/**
* intialize parallelBRTreeBuilder by copying the data needed
* from host memory (CPU) to device memory (GPU), initialize
* data members such as configuration parameters.
*/
ParallelBRTreeBuilder::ParallelBRTreeBuilder(unsigned int* const sorted_morton_code, BBox* const bboxes, int size) :
d_sorted_morton_code(0),
d_leaf_nodes(0),
h_leaf_nodes(0),
d_internal_nodes(0),
h_internal_nodes(0),
numInternalNode(size - 1),
numLeafNode(size)
{
//copy data from cpu to gpu
copyFromCPUtoGPU((void**)&d_sorted_morton_code, sorted_morton_code, size * sizeof(unsigned int));
copyFromCPUtoGPU((void**)&d_bboxes, bboxes, size * sizeof(BBox));
//initialize d_leaf_nodes and d_internal_nodes
h_leaf_nodes = (BRTreeNode*)calloc(numLeafNode, sizeof(BRTreeNode));
for (int idx = 0; idx < numLeafNode; idx++) {
h_leaf_nodes[idx].setIdx(idx);
h_leaf_nodes[idx].bbox = BBox();
}
copyFromCPUtoGPU((void**)&d_leaf_nodes, h_leaf_nodes, numLeafNode * sizeof(BRTreeNode));
free(h_leaf_nodes);
h_internal_nodes = (BRTreeNode*)calloc(numInternalNode, sizeof(BRTreeNode));
for (int idx = 0; idx < numInternalNode; idx++) {
h_internal_nodes[idx].setIdx(idx);
h_internal_nodes[idx].bbox = BBox();
}
copyFromCPUtoGPU((void**)&d_internal_nodes, h_internal_nodes, numInternalNode * sizeof(BRTreeNode));
free(h_internal_nodes);
}
/**
 * delta operator measures the length of the common prefix of two morton codes;
* if j is not in the range of the sorted_morton_code,
* delta operator returns -1.
*/
__device__ int delta(int i, int j, unsigned int* sorted_morton_code, int length)
{
if (j<0 || j >= length)
{
return -1;
}
else
{
return __clz(sorted_morton_code[i] ^ sorted_morton_code[j]);
}
}
/**
* determine the range of an internal node
*/
__device__ int2 determineRange(unsigned int* sorted_morton_code, int numInternalNode, int i)
{
int size = numInternalNode + 1;
int d = delta(i, i + 1, sorted_morton_code, size) - delta(i, i - 1, sorted_morton_code, size);
d = d > 0 ? 1 : -1;
//compute the upper bound for the length of the range
int delta_min = delta(i, i - d, sorted_morton_code, size);
int lmax = 2;
while (delta(i, i + lmax*d, sorted_morton_code, size)>delta_min)
{
lmax = lmax * 2;
}
//find the other end using binary search
int l = 0;
for (int t = lmax / 2; t >= 1; t /= 2)
{
if (delta(i, i + (l + t)*d, sorted_morton_code, size)>delta_min)
{
l = l + t;
}
}
int j = i + l*d;
int2 range;
if (i <= j) { range.x = i; range.y = j; }
else { range.x = j; range.y = i; }
return range;
}
/**
 * check whether two values differ
 * at bit position n
*/
__device__ bool is_diff_at_bit(unsigned int val1, unsigned int val2, int n)
{
return val1 >> (31 - n) != val2 >> (31 - n);
}
/**
* find the best split position for an internal node
*/
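/*
 * (In the style of Karras' parallel LBVH builder: a binary search returns the
 * largest index such that objects [start..split] share a strictly longer
 * morton-code prefix than the whole range does; identical codes fall back to
 * the median.)
 */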
__device__ int findSplit(unsigned int* sorted_morton_code, int start, int last)
{
//return -1 if there is only
//one primitive under this node.
if (start == last)
{
return -1;
}
else
{
int common_prefix = __clz(sorted_morton_code[start] ^ sorted_morton_code[last]);
//handle duplicated morton code separately
if (common_prefix == 32)
{
return (start + last) / 2;
}
// Use binary search to find where the next bit differs.
// Specifically, we are looking for the highest object that
// shares more than commonPrefix bits with the first one.
int split = start; // initial guess
int step = last - start;
do
{
step = (step + 1) >> 1; // exponential decrease
int newSplit = split + step; // proposed new position
if (newSplit < last)
{
bool is_diff = is_diff_at_bit(sorted_morton_code[start],
sorted_morton_code[newSplit],
common_prefix);
if (!is_diff)
{
split = newSplit; // accept proposal
}
}
} while (step > 1);
return split;
}
}
//FOR BR-TREE CONSTRUCTION
//TODO: implement internal node processing routine
//TODO: handle duplicated morton codes as special case (using their position i,j as fallback)
//FOR BVH CONSTRUCTION
//TODO: implement AABB construction process by go back from the tree node to the root
//TODO: convert BR-TREE BACK TO BVH
//TODO: debug
__global__ void processInternalNode(unsigned int* sorted_morton_code, int numInternalNode,
BRTreeNode* leafNodes,
BRTreeNode* internalNodes)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= numInternalNode) return;
// Find out which range of objects the node corresponds to.
int2 range = determineRange(sorted_morton_code, numInternalNode, idx);
int first = range.x;
int last = range.y;
// Determine where to split the range.
int split = findSplit(sorted_morton_code, first, last);
if (split == -1) return;
// Select childA.
BRTreeNode* childA;
bool isChildALeaf = false;
if (split == first) {
childA = &(leafNodes[split]);
isChildALeaf = true;
}
else childA = &(internalNodes[split]);
// Select childB.
BRTreeNode* childB;
bool isChildBLeaf = false;
if (split + 1 == last) {
childB = &(leafNodes[split + 1]);
isChildBLeaf = true;
}
else childB = &(internalNodes[split + 1]);
// Record parent-child relationships.
internalNodes[idx].setChildA(split, isChildALeaf);
internalNodes[idx].setChildB(split + 1, isChildBLeaf);
childA->setParent(idx);
childB->setParent(idx);
}
/**
* construct bounding boxes from leaf up to root
*/
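/*
 * Each leaf's thread climbs toward the root. The atomicInc on every internal
 * node's counter makes the first arriving thread return immediately, so only
 * the second arrival -- when both children's boxes are final -- merges them
 * and continues upward.
 */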
__global__ void calculateBoudingBox(BBox* d_bboxes, int numLeafNode,
BRTreeNode* leafNodes, BRTreeNode* internalNodes)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= numLeafNode) return;
//handle leaf first
BRTreeNode* node = &leafNodes[idx];
node->bbox = d_bboxes[idx];
//terminate if it is root node
bool is_null = false;
int parentIdx = node->getParent(is_null);
//if (is_null) return;
node = &internalNodes[parentIdx];
int initial_val = atomicInc(&node->counter, 1);
while (1)
{
if (initial_val == 0) return; //terminate the first arriving thread
//calculate bounding box by merging two children's bounding box
bool is_leaf = false;
int childAIdx = node->getChildA(is_leaf, is_null);
if (is_leaf) node->bbox.expand(leafNodes[childAIdx].bbox);
else node->bbox.expand(internalNodes[childAIdx].bbox);
int childBIdx = node->getChildB(is_leaf, is_null);
if (is_leaf) node->bbox.expand(leafNodes[childBIdx].bbox);
else node->bbox.expand(internalNodes[childBIdx].bbox);
//terminate if it is root node
parentIdx = node->getParent(is_null);
if (is_null) return;
node = &internalNodes[parentIdx];
initial_val = atomicInc(&node->counter, 1);
}
}
/**
* build binary radix tree on GPU
*/
void ParallelBRTreeBuilder::build()
{
//build the bvh
int threadPerBlock = DEFAULT_THREAD_PER_BLOCK;
int numBlock = (numInternalNode + DEFAULT_THREAD_PER_BLOCK - 1) / threadPerBlock;
processInternalNode << <numBlock, threadPerBlock >> >(d_sorted_morton_code, numInternalNode,
d_leaf_nodes, d_internal_nodes);
//fix << <1, 1 >> > (d_leaf_nodes, d_internal_nodes);
//calculate bounding box
threadPerBlock = DEFAULT_THREAD_PER_BLOCK;
numBlock = (numLeafNode + DEFAULT_THREAD_PER_BLOCK - 1) / threadPerBlock;
calculateBoudingBox << <numBlock, threadPerBlock >> >(d_bboxes, numLeafNode,
d_leaf_nodes, d_internal_nodes);
}
/**
* get leaf nodes (host)
*/
BRTreeNode* ParallelBRTreeBuilder::get_leaf_nodes()
{
copyFromGPUtoCPU((void**)&h_leaf_nodes, d_leaf_nodes, numLeafNode * sizeof(BRTreeNode));
return h_leaf_nodes;
}
/**
* get internal nodes (host)
*/
BRTreeNode* ParallelBRTreeBuilder::get_internal_nodes()
{
copyFromGPUtoCPU((void**)&h_internal_nodes, d_internal_nodes, numInternalNode * sizeof(BRTreeNode));
return h_internal_nodes;
}
/**
* free memory on host
*/
void ParallelBRTreeBuilder::freeHostMemory()
{
}
/**
* free memory on device
*/
void ParallelBRTreeBuilder::freeDeviceMemory()
{
hipFree(d_leaf_nodes);
hipFree(d_internal_nodes);
hipFree(d_sorted_morton_code);
}
/**
 * destructor
*/
ParallelBRTreeBuilder::~ParallelBRTreeBuilder() {}
BRTreeNode* ParallelBRTreeBuilder::get_d_leaf_nodes()
{
return d_leaf_nodes;
}
BRTreeNode* ParallelBRTreeBuilder::get_d_internal_nodes()
{
return d_internal_nodes;
}
void ParallelBRTreeBuilder::set_d_leaf_nodes(BRTreeNode* p)
{
d_leaf_nodes = p;
}
void ParallelBRTreeBuilder::set_d_internal_nodes(BRTreeNode* p)
{
d_internal_nodes = p;
}
| f84623fac19650d3b9f8e44978625343d57612d0.cu | #include "cudaBRT.h"
#include <iostream>
#define DEFAULT_THREAD_PER_BLOCK 1024
/*check error code of cudaMalloc and print out if needed*/
#define safe_cuda(CODE)\
{\
cudaError_t err = CODE;\
if(err != cudaSuccess) {\
std::cout<<"CUDA error:"<<cudaGetErrorString(err)<<std::endl;\
}\
}
/**
 * allocate memory on the GPU and copy data from the CPU to the GPU.
*/
inline void copyFromCPUtoGPU(void** dst, void* src, int size)
{
cudaMalloc(dst, size);
safe_cuda(cudaMemcpy(*dst, src, size, cudaMemcpyHostToDevice));
}
/**
 * allocate memory on the CPU and copy data from the GPU to the CPU.
*/
inline void copyFromGPUtoCPU(void** dst, void* src, int size)
{
*dst = malloc(size);
safe_cuda(cudaMemcpy(*dst, src, size, cudaMemcpyDeviceToHost));
}
/**
 * initialize parallelBRTreeBuilder by copying the data needed
* from host memory (CPU) to device memory (GPU), initialize
* data members such as configuration parameters.
*/
ParallelBRTreeBuilder::ParallelBRTreeBuilder(unsigned int* const sorted_morton_code, BBox* const bboxes, int size) :
d_sorted_morton_code(0),
d_leaf_nodes(0),
h_leaf_nodes(0),
d_internal_nodes(0),
h_internal_nodes(0),
numInternalNode(size - 1),
numLeafNode(size)
{
//copy data from cpu to gpu
copyFromCPUtoGPU((void**)&d_sorted_morton_code, sorted_morton_code, size * sizeof(unsigned int));
copyFromCPUtoGPU((void**)&d_bboxes, bboxes, size * sizeof(BBox));
//initialize d_leaf_nodes and d_internal_nodes
h_leaf_nodes = (BRTreeNode*)calloc(numLeafNode, sizeof(BRTreeNode));
for (int idx = 0; idx < numLeafNode; idx++) {
h_leaf_nodes[idx].setIdx(idx);
h_leaf_nodes[idx].bbox = BBox();
}
copyFromCPUtoGPU((void**)&d_leaf_nodes, h_leaf_nodes, numLeafNode * sizeof(BRTreeNode));
free(h_leaf_nodes);
h_internal_nodes = (BRTreeNode*)calloc(numInternalNode, sizeof(BRTreeNode));
for (int idx = 0; idx < numInternalNode; idx++) {
h_internal_nodes[idx].setIdx(idx);
h_internal_nodes[idx].bbox = BBox();
}
copyFromCPUtoGPU((void**)&d_internal_nodes, h_internal_nodes, numInternalNode * sizeof(BRTreeNode));
free(h_internal_nodes);
}
/**
 * delta operator measures the length of the common prefix of two morton codes;
* if j is not in the range of the sorted_morton_code,
* delta operator returns -1.
*/
__device__ int delta(int i, int j, unsigned int* sorted_morton_code, int length)
{
if (j<0 || j >= length)
{
return -1;
}
else
{
return __clz(sorted_morton_code[i] ^ sorted_morton_code[j]);
}
}
/**
* determine the range of an internal node
*/
__device__ int2 determineRange(unsigned int* sorted_morton_code, int numInternalNode, int i)
{
int size = numInternalNode + 1;
int d = delta(i, i + 1, sorted_morton_code, size) - delta(i, i - 1, sorted_morton_code, size);
d = d > 0 ? 1 : -1;
//compute the upper bound for the length of the range
int delta_min = delta(i, i - d, sorted_morton_code, size);
int lmax = 2;
while (delta(i, i + lmax*d, sorted_morton_code, size)>delta_min)
{
lmax = lmax * 2;
}
//find the other end using binary search
int l = 0;
for (int t = lmax / 2; t >= 1; t /= 2)
{
if (delta(i, i + (l + t)*d, sorted_morton_code, size)>delta_min)
{
l = l + t;
}
}
int j = i + l*d;
int2 range;
if (i <= j) { range.x = i; range.y = j; }
else { range.x = j; range.y = i; }
return range;
}
/**
 * check whether two values differ
 * at bit position n
*/
__device__ bool is_diff_at_bit(unsigned int val1, unsigned int val2, int n)
{
return val1 >> (31 - n) != val2 >> (31 - n);
}
/**
* find the best split position for an internal node
*/
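/*
 * (In the style of Karras' parallel LBVH builder: a binary search returns the
 * largest index such that objects [start..split] share a strictly longer
 * morton-code prefix than the whole range does; identical codes fall back to
 * the median.)
 */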
__device__ int findSplit(unsigned int* sorted_morton_code, int start, int last)
{
//return -1 if there is only
//one primitive under this node.
if (start == last)
{
return -1;
}
else
{
int common_prefix = __clz(sorted_morton_code[start] ^ sorted_morton_code[last]);
//handle duplicated morton code separately
if (common_prefix == 32)
{
return (start + last) / 2;
}
// Use binary search to find where the next bit differs.
// Specifically, we are looking for the highest object that
// shares more than commonPrefix bits with the first one.
int split = start; // initial guess
int step = last - start;
do
{
step = (step + 1) >> 1; // exponential decrease
int newSplit = split + step; // proposed new position
if (newSplit < last)
{
bool is_diff = is_diff_at_bit(sorted_morton_code[start],
sorted_morton_code[newSplit],
common_prefix);
if (!is_diff)
{
split = newSplit; // accept proposal
}
}
} while (step > 1);
return split;
}
}
//FOR BR-TREE CONSTRUCTION
//TODO: implement internal node processing routine
//TODO: handle duplicated morton codes as special case (using their position i,j as fallback)
//FOR BVH CONSTRUCTION
//TODO: implement AABB construction process by go back from the tree node to the root
//TODO: convert BR-TREE BACK TO BVH
//TODO: debug
__global__ void processInternalNode(unsigned int* sorted_morton_code, int numInternalNode,
BRTreeNode* leafNodes,
BRTreeNode* internalNodes)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= numInternalNode) return;
// Find out which range of objects the node corresponds to.
int2 range = determineRange(sorted_morton_code, numInternalNode, idx);
int first = range.x;
int last = range.y;
// Determine where to split the range.
int split = findSplit(sorted_morton_code, first, last);
if (split == -1) return;
// Select childA.
BRTreeNode* childA;
bool isChildALeaf = false;
if (split == first) {
childA = &(leafNodes[split]);
isChildALeaf = true;
}
else childA = &(internalNodes[split]);
// Select childB.
BRTreeNode* childB;
bool isChildBLeaf = false;
if (split + 1 == last) {
childB = &(leafNodes[split + 1]);
isChildBLeaf = true;
}
else childB = &(internalNodes[split + 1]);
// Record parent-child relationships.
internalNodes[idx].setChildA(split, isChildALeaf);
internalNodes[idx].setChildB(split + 1, isChildBLeaf);
childA->setParent(idx);
childB->setParent(idx);
}
/**
* construct bounding boxes from leaf up to root
*/
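/*
 * Each leaf's thread climbs toward the root. The atomicInc on every internal
 * node's counter makes the first arriving thread return immediately, so only
 * the second arrival -- when both children's boxes are final -- merges them
 * and continues upward.
 */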
__global__ void calculateBoudingBox(BBox* d_bboxes, int numLeafNode,
BRTreeNode* leafNodes, BRTreeNode* internalNodes)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= numLeafNode) return;
//handle leaf first
BRTreeNode* node = &leafNodes[idx];
node->bbox = d_bboxes[idx];
//terminate if it is root node
bool is_null = false;
int parentIdx = node->getParent(is_null);
//if (is_null) return;
node = &internalNodes[parentIdx];
int initial_val = atomicInc(&node->counter, 1);
while (1)
{
if (initial_val == 0) return; //terminate the first arriving thread
//calculate bounding box by merging two children's bounding box
bool is_leaf = false;
int childAIdx = node->getChildA(is_leaf, is_null);
if (is_leaf) node->bbox.expand(leafNodes[childAIdx].bbox);
else node->bbox.expand(internalNodes[childAIdx].bbox);
int childBIdx = node->getChildB(is_leaf, is_null);
if (is_leaf) node->bbox.expand(leafNodes[childBIdx].bbox);
else node->bbox.expand(internalNodes[childBIdx].bbox);
//terminate if it is root node
parentIdx = node->getParent(is_null);
if (is_null) return;
node = &internalNodes[parentIdx];
initial_val = atomicInc(&node->counter, 1);
}
}
/**
* build binary radix tree on GPU
*/
void ParallelBRTreeBuilder::build()
{
//build the bvh
int threadPerBlock = DEFAULT_THREAD_PER_BLOCK;
int numBlock = (numInternalNode + DEFAULT_THREAD_PER_BLOCK - 1) / threadPerBlock;
processInternalNode << <numBlock, threadPerBlock >> >(d_sorted_morton_code, numInternalNode,
d_leaf_nodes, d_internal_nodes);
//fix << <1, 1 >> > (d_leaf_nodes, d_internal_nodes);
//calculate bounding box
threadPerBlock = DEFAULT_THREAD_PER_BLOCK;
numBlock = (numLeafNode + DEFAULT_THREAD_PER_BLOCK - 1) / threadPerBlock;
calculateBoudingBox << <numBlock, threadPerBlock >> >(d_bboxes, numLeafNode,
d_leaf_nodes, d_internal_nodes);
}
/**
* get leaf nodes (host)
*/
BRTreeNode* ParallelBRTreeBuilder::get_leaf_nodes()
{
copyFromGPUtoCPU((void**)&h_leaf_nodes, d_leaf_nodes, numLeafNode * sizeof(BRTreeNode));
return h_leaf_nodes;
}
/**
* get internal nodes (host)
*/
BRTreeNode* ParallelBRTreeBuilder::get_internal_nodes()
{
copyFromGPUtoCPU((void**)&h_internal_nodes, d_internal_nodes, numInternalNode * sizeof(BRTreeNode));
return h_internal_nodes;
}
/**
* free memory on host
*/
void ParallelBRTreeBuilder::freeHostMemory()
{
}
/**
* free memory on device
*/
void ParallelBRTreeBuilder::freeDeviceMemory()
{
cudaFree(d_leaf_nodes);
cudaFree(d_internal_nodes);
cudaFree(d_sorted_morton_code);
}
/**
 * destructor
*/
ParallelBRTreeBuilder::~ParallelBRTreeBuilder() {}
BRTreeNode* ParallelBRTreeBuilder::get_d_leaf_nodes()
{
return d_leaf_nodes;
}
BRTreeNode* ParallelBRTreeBuilder::get_d_internal_nodes()
{
return d_internal_nodes;
}
void ParallelBRTreeBuilder::set_d_leaf_nodes(BRTreeNode* p)
{
d_leaf_nodes = p;
}
void ParallelBRTreeBuilder::set_d_internal_nodes(BRTreeNode* p)
{
d_internal_nodes = p;
}
|
ec570f3c768b7d516a4637dbf3627afbc2691696.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// TODO use `thrust::exclusive_scan`
// example: for device_vectors dv_in and dv_out:
thrust::device_vector<int> dv_in(idata, idata + n);
thrust::device_vector<int> dv_out(odata, odata + n);
timer().startGpuTimer();
thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
timer().endGpuTimer();
printf("Thrust scan: %f ms\n",timer().getGpuElapsedTimeForPreviousOperation());
thrust::copy(dv_out.begin(), dv_out.end(), odata);
}
}
}
| ec570f3c768b7d516a4637dbf3627afbc2691696.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// TODO use `thrust::exclusive_scan`
// example: for device_vectors dv_in and dv_out:
thrust::device_vector<int> dv_in(idata, idata + n);
thrust::device_vector<int> dv_out(odata, odata + n);
timer().startGpuTimer();
thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
timer().endGpuTimer();
printf("Thrust scan: %f ms\n",timer().getGpuElapsedTimeForPreviousOperation());
thrust::copy(dv_out.begin(), dv_out.end(), odata);
}
}
}
|
46288142512582199e0f7181f66936316e11e965.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <sys/time.h>
#include <math.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <stdio.h>
#include <sys/time.h>
#include <stdio.h>
#include <math.h>
#include <strings.h>
#include <math.h>
#include "common.h"
#define D2R M_PI/180.0
#define R2D 180.0/M_PI
#define R2AM 60.0*180.0/M_PI
#define bins_per_dec 5
#define min_arcmin 1.0
#define max_arcmin 10000.0
#define NUM_BINS 20
typedef struct _options_
{
char *data_name;
char *random_name;
int random_count;
int npoints;
char *output_name;
} options;
void usage(char *name);
void parse_args(int argc, char **argv, options* args);
double cpu_time ( void )
{
double value;
value = ( double ) clock ( ) / ( double ) CLOCKS_PER_SEC;
return value;
}
#ifndef _HEADER
#define _HEADER
#ifdef __cplusplus
extern "C" {
#endif
#include <unistd.h>
/* Command line parameters for benchmarks */
struct pb_Parameters {
char *outFile; /* If not NULL, the raw output of the
* computation should be saved to this
* file. The string is owned. */
char **inpFiles; /* A NULL-terminated array of strings
* holding the input file(s) for the
* computation. The array and strings
* are owned. */
};
/* Read command-line parameters.
*
* The argc and argv parameters to main are read, and any parameters
* interpreted by this function are removed from the argument list.
*
* A new instance of struct pb_Parameters is returned.
* If there is an error, then an error message is printed on stderr
* and NULL is returned.
*/
struct pb_Parameters *
pb_ReadParameters(int *_argc, char **argv);
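/* Illustrative usage sketch (not part of the original benchmark; the
 * variable names here are made up for the example):
 *
 *   int main(int argc, char **argv) {
 *     struct pb_Parameters *params = pb_ReadParameters(&argc, argv);
 *     if (params == NULL) return 1;
 *     const char *first_input = params->inpFiles[0];   // from "-i file1,file2"
 *     const char *output_file = params->outFile;       // from "-o file", may be NULL
 *     pb_FreeParameters(params);
 *     return 0;
 *   }
 */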
/* Free an instance of struct pb_Parameters.
*/
void
pb_FreeParameters(struct pb_Parameters *p);
/* Count the number of input files in a pb_Parameters instance.
*/
int
pb_Parameters_CountInputs(struct pb_Parameters *p);
/* A time or duration. */
#if _POSIX_VERSION >= 200112L
typedef unsigned long long pb_Timestamp; /* time in microseconds */
#else
# error "Timestamps not implemented"
#endif
enum pb_TimerState {
pb_Timer_STOPPED,
pb_Timer_RUNNING,
};
struct pb_Timer {
enum pb_TimerState state;
pb_Timestamp elapsed; /* Amount of time elapsed so far */
pb_Timestamp init; /* Beginning of the current time interval,
* if state is RUNNING. End of the last
 * recorded time interval otherwise. */
};
/* Reset a timer.
* Use this to initialize a timer or to clear
* its elapsed time. The reset timer is stopped.
*/
void
pb_ResetTimer(struct pb_Timer *timer);
/* Start a timer. The timer is set to RUNNING mode and
* time elapsed while the timer is running is added to
* the timer.
* The timer should not already be running.
*/
void
pb_StartTimer(struct pb_Timer *timer);
/* Stop a timer.
* This stops adding elapsed time to the timer.
* The timer should not already be stopped.
*/
void
pb_StopTimer(struct pb_Timer *timer);
/* Get the elapsed time in seconds. */
double
pb_GetElapsedTime(struct pb_Timer *timer);
/* Execution time is assigned to one of these categories. */
enum pb_TimerID {
pb_TimerID_NONE = 0,
pb_TimerID_IO, /* Time spent in input/output */
pb_TimerID_KERNEL, /* Time spent computing on the device,
* recorded asynchronously */
pb_TimerID_COPY, /* Time spent synchronously moving data
* to/from device and allocating/freeing
* memory on the device */
pb_TimerID_DRIVER, /* Time spent in the host interacting with the
* driver, primarily for recording the time
* spent queueing asynchronous operations */
pb_TimerID_COPY_ASYNC, /* Time spent in asynchronous transfers */
pb_TimerID_COMPUTE, /* Time for all program execution other
* than parsing command line arguments,
* I/O, kernel, and copy */
pb_TimerID_OVERLAP, /* Time double-counted in asynchronous and
* host activity: automatically filled in,
* not intended for direct usage */
pb_TimerID_LAST /* Number of timer IDs */
};
/* Dynamic list of asynchronously tracked times between events */
struct pb_async_time_marker_list {
char *label; // actually just a pointer to a string
enum pb_TimerID timerID; /* The ID to which the interval beginning
* with this marker should be attributed */
void * marker;
//hipEvent_t marker; /* The driver event for this marker */
struct pb_async_time_marker_list *next;
};
struct pb_SubTimer {
char *label;
struct pb_Timer timer;
struct pb_SubTimer *next;
};
struct pb_SubTimerList {
struct pb_SubTimer *current;
struct pb_SubTimer *subtimer_list;
};
/* A set of timers for recording execution times. */
struct pb_TimerSet {
enum pb_TimerID current;
struct pb_async_time_marker_list* async_markers;
pb_Timestamp async_begin;
pb_Timestamp wall_begin;
struct pb_Timer timers[pb_TimerID_LAST];
struct pb_SubTimerList *sub_timer_list[pb_TimerID_LAST];
};
/* Reset all timers in the set. */
void
pb_InitializeTimerSet(struct pb_TimerSet *timers);
void
pb_AddSubTimer(struct pb_TimerSet *timers, char *label, enum pb_TimerID pb_Category);
/* Select which timer the next interval of time should be accounted
* to. The selected timer is started and other timers are stopped.
* Using pb_TimerID_NONE stops all timers. */
void
pb_SwitchToTimer(struct pb_TimerSet *timers, enum pb_TimerID timer);
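/* Typical call pattern (a sketch mirroring how sequential() below drives the API):
 *
 *   struct pb_TimerSet timers;
 *   pb_InitializeTimerSet(&timers);
 *   pb_SwitchToTimer(&timers, pb_TimerID_IO);       // account file reads to IO
 *   pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);  // account computation to COMPUTE
 *   pb_SwitchToTimer(&timers, pb_TimerID_NONE);     // stop all timers
 *   pb_PrintTimerSet(&timers);
 */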
void
pb_SwitchToSubTimer(struct pb_TimerSet *timers, char *label, enum pb_TimerID category);
/* Print timer values to standard output. */
void
pb_PrintTimerSet(struct pb_TimerSet *timers);
/* Release timer resources */
void
pb_DestroyTimerSet(struct pb_TimerSet * timers);
void
pb_SetOpenCL(void *clContextPtr, void *clCommandQueuePtr);
#ifdef __cplusplus
}
#endif
#endif
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#if _POSIX_VERSION >= 200112L
# include <sys/time.h>
#endif
/* Free an array of owned strings. */
static void
free_string_array(char **string_array)
{
char **p;
if (!string_array) return;
for (p = string_array; *p; p++) free(*p);
free(string_array);
}
/* Parse a comma-delimited list of strings into an
* array of strings. */
static char **
read_string_array(char *in)
{
char **ret;
int i;
int count; /* Number of items in the input */
char *substring; /* Current substring within 'in' */
/* Count the number of items in the string */
count = 1;
for (i = 0; in[i]; i++) if (in[i] == ',') count++;
/* Allocate storage */
ret = (char **)malloc((count + 1) * sizeof(char *));
/* Create copies of the strings from the list */
substring = in;
for (i = 0; i < count; i++) {
char *substring_end;
int substring_length;
/* Find length of substring */
for (substring_end = substring;
(*substring_end != ',') && (*substring_end != 0);
substring_end++);
substring_length = substring_end - substring;
/* Allocate memory and copy the substring */
ret[i] = (char *)malloc(substring_length + 1);
memcpy(ret[i], substring, substring_length);
ret[i][substring_length] = 0;
/* go to next substring */
substring = substring_end + 1;
}
ret[i] = NULL; /* Write the sentinel value */
return ret;
}
struct argparse {
int argc; /* Number of arguments. Mutable. */
char **argv; /* Argument values. Immutable. */
int argn; /* Current argument number. */
char **argv_get; /* Argument value being read. */
char **argv_put; /* Argument value being written.
* argv_put <= argv_get. */
};
static void
initialize_argparse(struct argparse *ap, int argc, char **argv)
{
ap->argc = argc;
ap->argn = 0;
ap->argv_get = ap->argv_put = ap->argv = argv;
}
static void
finalize_argparse(struct argparse *ap)
{
/* Move the remaining arguments */
for (; ap->argn < ap->argc; ap->argn++)
*ap->argv_put++ = *ap->argv_get++;
}
/* Delete the current argument. */
static void
delete_argument(struct argparse *ap)
{
if (ap->argn >= ap->argc) {
fprintf(stderr, "delete_argument\n");
}
ap->argc--;
ap->argv_get++;
}
/* Go to the next argument. Also, move the current argument to its
* final location in argv. */
static void
next_argument(struct argparse *ap)
{
if (ap->argn >= ap->argc) {
fprintf(stderr, "next_argument\n");
}
/* Move argument to its new location. */
*ap->argv_put++ = *ap->argv_get++;
ap->argn++;
}
static int
is_end_of_arguments(struct argparse *ap)
{
return ap->argn == ap->argc;
}
static char *
get_argument(struct argparse *ap)
{
return *ap->argv_get;
}
static char *
consume_argument(struct argparse *ap)
{
char *ret = get_argument(ap);
delete_argument(ap);
return ret;
}
struct pb_Parameters *
pb_ReadParameters(int *_argc, char **argv)
{
char *err_message;
struct argparse ap;
struct pb_Parameters *ret =
(struct pb_Parameters *)malloc(sizeof(struct pb_Parameters));
/* Initialize the parameters structure */
ret->outFile = NULL;
ret->inpFiles = (char **)malloc(sizeof(char *));
ret->inpFiles[0] = NULL;
/* Each argument */
initialize_argparse(&ap, *_argc, argv);
while (!is_end_of_arguments(&ap)) {
char *arg = get_argument(&ap);
/* Single-character flag */
if ((arg[0] == '-') && (arg[1] != 0) && (arg[2] == 0)) {
delete_argument(&ap); /* This argument is consumed here */
switch (arg[1]) {
case 'o': /* Output file name */
if (is_end_of_arguments(&ap))
{
err_message = "Expecting file name after '-o'\n";
goto error;
}
free(ret->outFile);
ret->outFile = strdup(consume_argument(&ap));
break;
case 'i': /* Input file name */
if (is_end_of_arguments(&ap))
{
err_message = "Expecting file name after '-i'\n";
goto error;
}
ret->inpFiles = read_string_array(consume_argument(&ap));
break;
case '-': /* End of options */
goto end_of_options;
default:
err_message = "Unexpected command-line parameter\n";
goto error;
}
}
else {
/* Other parameters are ignored */
next_argument(&ap);
}
} /* end for each argument */
end_of_options:
*_argc = ap.argc; /* Save the modified argc value */
finalize_argparse(&ap);
return ret;
error:
fputs(err_message, stderr);
pb_FreeParameters(ret);
return NULL;
}
void
pb_FreeParameters(struct pb_Parameters *p)
{
char **cpp;
free(p->outFile);
free_string_array(p->inpFiles);
free(p);
}
int
pb_Parameters_CountInputs(struct pb_Parameters *p)
{
int n;
for (n = 0; p->inpFiles[n]; n++);
return n;
}
/*****************************************************************************/
/* Timer routines */
static void
accumulate_time(pb_Timestamp *accum,
pb_Timestamp start,
pb_Timestamp end)
{
#if _POSIX_VERSION >= 200112L
* accum += end - start;
#else
# error "Timestamps not implemented for this system"
#endif
}
#if _POSIX_VERSION >= 200112L
static pb_Timestamp get_time()
{
struct timeval tv;
gettimeofday(&tv, NULL);
return (pb_Timestamp)(tv.tv_sec * 1000000LL + tv.tv_usec);
}
#else
# error "no supported time libraries are available on this platform"
#endif
void
pb_ResetTimer(struct pb_Timer *timer)
{
timer->state = pb_Timer_STOPPED;
#if _POSIX_VERSION >= 200112L
timer->elapsed = 0;
#else
# error "pb_ResetTimer: not implemented for this system"
#endif
}
void
pb_StartTimer(struct pb_Timer *timer)
{
if (timer->state != pb_Timer_STOPPED) {
fputs("Ignoring attempt to start a running timer\n", stderr);
return;
}
timer->state = pb_Timer_RUNNING;
#if _POSIX_VERSION >= 200112L
{
struct timeval tv;
gettimeofday(&tv, NULL);
timer->init = tv.tv_sec * 1000000LL + tv.tv_usec;
}
#else
# error "pb_StartTimer: not implemented for this system"
#endif
}
void
pb_StartTimerAndSubTimer(struct pb_Timer *timer, struct pb_Timer *subtimer)
{
unsigned int numNotStopped = 0x3; // 11
if (timer->state != pb_Timer_STOPPED) {
fputs("Warning: Timer was not stopped\n", stderr);
numNotStopped &= 0x1; // Zero out 2^1
}
if (subtimer->state != pb_Timer_STOPPED) {
fputs("Warning: Subtimer was not stopped\n", stderr);
numNotStopped &= 0x2; // Zero out 2^0
}
if (numNotStopped == 0x0) {
fputs("Ignoring attempt to start running timer and subtimer\n", stderr);
return;
}
timer->state = pb_Timer_RUNNING;
subtimer->state = pb_Timer_RUNNING;
#if _POSIX_VERSION >= 200112L
{
struct timeval tv;
gettimeofday(&tv, NULL);
if (numNotStopped & 0x2) {
timer->init = tv.tv_sec * 1000000LL + tv.tv_usec;
}
if (numNotStopped & 0x1) {
subtimer->init = tv.tv_sec * 1000000LL + tv.tv_usec;
}
}
#else
# error "pb_StartTimer: not implemented for this system"
#endif
}
void
pb_StopTimer(struct pb_Timer *timer)
{
pb_Timestamp fini;
if (timer->state != pb_Timer_RUNNING) {
fputs("Ignoring attempt to stop a stopped timer\n", stderr);
return;
}
timer->state = pb_Timer_STOPPED;
#if _POSIX_VERSION >= 200112L
{
struct timeval tv;
gettimeofday(&tv, NULL);
fini = tv.tv_sec * 1000000LL + tv.tv_usec;
}
#else
# error "pb_StopTimer: not implemented for this system"
#endif
accumulate_time(&timer->elapsed, timer->init, fini);
timer->init = fini;
}
void pb_StopTimerAndSubTimer(struct pb_Timer *timer, struct pb_Timer *subtimer) {
pb_Timestamp fini;
unsigned int numNotRunning = 0x3; // 0b11
if (timer->state != pb_Timer_RUNNING) {
fputs("Warning: Timer was not running\n", stderr);
numNotRunning &= 0x1; // Zero out 2^1
}
if (subtimer->state != pb_Timer_RUNNING) {
fputs("Warning: Subtimer was not running\n", stderr);
numNotRunning &= 0x2; // Zero out 2^0
}
if (numNotRunning == 0x0) {
fputs("Ignoring attempt to stop stopped timer and subtimer\n", stderr);
return;
}
timer->state = pb_Timer_STOPPED;
subtimer->state = pb_Timer_STOPPED;
#if _POSIX_VERSION >= 200112L
{
struct timeval tv;
gettimeofday(&tv, NULL);
fini = tv.tv_sec * 1000000LL + tv.tv_usec;
}
#else
# error "pb_StopTimer: not implemented for this system"
#endif
if (numNotRunning & 0x2) {
accumulate_time(&timer->elapsed, timer->init, fini);
timer->init = fini;
}
if (numNotRunning & 0x1) {
accumulate_time(&subtimer->elapsed, subtimer->init, fini);
subtimer->init = fini;
}
}
/* Get the elapsed time in seconds. */
double
pb_GetElapsedTime(struct pb_Timer *timer)
{
double ret;
if (timer->state != pb_Timer_STOPPED) {
fputs("Elapsed time from a running timer is inaccurate\n", stderr);
}
#if _POSIX_VERSION >= 200112L
ret = timer->elapsed / 1e6;
#else
# error "pb_GetElapsedTime: not implemented for this system"
#endif
return ret;
}
void
pb_InitializeTimerSet(struct pb_TimerSet *timers)
{
int n;
timers->wall_begin = get_time();
timers->current = pb_TimerID_NONE;
timers->async_markers = NULL;
for (n = 0; n < pb_TimerID_LAST; n++) {
pb_ResetTimer(&timers->timers[n]);
timers->sub_timer_list[n] = NULL; // free first?
}
}
void
pb_AddSubTimer(struct pb_TimerSet *timers, char *label, enum pb_TimerID pb_Category) {
struct pb_SubTimer *subtimer = (struct pb_SubTimer *) malloc
(sizeof(struct pb_SubTimer));
int len = strlen(label);
subtimer->label = (char *)malloc(sizeof(char)*(len + 1));
sprintf(subtimer->label, "%s", label);
pb_ResetTimer(&subtimer->timer);
subtimer->next = NULL;
struct pb_SubTimerList *subtimerlist = timers->sub_timer_list[pb_Category];
if (subtimerlist == NULL) {
subtimerlist = (struct pb_SubTimerList *) malloc
(sizeof(struct pb_SubTimerList));
subtimerlist->subtimer_list = subtimer;
timers->sub_timer_list[pb_Category] = subtimerlist;
}
else {
// Append to list
struct pb_SubTimer *element = subtimerlist->subtimer_list;
while (element->next != NULL) {
element = element->next;
}
element->next = subtimer;
}
}
void
pb_SwitchToSubTimer(struct pb_TimerSet *timers, char *label, enum pb_TimerID category)
{
// switchToSub( NULL, NONE
// switchToSub( NULL, some
// switchToSub( some, some
// switchToSub( some, NONE -- tries to find "some" in NONE's sublist, which won't be printed
struct pb_Timer *topLevelToStop = NULL;
if (timers->current != category && timers->current != pb_TimerID_NONE) {
// Switching to a subtimer in a different category needs to stop the current top-level timer, which belongs to the old category.
// NONE shouldn't have a timer associated with it, so exclude from branch
topLevelToStop = &timers->timers[timers->current];
}
struct pb_SubTimerList *subtimerlist = timers->sub_timer_list[timers->current];
struct pb_SubTimer *curr = (subtimerlist == NULL) ? NULL : subtimerlist->current;
if (timers->current != pb_TimerID_NONE) {
if (curr != NULL && topLevelToStop != NULL) {
pb_StopTimerAndSubTimer(topLevelToStop, &curr->timer);
}
else if (curr != NULL) {
pb_StopTimer(&curr->timer);
}
else {
pb_StopTimer(topLevelToStop);
}
}
subtimerlist = timers->sub_timer_list[category];
struct pb_SubTimer *subtimer = NULL;
if (label != NULL) {
subtimer = subtimerlist->subtimer_list;
while (subtimer != NULL) {
if (strcmp(subtimer->label, label) == 0) {
break;
}
else {
subtimer = subtimer->next;
}
}
}
if (category != pb_TimerID_NONE) {
if (subtimerlist != NULL) {
subtimerlist->current = subtimer;
}
if (category != timers->current && subtimer != NULL) {
pb_StartTimerAndSubTimer(&timers->timers[category], &subtimer->timer);
}
else if (subtimer != NULL) {
// Same category, different non-NULL subtimer
pb_StartTimer(&subtimer->timer);
}
else {
// Different category, but no subtimer (not found or specified as NULL) -- less preferred way of setting the top-level timer
pb_StartTimer(&timers->timers[category]);
}
}
timers->current = category;
}
void
pb_SwitchToTimer(struct pb_TimerSet *timers, enum pb_TimerID timer)
{
/* Stop the currently running timer */
if (timers->current != pb_TimerID_NONE) {
struct pb_SubTimer *currSubTimer = NULL;
struct pb_SubTimerList *subtimerlist = timers->sub_timer_list[timers->current];
if (subtimerlist != NULL) {
currSubTimer = timers->sub_timer_list[timers->current]->current;
}
if (currSubTimer != NULL) {
pb_StopTimerAndSubTimer(&timers->timers[timers->current], &currSubTimer->timer);
}
else {
pb_StopTimer(&timers->timers[timers->current]);
}
}
timers->current = timer;
if (timer != pb_TimerID_NONE) {
pb_StartTimer(&timers->timers[timer]);
}
}
void
pb_PrintTimerSet(struct pb_TimerSet *timers)
{
pb_Timestamp wall_end = get_time();
struct pb_Timer *t = timers->timers;
struct pb_SubTimer* sub = NULL;
int maxSubLength;
const char *categories[] = {
"IO", "Kernel", "Copy", "Driver", "Copy Async", "Compute"
};
const int maxCategoryLength = 10;
int i;
for (i = 1; i < pb_TimerID_LAST - 1; ++i) { // exclude NONE and OVERLAP from this format
if (pb_GetElapsedTime(&t[i]) != 0) {
// Print Category Timer
printf("%-*s: %f\n", maxCategoryLength, categories[i - 1], pb_GetElapsedTime(&t[i]));
if (timers->sub_timer_list[i] != NULL) {
sub = timers->sub_timer_list[i]->subtimer_list;
maxSubLength = 0;
while (sub != NULL) {
// Find longest SubTimer label
if (strlen(sub->label) > maxSubLength) {
maxSubLength = strlen(sub->label);
}
sub = sub->next;
}
// Fit to Categories
if (maxSubLength <= maxCategoryLength) {
maxSubLength = maxCategoryLength;
}
sub = timers->sub_timer_list[i]->subtimer_list;
// Print SubTimers
while (sub != NULL) {
printf(" -%-*s: %f\n", maxSubLength, sub->label, pb_GetElapsedTime(&sub->timer));
sub = sub->next;
}
}
}
}
if (pb_GetElapsedTime(&t[pb_TimerID_OVERLAP]) != 0)
printf("CPU/Kernel Overlap: %f\n", pb_GetElapsedTime(&t[pb_TimerID_OVERLAP]));
float walltime = (wall_end - timers->wall_begin) / 1e6;
printf("Timer Wall Time: %f\n", walltime);
}
void pb_DestroyTimerSet(struct pb_TimerSet * timers)
{
/* clean up all of the async event markers */
struct pb_async_time_marker_list ** event = &(timers->async_markers);
while (*event != NULL) {
struct pb_async_time_marker_list ** next = &((*event)->next);
free(*event);
(*event) = NULL;
event = next;
}
int i = 0;
for (i = 0; i < pb_TimerID_LAST; ++i) {
if (timers->sub_timer_list[i] != NULL) {
struct pb_SubTimer *subtimer = timers->sub_timer_list[i]->subtimer_list;
struct pb_SubTimer *prev = NULL;
while (subtimer != NULL) {
free(subtimer->label);
prev = subtimer;
subtimer = subtimer->next;
free(prev);
}
free(timers->sub_timer_list[i]);
}
}
}
extern char *optarg;
void usage(char *name)
{
printf("Usage: %s <-d data_file_name> <-r rnd_file_name> "
"<-m rnd_count> <-p count> <-o file_name>\n", name);
exit(0);
}
void parse_args(int argc, char **argv, options* args)
{
int c;
args->data_name = NULL;
args->random_name = NULL;
args->random_count = 0;
args->npoints = 0;
args->output_name = NULL;
while ((c = getopt(argc, argv, "d:n:r:p:o:")) != EOF)
{
switch (c)
{
case 'd':
args->data_name = optarg;
break;
case 'r':
args->random_name = optarg;
break;
case 'n':
args->random_count = atoi(optarg);
break;
case 'o':
args->output_name = optarg;
break;
case 'p':
args->npoints = atol(optarg);
break;
default:
usage(argv[0]);
}
}
}
typedef unsigned long hist_t;
struct spherical
{
float ra, dec; // right ascension, declination pair
};
struct cartesian
{
double x, y, z; // cartesian coordinates
};
int readdatafile(char *fname, struct cartesian *data, int npoints);
int doCompute(struct cartesian *data1, int n1, struct cartesian *data2,
int n2, int doSelf, long long *data_bins,
int nbins, double *binb);
void initBinB(struct pb_TimerSet *timers);
int readdatafile(char *fname, struct cartesian *data, int npoints)
{
FILE *infile;
int lcount = 0;
float ra, dec;
if ((infile = fopen(fname, "r")) == NULL)
{
fprintf(stderr, "Unable to open data file %s for reading\n", fname);
return lcount;
}
for (lcount = 0; lcount < npoints; lcount++)
{
if (fscanf(infile, "%f %f", &ra, &dec) != 2)
break;
{
// data conversion
float rarad = D2R * ra;
float decrad = D2R * dec;
float cd = cos(decrad);
data[lcount].x = cos(rarad) * cd;
data[lcount].y = sin(rarad) * cd;
data[lcount].z = sin(decrad);
}
}
fclose(infile);
return lcount;
}
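/*
 * Histogram the angular separations of point pairs (data1 x data2, or all
 * unique pairs within data1 when doSelf is set). Each pair's dot product is
 * the cosine of its angular separation; a binary search over the bin
 * boundaries binb (cosines of the bin edges) picks the slot of data_bins to
 * increment.
 */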
int doCompute(struct cartesian *data1, int n1, struct cartesian *data2,
int n2, int doSelf, long long *data_bins,
int nbins, double *binb)
{
int i, j, k;
if (doSelf)
{
n2 = n1;
data2 = data1;
}
for (i = 0; i < ((doSelf) ? n1 - 1 : n1); i++)
{
const register double xi = data1[i].x;
const register double yi = data1[i].y;
const register double zi = data1[i].z;
for (j = ((doSelf) ? i + 1 : 0); j < n2; j++)
{
register double dot = xi * data2[j].x + yi * data2[j].y +
zi * data2[j].z;
// run binary search
register int min = 0;
register int max = nbins;
register int k, indx;
while (max > min + 1)
{
k = (min + max) / 2;
if (dot >= binb[k])
max = k;
else
min = k;
};
if (dot >= binb[min])
{
/*if (min == 1) {
printf("incrementing binb %d %d 1 %d\n", i, j, data_bins[min]);
}*/
data_bins[min] += 1; /*k = min;*/
/*if (min == 1) {
printf("after incrementing binb %d %d 1 %d\n", i, j, data_bins[min]);
}*/
}
else if (dot < binb[max])
{
/*if (max + 1 == 1) {
printf("incrementing binb %d %d 1 %d\n", i, j, data_bins[max + 1]);
}*/
data_bins[max + 1] += 1; /*k = max+1;*/
/* if (max + 1 == 1) {
printf("after incrementing binb %d %d 1 %d\n", i, j, data_bins[max + 1]);
}*/
}
else
{
/*if (max == 1) {
printf("incrementing binb %d %d 1 %d\n", i, j, data_bins[max]);
}*/
data_bins[max] += 1; /*k = max;*/
/*if (max == 1) {
printf("after incrementing binb %d %d 1 %d\n", i, j, data_bins[max]);
}*/
}
}
}
return 0;
}
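/*
 * CPU reference implementation of the two-point angular correlation
 * histograms: computes DD from the data file, and RR / DR against each
 * random file, writes the per-bin counts to the output file, and stores the
 * elapsed time plus the flattened histograms in *results.
 */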
int sequential( struct pb_Parameters *params, options args, Result_Vect *results )
{
struct pb_TimerSet timers;
int rf, k, nbins, npd, npr;
double *binb, w;
long long *DD, *RRS, *DRS;
size_t memsize;
struct cartesian *data, *random;
FILE *outfile;
double ctime;
double ctime1;
double ctime2;
pb_InitializeTimerSet( &timers );
pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE );
nbins = (int)floor(bins_per_dec * (log10(max_arcmin) -
log10(min_arcmin)));
memsize = (nbins+2)*sizeof(long long);
printf("SEQUENTIAL RUN \n");
ctime1 = cpu_time ( );
// memory for bin boundaries
binb = (double *)malloc((nbins+1)*sizeof(double));
if (binb == NULL)
{
fprintf(stderr, "Unable to allocate memory\n");
exit(-1);
}
for (k = 0; k < nbins+1; k++)
{
binb[k] = cos(pow(10, log10(min_arcmin) +
k*1.0/bins_per_dec) / 60.0*D2R);
}
// memory for DD
DD = (long long*)malloc(memsize);
if (DD == NULL)
{
fprintf(stderr, "Unable to allocate memory\n");
exit(-1);
}
bzero(DD, memsize);
// memory for RR
RRS = (long long*)malloc(memsize);
if (RRS == NULL)
{
fprintf(stderr, "Unable to allocate memory\n");
exit(-1);
}
bzero(RRS, memsize);
// memory for DR
DRS = (long long*)malloc(memsize);
if (DRS == NULL)
{
fprintf(stderr, "Unable to allocate memory\n");
exit(-1);
}
bzero(DRS, memsize);
// memory for input data
data = (struct cartesian*)malloc
(args.npoints* sizeof(struct cartesian));
if (data == NULL)
{
fprintf(stderr,
"Unable to allocate memory for % data points (#1)\n",
args.npoints);
return(0);
}
random = (struct cartesian*)malloc
(args.npoints*sizeof(struct cartesian));
if (random == NULL)
{
fprintf(stderr,
"Unable to allocate memory for % data points (#2)\n",
args.npoints);
return(0);
}
printf("Min distance: %f arcmin\n", min_arcmin);
printf("Max distance: %f arcmin\n", max_arcmin);
printf("Bins per dec: %i\n", bins_per_dec);
printf("Total bins : %i\n", nbins);
// read data file
pb_SwitchToTimer( &timers, pb_TimerID_IO );
npd = readdatafile(params->inpFiles[0], data, args.npoints);
pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE );
if (npd != args.npoints)
{
fprintf(stderr,
"Error: read %i data points out of %i\n",
npd, args.npoints);
return(0);
}
// compute DD
doCompute(data, npd, NULL, 0, 1, DD, nbins, binb);
// loop through random data files
for (rf = 0; rf < args.random_count; rf++)
{
// read random file
pb_SwitchToTimer( &timers, pb_TimerID_IO );
npr = readdatafile(params->inpFiles[rf+1], random, args.npoints);
pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE );
if (npr != args.npoints)
{
fprintf(stderr,
"Error: read %i random points out of %i in file %s\n",
npr, args.npoints, params->inpFiles[rf+1]);
return(0);
}
// compute RR
doCompute(random, npr, NULL, 0, 1, RRS, nbins, binb);
// compute DR
doCompute(data, npd, random, npr, 0, DRS, nbins, binb);
}
// compute and output results
if ((outfile = fopen(params->outFile, "w")) == NULL)
{
fprintf(stderr,
"Unable to open output file %s for writing, assuming stdout\n",
params->outFile);
outfile = stdout;
}
ctime2 = cpu_time ( );
ctime = ctime2 - ctime1;
results->time = ctime;
results->val_size = nbins * 3;
results->value = (double*)malloc(sizeof(double) * nbins*3);
pb_SwitchToTimer( &timers, pb_TimerID_IO );
for (k = 1; k < nbins+1; k++)
{
fprintf(outfile, "%lld\n%lld\n%lld\n", DD[k], DRS[k], RRS[k]);
results->value[(k-1) * 3] = DD[k];
results->value[(k-1) * 3 + 1] = DRS[k];
results->value[(k-1) * 3 + 2] = RRS[k];
// printf("upisujem %d %d %d\n", (k-1) * 3, (k-1)*3 + 1, (k-1) * 3 + 2);
}
// printf("hmm seq ? %d %d\n", DRS[k], RRS[k]);
if(outfile != stdout)
fclose(outfile);
// free memory
free(data);
free(random);
free(binb);
free(DD);
free(RRS);
free(DRS);
pb_SwitchToTimer( &timers, pb_TimerID_NONE );
pb_PrintTimerSet( &timers );
return 0;
}
__global__ void parallel_compute_kernel_self(struct cartesian *data1, int n1, struct cartesian *data2, int n2, unsigned long long int *data_bins, int nbins, double *binb) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < n1-1 && j < n2 && j > i ) {
const double xi = data1[i].x;
const double yi = data1[i].y;
const double zi = data1[i].z;
double dot = xi * data2[j].x + yi * data2[j].y +
zi * data2[j].z;
// run binary search
int min = 0;
int max = nbins;
int k, indx;
while (max > min + 1)
{
k = (min + max) / 2;
if (dot >= binb[k])
max = k;
else
min = k;
};
if (dot >= binb[min])
{
//if (min == 1) {
// //printf("incrementing binb %d %d 1 %d\n", i, j, data_bins[min]);
//}
atomicAdd(data_bins + min, 1);
//data_bins[min] += 1; /*k = min;*/
//if (min == 1) {
// //printf("after incrementing binb %d %d 1 %d\n", i, j, data_bins[min]);
//}
}
else if (dot < binb[max])
{
//if (max + 1 == 1) {
// //printf("incrementing binb %d %d 1 %d\n", i, j, data_bins[max + 1]);
//}
atomicAdd(data_bins + max + 1, 1);
//data_bins[max + 1] += 1; /*k = max+1;*/
//if (max + 1 == 1) {
// //printf("after incrementing binb %d %d 1 %d\n", i, j, data_bins[max + 1]);
//}
}
else
{
//if (max == 1) {
// //printf("incrementing binb %d %d 1 %d\n", i, j, data_bins[max]);
//}
atomicAdd(data_bins + max, 1);
//data_bins[max] += 1; /*k = max;*/
//if (max == 1) {
// //printf("after incrementing binb %d %d 1 %d\n", i, j, data_bins[max]);
//}
}
}
}
__global__ void parallel_compute_kernel(struct cartesian *data1, int n1, struct cartesian *data2, int n2, unsigned long long int *data_bins, int nbins, double *binb) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < n1 && j < n2) {
const double xi = data1[i].x;
const double yi = data1[i].y;
const double zi = data1[i].z;
double dot = xi * data2[j].x + yi * data2[j].y +
zi * data2[j].z;
// run binary search
int min = 0;
int max = nbins;
int k, indx;
while (max > min + 1)
{
k = (min + max) / 2;
if (dot >= binb[k])
max = k;
else
min = k;
};
if (dot >= binb[min])
{
/*if (min == 1) {
printf("incrementing binb %d %d 1 %d\n", i,j, data_bins[min]);
}*/
atomicAdd(data_bins + min, 1);
//data_bins[min] += 1; /*k = min;*/
/*if (min == 1) {
printf("after incrementing binb %d %d 1 %d\n", i, j, data_bins[min]);
}*/
}
else if (dot < binb[max])
{
/*if (max + 1 == 1) {
printf("incrementing binb %d %d 1 %d\n", i, j, data_bins[max+1]);
}*/
atomicAdd(data_bins + max + 1, 1);
//data_bins[max + 1] += 1; /*k = max+1;*/
/*if (max + 1 == 1) {
printf("after incrementing binb %d %d 1 %d\n", i, j, data_bins[max+1]);
}*/
}
else
{
/*if (max == 1) {
printf("incrementing binb %d %d 1 %d\n", i, j, data_bins[max]);
}*/
atomicAdd(data_bins + max, 1);
//data_bins[max] += 1; /*k = max;*/
/*if (max == 1) {
printf("after incrementing binb %d %d 1 %d\n", i, j, data_bins[max]);
}*/
}
}
}
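/* Thread mapping: each (i, j) thread handles one point pair. The _self variant
 * above only processes j > i so every unordered pair is counted once, mirroring
 * the (i, i+1..n2) loops of the sequential doCompute(). As a rough example,
 * n1 = n2 = 4096 points with the 32x16 blocks configured below gives a grid of
 * 128 x 256 blocks. */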
int parallelDoCompute(struct cartesian *data1, int n1, struct cartesian *data2,
int n2, int doSelf, unsigned long long int *data_bins,
int nbins, double *binbCuda)
{
int i, j, k;
if (doSelf)
{
n2 = n1;
data2 = data1;
}
struct cartesian *data1Cuda, *data2Cuda;
hipMalloc((void **)&data1Cuda, n1 * sizeof(struct cartesian));
if (hipSuccess != hipGetLastError()) {
printf("error hipMalloc data1Cuda\n");
}
hipMemcpy(data1Cuda, data1, n1 * sizeof(struct cartesian), hipMemcpyHostToDevice);
if (hipSuccess != hipGetLastError()) {
printf("error hipMemcpy data1Cuda\n");
}
hipMalloc((void **)&data2Cuda, n2 * sizeof(struct cartesian));
if (hipSuccess != hipGetLastError()) {
printf("error hipMalloc data2Cuda\n");
}
hipMemcpy(data2Cuda, data2, n2 * sizeof(struct cartesian), hipMemcpyHostToDevice);
if (hipSuccess != hipGetLastError()) {
printf("error hipMemcpy data2\n");
}
if (doSelf) {
dim3 threadsPerBlock(32, 16);
dim3 numBlocks((n1 - 1 + threadsPerBlock.x - 1) / threadsPerBlock.x, (n2 + threadsPerBlock.y - 1) / threadsPerBlock.y);
//for (int i = 0; i < n1 - 1; i++)
//{
// const register float xi = data1[i].x;
// const register float yi = data1[i].y;
// const register float zi = data1[i].z;
// for (int j = i + 1; j < n2; j++)
// {
// register float dot = xi * data2[j].x + yi * data2[j].y +
// zi * data2[j].z;
// // run binary search
// register int min = 0;
// register int max = nbins;
// register int k, indx;
// while (max > min + 1)
// {
// k = (min + max) / 2;
// if (dot >= binbCuda[k])
// max = k;
// else
// min = k;
// };
// if (dot >= binbCuda[min])
// {
// data_bins[min] += 1; /*k = min;*/
// }
// else if (dot < binbCuda[max])
// {
// data_bins[max + 1] += 1; /*k = max+1;*/
// }
// else
// {
// data_bins[max] += 1; /*k = max;*/
// }
// }
//}
parallel_compute_kernel_self<<<numBlocks, threadsPerBlock>>>(data1Cuda, n1, data2Cuda, n2, data_bins, nbins, binbCuda);
if (hipSuccess != hipGetLastError()) {
printf("error parallel_compute_kernel_self\n");
}
}
else {
dim3 threadsPerBlock(32, 16);
dim3 numBlocks((n1 + threadsPerBlock.x - 1) / threadsPerBlock.x, (n2 + threadsPerBlock.y - 1) / threadsPerBlock.y);
//for (int i = 0; i < n1; i++)
//{
// const register float xi = data1[i].x;
// const register float yi = data1[i].y;
// const register float zi = data1[i].z;
// for (int j = 0; j < n2; j++)
// {
// register float dot = xi * data2[j].x + yi * data2[j].y +
// zi * data2[j].z;
// // run binary search
// register int min = 0;
// register int max = nbins;
// register int k, indx;
// while (max > min + 1)
// {
// k = (min + max) / 2;
// if (dot >= binbCuda[k])
// max = k;
// else
// min = k;
// };
// if (dot >= binbCuda[min])
// {
// data_bins[min] += 1; /*k = min;*/
// }
// else if (dot < binbCuda[max])
// {
// data_bins[max + 1] += 1; /*k = max+1;*/
// }
// else
// {
// data_bins[max] += 1; /*k = max;*/
// }
// }
//}
parallel_compute_kernel<<<numBlocks, threadsPerBlock>>>(data1Cuda, n1, data2Cuda, n2, data_bins, nbins, binbCuda);
if (hipSuccess != hipGetLastError()) {
printf("error parallel_compute_kernel\n");
}
}
hipFree(data1Cuda);
hipFree(data2Cuda);
return 0;
}
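/* The repeated hipGetLastError() checks above could be factored into a small
 * helper; a sketch (not part of the original code):
 *
 *   #define HIP_CHECK(msg)                                                  \
 *       do {                                                                \
 *           hipError_t e = hipGetLastError();                               \
 *           if (e != hipSuccess)                                            \
 *               printf("error %s: %s\n", (msg), hipGetErrorString(e));      \
 *       } while (0)
 */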
int parallel(struct pb_Parameters *params, options args, Result_Vect *results)
{
struct pb_TimerSet timers;
int rf, k, nbins, npd, npr;
double *binb, w;
long long *DD;
unsigned long long int *RRS, *DRS;
unsigned long long int *RRSCuda, *DRSCuda;
size_t memsize;
size_t memsize2;
struct cartesian *data, *random;
FILE *outfile;
double ctime;
double ctime1;
double ctime2;
pb_InitializeTimerSet(&timers);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
nbins = (int)floor(bins_per_dec * (log10(max_arcmin) -
log10(min_arcmin)));
memsize = (nbins + 2) * sizeof(long long);
memsize2 = (nbins + 2) * sizeof(unsigned long long int);
printf("PARALLEL RUN \n");
ctime1 = cpu_time();
// memory for bin boundaries
binb = (double *)malloc((nbins + 1) * sizeof(double));
if (binb == NULL)
{
fprintf(stderr, "Unable to allocate memory\n");
exit(-1);
}
for (k = 0; k < nbins + 1; k++)
{
binb[k] = cos(pow(10, log10(min_arcmin) +
k*1.0 / bins_per_dec) / 60.0*D2R);
}
double *binbCuda;
hipMalloc((void **)&binbCuda, (nbins + 1) * sizeof(double));
if (hipSuccess != hipGetLastError()) {
printf("error hipMalloc binbcuda\n");
}
hipMemcpy(binbCuda, binb, (nbins + 1) * sizeof(double), hipMemcpyHostToDevice);
if (hipSuccess != hipGetLastError()) {
printf("error hipMemcpy binbcuda\n");
}
// memory for DD
DD = (long long*)malloc(memsize);
if (DD == NULL)
{
fprintf(stderr, "Unable to allocate memory\n");
exit(-1);
}
bzero(DD, memsize);
// memory for RR
RRS = (unsigned long long int*)malloc(memsize2);
if (RRS == NULL)
{
fprintf(stderr, "Unable to allocate memory\n");
exit(-1);
}
bzero(RRS, memsize2);
//printf("memsize %d\n", memsize);
hipMalloc((void **)&RRSCuda, memsize2);
if (hipSuccess != hipGetLastError()) {
printf("error hipMalloc rrs\n");
}
hipMemset(RRSCuda, 0, memsize2);
if (hipSuccess != hipGetLastError()) {
printf("error hipMalloc rrs\n");
}
// memory for DR
DRS = (unsigned long long int*)malloc(memsize2);
if (DRS == NULL)
{
fprintf(stderr, "Unable to allocate memory\n");
exit(-1);
}
bzero(DRS, memsize2);
hipMalloc((void **)&DRSCuda, memsize2);
if (hipSuccess != hipGetLastError()) {
printf("error hipMalloc drs\n");
}
hipMemset(DRSCuda, 0, memsize2);
if (hipSuccess != hipGetLastError()) {
printf("error hipMalloc drs\n");
}
// memory for input data
data = (struct cartesian*)malloc
(args.npoints * sizeof(struct cartesian));
if (data == NULL)
{
fprintf(stderr,
"Unable to allocate memory for % data points (#1)\n",
args.npoints);
return(0);
}
random = (struct cartesian*)malloc
(args.npoints * sizeof(struct cartesian));
if (random == NULL)
{
fprintf(stderr,
"Unable to allocate memory for % data points (#2)\n",
args.npoints);
return(0);
}
printf("Min distance: %f arcmin\n", min_arcmin);
printf("Max distance: %f arcmin\n", max_arcmin);
printf("Bins per dec: %i\n", bins_per_dec);
printf("Total bins : %i\n", nbins);
// read data file
pb_SwitchToTimer(&timers, pb_TimerID_IO);
npd = readdatafile(params->inpFiles[0], data, args.npoints);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
if (npd != args.npoints)
{
fprintf(stderr,
"Error: read %i data points out of %i\n",
npd, args.npoints);
return(0);
}
// compute DD
doCompute(data, npd, NULL, 0, 1, DD, nbins, binb);
// loop through random data files
for (rf = 0; rf < args.random_count; rf++)
{
// read random file
pb_SwitchToTimer(&timers, pb_TimerID_IO);
npr = readdatafile(params->inpFiles[rf + 1], random, args.npoints);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
if (npr != args.npoints)
{
fprintf(stderr,
"Error: read %i random points out of %i in file %s\n",
npr, args.npoints, params->inpFiles[rf + 1]);
return(0);
}
// compute RR
parallelDoCompute(random, npr, NULL, 0, 1, RRSCuda, nbins, binbCuda);
// compute DR
parallelDoCompute(data, npd, random, npr, 0, DRSCuda, nbins, binbCuda);
}
hipDeviceSynchronize();
if (hipSuccess != hipGetLastError()) {
printf("errorr device sync\n");
}
hipMemcpy(RRS, RRSCuda, memsize2, hipMemcpyDeviceToHost);
hipMemcpy(DRS, DRSCuda, memsize2, hipMemcpyDeviceToHost);
// compute and output results
if ((outfile = fopen(params->outFile, "w")) == NULL)
{
fprintf(stderr,
"Unable to open output file %s for writing, assuming stdout\n",
params->outFile);
outfile = stdout;
}
ctime2 = cpu_time();
ctime = ctime2 - ctime1;
results->time = ctime;
results->val_size = nbins * 3;
results->value = (double*)malloc(sizeof(double) * nbins * 3);
pb_SwitchToTimer(&timers, pb_TimerID_IO);
for (k = 1; k < nbins + 1; k++)
{
fprintf(outfile, "%lld\n%llu\n%llu\n", DD[k], DRS[k], RRS[k]);
results->value[(k - 1) * 3] = DD[k];
results->value[(k - 1) * 3 + 1] = DRS[k];
results->value[(k - 1) * 3 + 2] = RRS[k];
// printf("upisujem %d %d %d\n", (k-1) * 3, (k-1)*3 + 1, (k-1) * 3 + 2);
}
// printf("hmm seq ? %d %d\n", DRS[k], RRS[k]);
if (outfile != stdout)
fclose(outfile);
// free memory
free(data);
free(random);
free(binb);
free(DD);
free(RRS);
free(DRS);
hipFree(RRSCuda);
hipFree(DRSCuda);
hipFree(binbCuda);
pb_SwitchToTimer(&timers, pb_TimerID_NONE);
pb_PrintTimerSet(&timers);
return 0;
}
int main(int argc, char * argv[]) {
Result_Vect seq_result, par_result;
struct pb_Parameters *params;
params = pb_ReadParameters( &argc, argv );
options args;
parse_args( argc, argv, &args );
sequential(params, args, &seq_result);
parallel(params, args, &par_result);
pb_FreeParameters( params );
compare_and_print_vect(seq_result, par_result, "tpacf");
}
| 46288142512582199e0f7181f66936316e11e965.cu | #include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <sys/time.h>
#include <math.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <stdio.h>
#include <sys/time.h>
#include <stdio.h>
#include <math.h>
#include <strings.h>
#include <math.h>
#include "common.h"
#define D2R M_PI/180.0
#define R2D 180.0/M_PI
#define R2AM 60.0*180.0/M_PI
#define bins_per_dec 5
#define min_arcmin 1.0
#define max_arcmin 10000.0
#define NUM_BINS 20
typedef struct _options_
{
char *data_name;
char *random_name;
int random_count;
int npoints;
char *output_name;
} options;
void usage(char *name);
void parse_args(int argc, char **argv, options* args);
double cpu_time ( void )
{
double value;
value = ( double ) clock ( ) / ( double ) CLOCKS_PER_SEC;
return value;
}
#ifndef _HEADER
#define _HEADER
#ifdef __cplusplus
extern "C" {
#endif
#include <unistd.h>
/* Command line parameters for benchmarks */
struct pb_Parameters {
char *outFile; /* If not NULL, the raw output of the
* computation should be saved to this
* file. The string is owned. */
char **inpFiles; /* A NULL-terminated array of strings
* holding the input file(s) for the
* computation. The array and strings
* are owned. */
};
/* Read command-line parameters.
*
* The argc and argv parameters to main are read, and any parameters
* interpreted by this function are removed from the argument list.
*
* A new instance of struct pb_Parameters is returned.
* If there is an error, then an error message is printed on stderr
* and NULL is returned.
*/
struct pb_Parameters *
pb_ReadParameters(int *_argc, char **argv);
/* Free an instance of struct pb_Parameters.
*/
void
pb_FreeParameters(struct pb_Parameters *p);
/* Count the number of input files in a pb_Parameters instance.
*/
int
pb_Parameters_CountInputs(struct pb_Parameters *p);
/* A time or duration. */
#if _POSIX_VERSION >= 200112L
typedef unsigned long long pb_Timestamp; /* time in microseconds */
#else
# error "Timestamps not implemented"
#endif
enum pb_TimerState {
pb_Timer_STOPPED,
pb_Timer_RUNNING,
};
struct pb_Timer {
enum pb_TimerState state;
pb_Timestamp elapsed; /* Amount of time elapsed so far */
pb_Timestamp init; /* Beginning of the current time interval,
* if state is RUNNING. End of the last
* recorded time interfal otherwise. */
};
/* Reset a timer.
* Use this to initialize a timer or to clear
* its elapsed time. The reset timer is stopped.
*/
void
pb_ResetTimer(struct pb_Timer *timer);
/* Start a timer. The timer is set to RUNNING mode and
* time elapsed while the timer is running is added to
* the timer.
* The timer should not already be running.
*/
void
pb_StartTimer(struct pb_Timer *timer);
/* Stop a timer.
* This stops adding elapsed time to the timer.
* The timer should not already be stopped.
*/
void
pb_StopTimer(struct pb_Timer *timer);
/* Get the elapsed time in seconds. */
double
pb_GetElapsedTime(struct pb_Timer *timer);
/* Execution time is assigned to one of these categories. */
enum pb_TimerID {
pb_TimerID_NONE = 0,
pb_TimerID_IO, /* Time spent in input/output */
pb_TimerID_KERNEL, /* Time spent computing on the device,
* recorded asynchronously */
pb_TimerID_COPY, /* Time spent synchronously moving data
* to/from device and allocating/freeing
* memory on the device */
pb_TimerID_DRIVER, /* Time spent in the host interacting with the
* driver, primarily for recording the time
* spent queueing asynchronous operations */
pb_TimerID_COPY_ASYNC, /* Time spent in asynchronous transfers */
pb_TimerID_COMPUTE, /* Time for all program execution other
* than parsing command line arguments,
* I/O, kernel, and copy */
pb_TimerID_OVERLAP, /* Time double-counted in asynchronous and
* host activity: automatically filled in,
* not intended for direct usage */
pb_TimerID_LAST /* Number of timer IDs */
};
/* Dynamic list of asynchronously tracked times between events */
struct pb_async_time_marker_list {
char *label; // actually just a pointer to a string
enum pb_TimerID timerID; /* The ID to which the interval beginning
* with this marker should be attributed */
void * marker;
//cudaEvent_t marker; /* The driver event for this marker */
struct pb_async_time_marker_list *next;
};
struct pb_SubTimer {
char *label;
struct pb_Timer timer;
struct pb_SubTimer *next;
};
struct pb_SubTimerList {
struct pb_SubTimer *current;
struct pb_SubTimer *subtimer_list;
};
/* A set of timers for recording execution times. */
struct pb_TimerSet {
enum pb_TimerID current;
struct pb_async_time_marker_list* async_markers;
pb_Timestamp async_begin;
pb_Timestamp wall_begin;
struct pb_Timer timers[pb_TimerID_LAST];
struct pb_SubTimerList *sub_timer_list[pb_TimerID_LAST];
};
/* Reset all timers in the set. */
void
pb_InitializeTimerSet(struct pb_TimerSet *timers);
void
pb_AddSubTimer(struct pb_TimerSet *timers, char *label, enum pb_TimerID pb_Category);
/* Select which timer the next interval of time should be accounted
* to. The selected timer is started and other timers are stopped.
* Using pb_TimerID_NONE stops all timers. */
void
pb_SwitchToTimer(struct pb_TimerSet *timers, enum pb_TimerID timer);
void
pb_SwitchToSubTimer(struct pb_TimerSet *timers, char *label, enum pb_TimerID category);
/* Print timer values to standard output. */
void
pb_PrintTimerSet(struct pb_TimerSet *timers);
/* Release timer resources */
void
pb_DestroyTimerSet(struct pb_TimerSet * timers);
void
pb_SetOpenCL(void *clContextPtr, void *clCommandQueuePtr);
#ifdef __cplusplus
}
#endif
#endif
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#if _POSIX_VERSION >= 200112L
# include <sys/time.h>
#endif
/* Free an array of owned strings. */
static void
free_string_array(char **string_array)
{
char **p;
if (!string_array) return;
for (p = string_array; *p; p++) free(*p);
free(string_array);
}
/* Parse a comma-delimited list of strings into an
* array of strings. */
static char **
read_string_array(char *in)
{
char **ret;
int i;
int count; /* Number of items in the input */
char *substring; /* Current substring within 'in' */
/* Count the number of items in the string */
count = 1;
for (i = 0; in[i]; i++) if (in[i] == ',') count++;
/* Allocate storage */
ret = (char **)malloc((count + 1) * sizeof(char *));
/* Create copies of the strings from the list */
substring = in;
for (i = 0; i < count; i++) {
char *substring_end;
int substring_length;
/* Find length of substring */
for (substring_end = substring;
(*substring_end != ',') && (*substring_end != 0);
substring_end++);
substring_length = substring_end - substring;
/* Allocate memory and copy the substring */
ret[i] = (char *)malloc(substring_length + 1);
memcpy(ret[i], substring, substring_length);
ret[i][substring_length] = 0;
/* go to next substring */
substring = substring_end + 1;
}
ret[i] = NULL; /* Write the sentinel value */
return ret;
}
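/* Example: read_string_array("a.dat,b.dat") returns the owned array
 * { "a.dat", "b.dat", NULL }; this is how the comma-separated value of the -i
 * option becomes pb_Parameters::inpFiles below. */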
struct argparse {
int argc; /* Number of arguments. Mutable. */
char **argv; /* Argument values. Immutable. */
int argn; /* Current argument number. */
char **argv_get; /* Argument value being read. */
char **argv_put; /* Argument value being written.
* argv_put <= argv_get. */
};
static void
initialize_argparse(struct argparse *ap, int argc, char **argv)
{
ap->argc = argc;
ap->argn = 0;
ap->argv_get = ap->argv_put = ap->argv = argv;
}
static void
finalize_argparse(struct argparse *ap)
{
/* Move the remaining arguments */
for (; ap->argn < ap->argc; ap->argn++)
*ap->argv_put++ = *ap->argv_get++;
}
/* Delete the current argument. */
static void
delete_argument(struct argparse *ap)
{
if (ap->argn >= ap->argc) {
fprintf(stderr, "delete_argument\n");
}
ap->argc--;
ap->argv_get++;
}
/* Go to the next argument. Also, move the current argument to its
* final location in argv. */
static void
next_argument(struct argparse *ap)
{
if (ap->argn >= ap->argc) {
fprintf(stderr, "next_argument\n");
}
/* Move argument to its new location. */
*ap->argv_put++ = *ap->argv_get++;
ap->argn++;
}
static int
is_end_of_arguments(struct argparse *ap)
{
return ap->argn == ap->argc;
}
static char *
get_argument(struct argparse *ap)
{
return *ap->argv_get;
}
static char *
consume_argument(struct argparse *ap)
{
char *ret = get_argument(ap);
delete_argument(ap);
return ret;
}
struct pb_Parameters *
pb_ReadParameters(int *_argc, char **argv)
{
char *err_message;
struct argparse ap;
struct pb_Parameters *ret =
(struct pb_Parameters *)malloc(sizeof(struct pb_Parameters));
/* Initialize the parameters structure */
ret->outFile = NULL;
ret->inpFiles = (char **)malloc(sizeof(char *));
ret->inpFiles[0] = NULL;
/* Each argument */
initialize_argparse(&ap, *_argc, argv);
while (!is_end_of_arguments(&ap)) {
char *arg = get_argument(&ap);
/* Single-character flag */
if ((arg[0] == '-') && (arg[1] != 0) && (arg[2] == 0)) {
delete_argument(&ap); /* This argument is consumed here */
switch (arg[1]) {
case 'o': /* Output file name */
if (is_end_of_arguments(&ap))
{
err_message = "Expecting file name after '-o'\n";
goto error;
}
free(ret->outFile);
ret->outFile = strdup(consume_argument(&ap));
break;
case 'i': /* Input file name */
if (is_end_of_arguments(&ap))
{
err_message = "Expecting file name after '-i'\n";
goto error;
}
ret->inpFiles = read_string_array(consume_argument(&ap));
break;
case '-': /* End of options */
goto end_of_options;
default:
err_message = "Unexpected command-line parameter\n";
goto error;
}
}
else {
/* Other parameters are ignored */
next_argument(&ap);
}
} /* end for each argument */
end_of_options:
*_argc = ap.argc; /* Save the modified argc value */
finalize_argparse(&ap);
return ret;
error:
fputs(err_message, stderr);
pb_FreeParameters(ret);
return NULL;
}
void
pb_FreeParameters(struct pb_Parameters *p)
{
char **cpp;
free(p->outFile);
free_string_array(p->inpFiles);
free(p);
}
int
pb_Parameters_CountInputs(struct pb_Parameters *p)
{
int n;
for (n = 0; p->inpFiles[n]; n++);
return n;
}
/*****************************************************************************/
/* Timer routines */
static void
accumulate_time(pb_Timestamp *accum,
pb_Timestamp start,
pb_Timestamp end)
{
#if _POSIX_VERSION >= 200112L
* accum += end - start;
#else
# error "Timestamps not implemented for this system"
#endif
}
#if _POSIX_VERSION >= 200112L
static pb_Timestamp get_time()
{
struct timeval tv;
gettimeofday(&tv, NULL);
return (pb_Timestamp)(tv.tv_sec * 1000000LL + tv.tv_usec);
}
#else
# error "no supported time libraries are available on this platform"
#endif
void
pb_ResetTimer(struct pb_Timer *timer)
{
timer->state = pb_Timer_STOPPED;
#if _POSIX_VERSION >= 200112L
timer->elapsed = 0;
#else
# error "pb_ResetTimer: not implemented for this system"
#endif
}
void
pb_StartTimer(struct pb_Timer *timer)
{
if (timer->state != pb_Timer_STOPPED) {
fputs("Ignoring attempt to start a running timer\n", stderr);
return;
}
timer->state = pb_Timer_RUNNING;
#if _POSIX_VERSION >= 200112L
{
struct timeval tv;
gettimeofday(&tv, NULL);
timer->init = tv.tv_sec * 1000000LL + tv.tv_usec;
}
#else
# error "pb_StartTimer: not implemented for this system"
#endif
}
void
pb_StartTimerAndSubTimer(struct pb_Timer *timer, struct pb_Timer *subtimer)
{
unsigned int numNotStopped = 0x3; // 11
if (timer->state != pb_Timer_STOPPED) {
fputs("Warning: Timer was not stopped\n", stderr);
numNotStopped &= 0x1; // Zero out 2^1
}
if (subtimer->state != pb_Timer_STOPPED) {
fputs("Warning: Subtimer was not stopped\n", stderr);
numNotStopped &= 0x2; // Zero out 2^0
}
if (numNotStopped == 0x0) {
fputs("Ignoring attempt to start running timer and subtimer\n", stderr);
return;
}
timer->state = pb_Timer_RUNNING;
subtimer->state = pb_Timer_RUNNING;
#if _POSIX_VERSION >= 200112L
{
struct timeval tv;
gettimeofday(&tv, NULL);
if (numNotStopped & 0x2) {
timer->init = tv.tv_sec * 1000000LL + tv.tv_usec;
}
if (numNotStopped & 0x1) {
subtimer->init = tv.tv_sec * 1000000LL + tv.tv_usec;
}
}
#else
# error "pb_StartTimer: not implemented for this system"
#endif
}
void
pb_StopTimer(struct pb_Timer *timer)
{
pb_Timestamp fini;
if (timer->state != pb_Timer_RUNNING) {
fputs("Ignoring attempt to stop a stopped timer\n", stderr);
return;
}
timer->state = pb_Timer_STOPPED;
#if _POSIX_VERSION >= 200112L
{
struct timeval tv;
gettimeofday(&tv, NULL);
fini = tv.tv_sec * 1000000LL + tv.tv_usec;
}
#else
# error "pb_StopTimer: not implemented for this system"
#endif
accumulate_time(&timer->elapsed, timer->init, fini);
timer->init = fini;
}
void pb_StopTimerAndSubTimer(struct pb_Timer *timer, struct pb_Timer *subtimer) {
pb_Timestamp fini;
unsigned int numNotRunning = 0x3; // 0b11
if (timer->state != pb_Timer_RUNNING) {
fputs("Warning: Timer was not running\n", stderr);
numNotRunning &= 0x1; // Zero out 2^1
}
if (subtimer->state != pb_Timer_RUNNING) {
fputs("Warning: Subtimer was not running\n", stderr);
numNotRunning &= 0x2; // Zero out 2^0
}
if (numNotRunning == 0x0) {
fputs("Ignoring attempt to stop stopped timer and subtimer\n", stderr);
return;
}
timer->state = pb_Timer_STOPPED;
subtimer->state = pb_Timer_STOPPED;
#if _POSIX_VERSION >= 200112L
{
struct timeval tv;
gettimeofday(&tv, NULL);
fini = tv.tv_sec * 1000000LL + tv.tv_usec;
}
#else
# error "pb_StopTimer: not implemented for this system"
#endif
if (numNotRunning & 0x2) {
accumulate_time(&timer->elapsed, timer->init, fini);
timer->init = fini;
}
if (numNotRunning & 0x1) {
accumulate_time(&subtimer->elapsed, subtimer->init, fini);
subtimer->init = fini;
}
}
/* Get the elapsed time in seconds. */
double
pb_GetElapsedTime(struct pb_Timer *timer)
{
double ret;
if (timer->state != pb_Timer_STOPPED) {
fputs("Elapsed time from a running timer is inaccurate\n", stderr);
}
#if _POSIX_VERSION >= 200112L
ret = timer->elapsed / 1e6;
#else
# error "pb_GetElapsedTime: not implemented for this system"
#endif
return ret;
}
void
pb_InitializeTimerSet(struct pb_TimerSet *timers)
{
int n;
timers->wall_begin = get_time();
timers->current = pb_TimerID_NONE;
timers->async_markers = NULL;
for (n = 0; n < pb_TimerID_LAST; n++) {
pb_ResetTimer(&timers->timers[n]);
timers->sub_timer_list[n] = NULL; // free first?
}
}
void
pb_AddSubTimer(struct pb_TimerSet *timers, char *label, enum pb_TimerID pb_Category) {
struct pb_SubTimer *subtimer = (struct pb_SubTimer *) malloc
(sizeof(struct pb_SubTimer));
int len = strlen(label);
subtimer->label = (char *)malloc(sizeof(char)*(len + 1));
sprintf(subtimer->label, "%s\0", label);
pb_ResetTimer(&subtimer->timer);
subtimer->next = NULL;
struct pb_SubTimerList *subtimerlist = timers->sub_timer_list[pb_Category];
if (subtimerlist == NULL) {
subtimerlist = (struct pb_SubTimerList *) malloc
(sizeof(struct pb_SubTimerList));
subtimerlist->subtimer_list = subtimer;
timers->sub_timer_list[pb_Category] = subtimerlist;
}
else {
// Append to list
struct pb_SubTimer *element = subtimerlist->subtimer_list;
while (element->next != NULL) {
element = element->next;
}
element->next = subtimer;
}
}
void
pb_SwitchToSubTimer(struct pb_TimerSet *timers, char *label, enum pb_TimerID category)
{
// switchToSub( NULL, NONE
// switchToSub( NULL, some
// switchToSub( some, some
// switchToSub( some, NONE -- tries to find "some" in NONE's sublist, which won't be printed
struct pb_Timer *topLevelToStop = NULL;
if (timers->current != category && timers->current != pb_TimerID_NONE) {
// Switching to subtimer in a different category needs to stop the top-level current, different categoried timer.
// NONE shouldn't have a timer associated with it, so exclude from branch
topLevelToStop = &timers->timers[timers->current];
}
struct pb_SubTimerList *subtimerlist = timers->sub_timer_list[timers->current];
struct pb_SubTimer *curr = (subtimerlist == NULL) ? NULL : subtimerlist->current;
if (timers->current != pb_TimerID_NONE) {
if (curr != NULL && topLevelToStop != NULL) {
pb_StopTimerAndSubTimer(topLevelToStop, &curr->timer);
}
else if (curr != NULL) {
pb_StopTimer(&curr->timer);
}
else {
pb_StopTimer(topLevelToStop);
}
}
subtimerlist = timers->sub_timer_list[category];
struct pb_SubTimer *subtimer = NULL;
if (label != NULL) {
subtimer = subtimerlist->subtimer_list;
while (subtimer != NULL) {
if (strcmp(subtimer->label, label) == 0) {
break;
}
else {
subtimer = subtimer->next;
}
}
}
if (category != pb_TimerID_NONE) {
if (subtimerlist != NULL) {
subtimerlist->current = subtimer;
}
if (category != timers->current && subtimer != NULL) {
pb_StartTimerAndSubTimer(&timers->timers[category], &subtimer->timer);
}
else if (subtimer != NULL) {
// Same category, different non-NULL subtimer
pb_StartTimer(&subtimer->timer);
}
else {
// Different category, but no subtimer (not found or specified as NULL) -- unprefered way of setting topLevel timer
pb_StartTimer(&timers->timers[category]);
}
}
timers->current = category;
}
void
pb_SwitchToTimer(struct pb_TimerSet *timers, enum pb_TimerID timer)
{
/* Stop the currently running timer */
if (timers->current != pb_TimerID_NONE) {
struct pb_SubTimer *currSubTimer = NULL;
struct pb_SubTimerList *subtimerlist = timers->sub_timer_list[timers->current];
if (subtimerlist != NULL) {
currSubTimer = timers->sub_timer_list[timers->current]->current;
}
if (currSubTimer != NULL) {
pb_StopTimerAndSubTimer(&timers->timers[timers->current], &currSubTimer->timer);
}
else {
pb_StopTimer(&timers->timers[timers->current]);
}
}
timers->current = timer;
if (timer != pb_TimerID_NONE) {
pb_StartTimer(&timers->timers[timer]);
}
}
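/* Typical timing pattern used by the benchmark code below (a condensed sketch):
 *
 *   struct pb_TimerSet timers;
 *   pb_InitializeTimerSet(&timers);
 *   pb_SwitchToTimer(&timers, pb_TimerID_IO);       // account file reading to IO
 *   ...
 *   pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);  // account computation
 *   ...
 *   pb_SwitchToTimer(&timers, pb_TimerID_NONE);     // stop all timers
 *   pb_PrintTimerSet(&timers);
 */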
void
pb_PrintTimerSet(struct pb_TimerSet *timers)
{
pb_Timestamp wall_end = get_time();
struct pb_Timer *t = timers->timers;
struct pb_SubTimer* sub = NULL;
int maxSubLength;
const char *categories[] = {
"IO", "Kernel", "Copy", "Driver", "Copy Async", "Compute"
};
const int maxCategoryLength = 10;
int i;
for (i = 1; i < pb_TimerID_LAST - 1; ++i) { // exclude NONE and OVERLAP from this format
if (pb_GetElapsedTime(&t[i]) != 0) {
// Print Category Timer
printf("%-*s: %f\n", maxCategoryLength, categories[i - 1], pb_GetElapsedTime(&t[i]));
if (timers->sub_timer_list[i] != NULL) {
sub = timers->sub_timer_list[i]->subtimer_list;
maxSubLength = 0;
while (sub != NULL) {
// Find longest SubTimer label
if (strlen(sub->label) > maxSubLength) {
maxSubLength = strlen(sub->label);
}
sub = sub->next;
}
// Fit to Categories
if (maxSubLength <= maxCategoryLength) {
maxSubLength = maxCategoryLength;
}
sub = timers->sub_timer_list[i]->subtimer_list;
// Print SubTimers
while (sub != NULL) {
printf(" -%-*s: %f\n", maxSubLength, sub->label, pb_GetElapsedTime(&sub->timer));
sub = sub->next;
}
}
}
}
if (pb_GetElapsedTime(&t[pb_TimerID_OVERLAP]) != 0)
printf("CPU/Kernel Overlap: %f\n", pb_GetElapsedTime(&t[pb_TimerID_OVERLAP]));
float walltime = (wall_end - timers->wall_begin) / 1e6;
printf("Timer Wall Time: %f\n", walltime);
}
void pb_DestroyTimerSet(struct pb_TimerSet * timers)
{
/* clean up all of the async event markers */
struct pb_async_time_marker_list ** event = &(timers->async_markers);
while (*event != NULL) {
struct pb_async_time_marker_list ** next = &((*event)->next);
free(*event);
(*event) = NULL;
event = next;
}
int i = 0;
for (i = 0; i < pb_TimerID_LAST; ++i) {
if (timers->sub_timer_list[i] != NULL) {
struct pb_SubTimer *subtimer = timers->sub_timer_list[i]->subtimer_list;
struct pb_SubTimer *prev = NULL;
while (subtimer != NULL) {
free(subtimer->label);
prev = subtimer;
subtimer = subtimer->next;
free(prev);
}
free(timers->sub_timer_list[i]);
}
}
}
extern char *optarg;
void usage(char *name)
{
printf("Usage: %s <-d data_file_name> <-r rnd_file_name> "
"<-m rnd_count> <-p count> <-o file_name>\n", name);
exit(0);
}
void parse_args(int argc, char **argv, options* args)
{
int c;
args->data_name = NULL;
args->random_name = NULL;
args->random_count = 0;
args->npoints = 0;
args->output_name = NULL;
while ((c = getopt(argc, argv, "d:n:r:p:o:")) != EOF)
{
switch (c)
{
case 'd':
args->data_name = optarg;
break;
case 'r':
args->random_name = optarg;
break;
case 'n':
args->random_count = atoi(optarg);
break;
case 'o':
args->output_name = optarg;
break;
case 'p':
args->npoints = atol(optarg);
break;
default:
usage(argv[0]);
}
}
}
typedef unsigned long hist_t;
struct spherical
{
float ra, dec; // right ascension, declination (degrees)
};
struct cartesian
{
double x, y, z; // cartesian coordinates
};
int readdatafile(char *fname, struct cartesian *data, int npoints);
int doCompute(struct cartesian *data1, int n1, struct cartesian *data2,
int n2, int doSelf, long long *data_bins,
int nbins, double *binb);
void initBinB(struct pb_TimerSet *timers);
int readdatafile(char *fname, struct cartesian *data, int npoints)
{
FILE *infile;
int lcount = 0;
float ra, dec;
if ((infile = fopen(fname, "r")) == NULL)
{
fprintf(stderr, "Unable to open data file %s for reading\n", fname);
return lcount;
}
for (lcount = 0; lcount < npoints; lcount++)
{
if (fscanf(infile, "%f %f", &ra, &dec) != 2)
break;
{
// data conversion
float rarad = D2R * ra;
float decrad = D2R * dec;
float cd = cos(decrad);
data[lcount].x = cos(rarad) * cd;
data[lcount].y = sin(rarad) * cd;
data[lcount].z = sin(decrad);
}
}
fclose(infile);
return lcount;
}
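/* The conversion above maps right ascension/declination (degrees) to a unit
 * vector: x = cos(ra)*cos(dec), y = sin(ra)*cos(dec), z = sin(dec); the dot
 * product of two such vectors later gives the cosine of their angular
 * separation. */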
int doCompute(struct cartesian *data1, int n1, struct cartesian *data2,
int n2, int doSelf, long long *data_bins,
int nbins, double *binb)
{
int i, j, k;
if (doSelf)
{
n2 = n1;
data2 = data1;
}
for (i = 0; i < ((doSelf) ? n1 - 1 : n1); i++)
{
const register double xi = data1[i].x;
const register double yi = data1[i].y;
const register double zi = data1[i].z;
for (j = ((doSelf) ? i + 1 : 0); j < n2; j++)
{
register double dot = xi * data2[j].x + yi * data2[j].y +
zi * data2[j].z;
// run binary search
register int min = 0;
register int max = nbins;
register int k, indx;
while (max > min + 1)
{
k = (min + max) / 2;
if (dot >= binb[k])
max = k;
else
min = k;
};
if (dot >= binb[min])
{
/*if (min == 1) {
printf("incrementing binb %d %d 1 %d\n", i, j, data_bins[min]);
}*/
data_bins[min] += 1; /*k = min;*/
/*if (min == 1) {
printf("after incrementing binb %d %d 1 %d\n", i, j, data_bins[min]);
}*/
}
else if (dot < binb[max])
{
/*if (max + 1 == 1) {
printf("incrementing binb %d %d 1 %d\n", i, j, data_bins[max + 1]);
}*/
data_bins[max + 1] += 1; /*k = max+1;*/
/* if (max + 1 == 1) {
printf("after incrementing binb %d %d 1 %d\n", i, j, data_bins[max + 1]);
}*/
}
else
{
/*if (max == 1) {
printf("incrementing binb %d %d 1 %d\n", i, j, data_bins[max]);
}*/
data_bins[max] += 1; /*k = max;*/
/*if (max == 1) {
printf("after incrementing binb %d %d 1 %d\n", i, j, data_bins[max]);
}*/
}
}
}
return 0;
}
int sequential( struct pb_Parameters *params, options args, Result_Vect *results )
{
struct pb_TimerSet timers;
int rf, k, nbins, npd, npr;
double *binb, w;
long long *DD, *RRS, *DRS;
size_t memsize;
struct cartesian *data, *random;
FILE *outfile;
double ctime;
double ctime1;
double ctime2;
pb_InitializeTimerSet( &timers );
pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE );
nbins = (int)floor(bins_per_dec * (log10(max_arcmin) -
log10(min_arcmin)));
memsize = (nbins+2)*sizeof(long long);
printf("SEQUENTIAL RUN \n");
ctime1 = cpu_time ( );
// memory for bin boundaries
binb = (double *)malloc((nbins+1)*sizeof(double));
if (binb == NULL)
{
fprintf(stderr, "Unable to allocate memory\n");
exit(-1);
}
for (k = 0; k < nbins+1; k++)
{
binb[k] = cos(pow(10, log10(min_arcmin) +
k*1.0/bins_per_dec) / 60.0*D2R);
}
// memory for DD
DD = (long long*)malloc(memsize);
if (DD == NULL)
{
fprintf(stderr, "Unable to allocate memory\n");
exit(-1);
}
bzero(DD, memsize);
// memory for RR
RRS = (long long*)malloc(memsize);
if (RRS == NULL)
{
fprintf(stderr, "Unable to allocate memory\n");
exit(-1);
}
bzero(RRS, memsize);
// memory for DR
DRS = (long long*)malloc(memsize);
if (DRS == NULL)
{
fprintf(stderr, "Unable to allocate memory\n");
exit(-1);
}
bzero(DRS, memsize);
// memory for input data
data = (struct cartesian*)malloc
(args.npoints* sizeof(struct cartesian));
if (data == NULL)
{
fprintf(stderr,
"Unable to allocate memory for % data points (#1)\n",
args.npoints);
return(0);
}
random = (struct cartesian*)malloc
(args.npoints*sizeof(struct cartesian));
if (random == NULL)
{
fprintf(stderr,
"Unable to allocate memory for % data points (#2)\n",
args.npoints);
return(0);
}
printf("Min distance: %f arcmin\n", min_arcmin);
printf("Max distance: %f arcmin\n", max_arcmin);
printf("Bins per dec: %i\n", bins_per_dec);
printf("Total bins : %i\n", nbins);
// read data file
pb_SwitchToTimer( &timers, pb_TimerID_IO );
npd = readdatafile(params->inpFiles[0], data, args.npoints);
pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE );
if (npd != args.npoints)
{
fprintf(stderr,
"Error: read %i data points out of %i\n",
npd, args.npoints);
return(0);
}
// compute DD
doCompute(data, npd, NULL, 0, 1, DD, nbins, binb);
// loop through random data files
for (rf = 0; rf < args.random_count; rf++)
{
// read random file
pb_SwitchToTimer( &timers, pb_TimerID_IO );
npr = readdatafile(params->inpFiles[rf+1], random, args.npoints);
pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE );
if (npr != args.npoints)
{
fprintf(stderr,
"Error: read %i random points out of %i in file %s\n",
npr, args.npoints, params->inpFiles[rf+1]);
return(0);
}
// compute RR
doCompute(random, npr, NULL, 0, 1, RRS, nbins, binb);
// compute DR
doCompute(data, npd, random, npr, 0, DRS, nbins, binb);
}
// compute and output results
if ((outfile = fopen(params->outFile, "w")) == NULL)
{
fprintf(stderr,
"Unable to open output file %s for writing, assuming stdout\n",
params->outFile);
outfile = stdout;
}
ctime2 = cpu_time ( );
ctime = ctime2 - ctime1;
results->time = ctime;
results->val_size = nbins * 3;
results->value = (double*)malloc(sizeof(double) * nbins*3);
pb_SwitchToTimer( &timers, pb_TimerID_IO );
for (k = 1; k < nbins+1; k++)
{
fprintf(outfile, "%lld\n%lld\n%lld\n", DD[k], DRS[k], RRS[k]);
results->value[(k-1) * 3] = DD[k];
results->value[(k-1) * 3 + 1] = DRS[k];
results->value[(k-1) * 3 + 2] = RRS[k];
// printf("upisujem %d %d %d\n", (k-1) * 3, (k-1)*3 + 1, (k-1) * 3 + 2);
}
// printf("hmm seq ? %d %d\n", DRS[k], RRS[k]);
if(outfile != stdout)
fclose(outfile);
// free memory
free(data);
free(random);
free(binb);
free(DD);
free(RRS);
free(DRS);
pb_SwitchToTimer( &timers, pb_TimerID_NONE );
pb_PrintTimerSet( &timers );
return 0;
}
__global__ void parallel_compute_kernel_self(struct cartesian *data1, int n1, struct cartesian *data2, int n2, unsigned long long int *data_bins, int nbins, double *binb) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < n1-1 && j < n2 && j > i ) {
const double xi = data1[i].x;
const double yi = data1[i].y;
const double zi = data1[i].z;
double dot = xi * data2[j].x + yi * data2[j].y +
zi * data2[j].z;
// run binary search
int min = 0;
int max = nbins;
int k, indx;
while (max > min + 1)
{
k = (min + max) / 2;
if (dot >= binb[k])
max = k;
else
min = k;
};
if (dot >= binb[min])
{
//if (min == 1) {
// //printf("incrementing binb %d %d 1 %d\n", i, j, data_bins[min]);
//}
atomicAdd(data_bins + min, 1);
//data_bins[min] += 1; /*k = min;*/
//if (min == 1) {
// //printf("after incrementing binb %d %d 1 %d\n", i, j, data_bins[min]);
//}
}
else if (dot < binb[max])
{
//if (max + 1 == 1) {
// //printf("incrementing binb %d %d 1 %d\n", i, j, data_bins[max + 1]);
//}
atomicAdd(data_bins + max + 1, 1);
//data_bins[max + 1] += 1; /*k = max+1;*/
//if (max + 1 == 1) {
// //printf("after incrementing binb %d %d 1 %d\n", i, j, data_bins[max + 1]);
//}
}
else
{
//if (max == 1) {
// //printf("incrementing binb %d %d 1 %d\n", i, j, data_bins[max]);
//}
atomicAdd(data_bins + max, 1);
//data_bins[max] += 1; /*k = max;*/
//if (max == 1) {
// //printf("after incrementing binb %d %d 1 %d\n", i, j, data_bins[max]);
//}
}
}
}
__global__ void parallel_compute_kernel(struct cartesian *data1, int n1, struct cartesian *data2, int n2, unsigned long long int *data_bins, int nbins, double *binb) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < n1 && j < n2) {
const double xi = data1[i].x;
const double yi = data1[i].y;
const double zi = data1[i].z;
double dot = xi * data2[j].x + yi * data2[j].y +
zi * data2[j].z;
// run binary search
int min = 0;
int max = nbins;
int k, indx;
while (max > min + 1)
{
k = (min + max) / 2;
if (dot >= binb[k])
max = k;
else
min = k;
};
if (dot >= binb[min])
{
/*if (min == 1) {
printf("incrementing binb %d %d 1 %d\n", i,j, data_bins[min]);
}*/
atomicAdd(data_bins + min, 1);
//data_bins[min] += 1; /*k = min;*/
/*if (min == 1) {
printf("after incrementing binb %d %d 1 %d\n", i, j, data_bins[min]);
}*/
}
else if (dot < binb[max])
{
/*if (max + 1 == 1) {
printf("incrementing binb %d %d 1 %d\n", i, j, data_bins[max+1]);
}*/
atomicAdd(data_bins + max + 1, 1);
//data_bins[max + 1] += 1; /*k = max+1;*/
/*if (max + 1 == 1) {
printf("after incrementing binb %d %d 1 %d\n", i, j, data_bins[max+1]);
}*/
}
else
{
/*if (max == 1) {
printf("incrementing binb %d %d 1 %d\n", i, j, data_bins[max]);
}*/
atomicAdd(data_bins + max, 1);
//data_bins[max] += 1; /*k = max;*/
/*if (max == 1) {
printf("after incrementing binb %d %d 1 %d\n", i, j, data_bins[max]);
}*/
}
}
}
int parallelDoCompute(struct cartesian *data1, int n1, struct cartesian *data2,
int n2, int doSelf, unsigned long long int *data_bins,
int nbins, double *binbCuda)
{
int i, j, k;
if (doSelf)
{
n2 = n1;
data2 = data1;
}
struct cartesian *data1Cuda, *data2Cuda;
cudaMalloc((void **)&data1Cuda, n1 * sizeof(struct cartesian));
if (cudaSuccess != cudaGetLastError()) {
printf("error cudaMalloc data1Cuda\n");
}
cudaMemcpy(data1Cuda, data1, n1 * sizeof(struct cartesian), cudaMemcpyHostToDevice);
if (cudaSuccess != cudaGetLastError()) {
printf("error cudaMemcpy data1Cuda\n");
}
cudaMalloc((void **)&data2Cuda, n2 * sizeof(struct cartesian));
if (cudaSuccess != cudaGetLastError()) {
printf("error cudaMalloc data2Cuda\n");
}
cudaMemcpy(data2Cuda, data2, n2 * sizeof(struct cartesian), cudaMemcpyHostToDevice);
if (cudaSuccess != cudaGetLastError()) {
printf("error cudaMemcpy data2\n");
}
if (doSelf) {
dim3 threadsPerBlock(32, 16);
dim3 numBlocks((n1 - 1 + threadsPerBlock.x - 1) / threadsPerBlock.x, (n2 + threadsPerBlock.y - 1) / threadsPerBlock.y);
//for (int i = 0; i < n1 - 1; i++)
//{
// const register float xi = data1[i].x;
// const register float yi = data1[i].y;
// const register float zi = data1[i].z;
// for (int j = i + 1; j < n2; j++)
// {
// register float dot = xi * data2[j].x + yi * data2[j].y +
// zi * data2[j].z;
// // run binary search
// register int min = 0;
// register int max = nbins;
// register int k, indx;
// while (max > min + 1)
// {
// k = (min + max) / 2;
// if (dot >= binbCuda[k])
// max = k;
// else
// min = k;
// };
// if (dot >= binbCuda[min])
// {
// data_bins[min] += 1; /*k = min;*/
// }
// else if (dot < binbCuda[max])
// {
// data_bins[max + 1] += 1; /*k = max+1;*/
// }
// else
// {
// data_bins[max] += 1; /*k = max;*/
// }
// }
//}
parallel_compute_kernel_self << < numBlocks, threadsPerBlock >> > (data1Cuda, n1, data2Cuda, n2, data_bins, nbins, binbCuda);
if (cudaSuccess != cudaGetLastError()) {
printf("error parallel_compute_kernel_self\n");
}
}
else {
dim3 threadsPerBlock(32, 16);
dim3 numBlocks((n1 + threadsPerBlock.x - 1) / threadsPerBlock.x, (n2 + threadsPerBlock.y - 1) / threadsPerBlock.y);
//for (int i = 0; i < n1; i++)
//{
// const register float xi = data1[i].x;
// const register float yi = data1[i].y;
// const register float zi = data1[i].z;
// for (int j = 0; j < n2; j++)
// {
// register float dot = xi * data2[j].x + yi * data2[j].y +
// zi * data2[j].z;
// // run binary search
// register int min = 0;
// register int max = nbins;
// register int k, indx;
// while (max > min + 1)
// {
// k = (min + max) / 2;
// if (dot >= binbCuda[k])
// max = k;
// else
// min = k;
// };
// if (dot >= binbCuda[min])
// {
// data_bins[min] += 1; /*k = min;*/
// }
// else if (dot < binbCuda[max])
// {
// data_bins[max + 1] += 1; /*k = max+1;*/
// }
// else
// {
// data_bins[max] += 1; /*k = max;*/
// }
// }
//}
parallel_compute_kernel << < numBlocks, threadsPerBlock >> > (data1Cuda, n1, data2Cuda, n2, data_bins, nbins, binbCuda);
if (cudaSuccess != cudaGetLastError()) {
printf("error parallel_compute_kernel\n");
}
}
cudaFree(data1Cuda);
cudaFree(data2Cuda);
return 0;
}
int parallel(struct pb_Parameters *params, options args, Result_Vect *results)
{
struct pb_TimerSet timers;
int rf, k, nbins, npd, npr;
double *binb, w;
long long *DD;
unsigned long long int *RRS, *DRS;
unsigned long long int *RRSCuda, *DRSCuda;
size_t memsize;
size_t memsize2;
struct cartesian *data, *random;
FILE *outfile;
double ctime;
double ctime1;
double ctime2;
pb_InitializeTimerSet(&timers);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
nbins = (int)floor(bins_per_dec * (log10(max_arcmin) -
log10(min_arcmin)));
memsize = (nbins + 2) * sizeof(long long);
memsize2 = (nbins + 2) * sizeof(unsigned long long int);
printf("PARALLEL RUN \n");
ctime1 = cpu_time();
// memory for bin boundaries
binb = (double *)malloc((nbins + 1) * sizeof(double));
if (binb == NULL)
{
fprintf(stderr, "Unable to allocate memory\n");
exit(-1);
}
for (k = 0; k < nbins + 1; k++)
{
binb[k] = cos(pow(10, log10(min_arcmin) +
k*1.0 / bins_per_dec) / 60.0*D2R);
}
double *binbCuda;
cudaMalloc((void **)&binbCuda, (nbins + 1) * sizeof(double));
if (cudaSuccess != cudaGetLastError()) {
printf("error cudaMalloc binbcuda\n");
}
cudaMemcpy(binbCuda, binb, (nbins + 1) * sizeof(double), cudaMemcpyHostToDevice);
if (cudaSuccess != cudaGetLastError()) {
printf("error cudaMemcpy binbcuda\n");
}
// memory for DD
DD = (long long*)malloc(memsize);
if (DD == NULL)
{
fprintf(stderr, "Unable to allocate memory\n");
exit(-1);
}
bzero(DD, memsize);
// memory for RR
RRS = (unsigned long long int*)malloc(memsize2);
if (RRS == NULL)
{
fprintf(stderr, "Unable to allocate memory\n");
exit(-1);
}
bzero(RRS, memsize2);
//printf("memsize %d\n", memsize);
cudaMalloc((void **)&RRSCuda, memsize2);
if (cudaSuccess != cudaGetLastError()) {
printf("error cudaMalloc rrs\n");
}
cudaMemset(RRSCuda, 0, memsize2);
if (cudaSuccess != cudaGetLastError()) {
printf("error cudaMalloc rrs\n");
}
// memory for DR
DRS = (unsigned long long int*)malloc(memsize2);
if (DRS == NULL)
{
fprintf(stderr, "Unable to allocate memory\n");
exit(-1);
}
bzero(DRS, memsize2);
cudaMalloc((void **)&DRSCuda, memsize2);
if (cudaSuccess != cudaGetLastError()) {
printf("error cudaMalloc drs\n");
}
cudaMemset(DRSCuda, 0, memsize2);
if (cudaSuccess != cudaGetLastError()) {
printf("error cudaMalloc drs\n");
}
// memory for input data
data = (struct cartesian*)malloc
(args.npoints * sizeof(struct cartesian));
if (data == NULL)
{
fprintf(stderr,
"Unable to allocate memory for % data points (#1)\n",
args.npoints);
return(0);
}
random = (struct cartesian*)malloc
(args.npoints * sizeof(struct cartesian));
if (random == NULL)
{
fprintf(stderr,
"Unable to allocate memory for % data points (#2)\n",
args.npoints);
return(0);
}
printf("Min distance: %f arcmin\n", min_arcmin);
printf("Max distance: %f arcmin\n", max_arcmin);
printf("Bins per dec: %i\n", bins_per_dec);
printf("Total bins : %i\n", nbins);
// read data file
pb_SwitchToTimer(&timers, pb_TimerID_IO);
npd = readdatafile(params->inpFiles[0], data, args.npoints);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
if (npd != args.npoints)
{
fprintf(stderr,
"Error: read %i data points out of %i\n",
npd, args.npoints);
return(0);
}
// compute DD
doCompute(data, npd, NULL, 0, 1, DD, nbins, binb);
// loop through random data files
for (rf = 0; rf < args.random_count; rf++)
{
// read random file
pb_SwitchToTimer(&timers, pb_TimerID_IO);
npr = readdatafile(params->inpFiles[rf + 1], random, args.npoints);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
if (npr != args.npoints)
{
fprintf(stderr,
"Error: read %i random points out of %i in file %s\n",
npr, args.npoints, params->inpFiles[rf + 1]);
return(0);
}
// compute RR
parallelDoCompute(random, npr, NULL, 0, 1, RRSCuda, nbins, binbCuda);
// compute DR
parallelDoCompute(data, npd, random, npr, 0, DRSCuda, nbins, binbCuda);
}
cudaDeviceSynchronize();
if (cudaSuccess != cudaGetLastError()) {
printf("errorr device sync\n");
}
cudaMemcpy(RRS, RRSCuda, memsize2, cudaMemcpyDeviceToHost);
cudaMemcpy(DRS, DRSCuda, memsize2, cudaMemcpyDeviceToHost);
// compute and output results
if ((outfile = fopen(params->outFile, "w")) == NULL)
{
fprintf(stderr,
"Unable to open output file %s for writing, assuming stdout\n",
params->outFile);
outfile = stdout;
}
ctime2 = cpu_time();
ctime = ctime2 - ctime1;
results->time = ctime;
results->val_size = nbins * 3;
results->value = (double*)malloc(sizeof(double) * nbins * 3);
pb_SwitchToTimer(&timers, pb_TimerID_IO);
for (k = 1; k < nbins + 1; k++)
{
fprintf(outfile, "%lld\n%llu\n%llu\n", DD[k], DRS[k], RRS[k]);
results->value[(k - 1) * 3] = DD[k];
results->value[(k - 1) * 3 + 1] = DRS[k];
results->value[(k - 1) * 3 + 2] = RRS[k];
// printf("upisujem %d %d %d\n", (k-1) * 3, (k-1)*3 + 1, (k-1) * 3 + 2);
}
// printf("hmm seq ? %d %d\n", DRS[k], RRS[k]);
if (outfile != stdout)
fclose(outfile);
// free memory
free(data);
free(random);
free(binb);
free(DD);
free(RRS);
free(DRS);
cudaFree(RRSCuda);
cudaFree(DRSCuda);
cudaFree(binbCuda);
pb_SwitchToTimer(&timers, pb_TimerID_NONE);
pb_PrintTimerSet(&timers);
return 0;
}
int main(int argc, char * argv[]) {
Result_Vect seq_result, par_result;
struct pb_Parameters *params;
params = pb_ReadParameters( &argc, argv );
options args;
parse_args( argc, argv, &args );
sequential(params, args, &seq_result);
parallel(params, args, &par_result);
pb_FreeParameters( params );
compare_and_print_vect(seq_result, par_result, "tpacf");
}
|
30e4e1a1f8d1a4d24858a1fd53678d146bec8eb1.hip | // !!! This is a file automatically generated by hipify!!!
char **d_data;
char **h_data = (char**)malloc(N*sizeof(char*));
for (int i = 0; i < N; i++) {
hipMalloc(&h_data[i], N);
hipMemcpy(h_data[i], data[i], N, ...);
}
hipMalloc(&d_data, N*sizeof(char*));
hipMemcpy(d_data, h_data, N*sizeof(char*), ...);
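// Pattern shown above: each row buffer is allocated on the device and its device
// pointer recorded in the host-side array h_data; the pointer array itself is
// then copied to d_data so a kernel can index d_data[i]. The memcpy kinds and the
// source row data are left elided ("...") in this fragment.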
| 30e4e1a1f8d1a4d24858a1fd53678d146bec8eb1.cu |
char **d_data;
char **h_data = (char**)malloc(N*sizeof(char*));
for (int i = 0; i < N; i++) {
cudaMalloc(&h_data[i], N);
cudaMemcpy(h_data[i], data[i], N, ...);
}
cudaMalloc(&d_data, N*sizeof(char*));
cudaMemcpy(d_data, h_data, N*sizeof(char*), ...);
|
ff3ebdf211f5ae5da4b3d8dcca0b7267497eb281.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <iostream>
// CUDA kernel for naive matrix multiplication
__global__ void MatrixMul(int* a, int* b, int* c, int n) {
// row
int row = (blockIdx.y * blockDim.y) + threadIdx.y;
//col
int col = (blockIdx.x * blockDim.x) + threadIdx.x;
int temp_sum = 0;
// boundary guard
if ((row < n) && (col < n)) {
for (int k = 0; k < n; k++)
{
temp_sum += a[row*n+k]*b[k*n+col];
}
c[row*n+col] = temp_sum;
}
}
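// Mapping note: each thread computes one element c[row*n + col] of the n x n
// product; with n = 1 << 10 and the 16 x 16 blocks configured in main() below,
// the launch uses a 64 x 64 grid of blocks.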
// Initialize
void Mat_init(int* a, int n) {
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
a[i * n + j] = rand() % 100;
}
}
}
// Check MatrixMul result against a CPU reference
void check_answer(int* a, int* b, int* c, int n) {
int* result = (int*)calloc(n * n, sizeof(int)); // zero-initialize so the += accumulation starts from 0
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
{
for (int k = 0; k < n; k++)
{
result[i * n + j] += a[i * n + k] * b[k * n + j];
}
}
}
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
{
assert(c[i * n + j] == result[i * n + j]);
}
}
}
int main() {
// matrix of size 1024 x 1024
int n = 1 << 10;
//host memory pointers
int* h_a, * h_b, * h_c;
// Allocation size for all vectors
size_t bytes = sizeof(int) * n * n;
h_a = (int*)malloc(bytes);
h_b = (int*)malloc(bytes);
h_c = (int*)malloc(bytes);
//device memory pointers
int* d_a, * d_b, * d_c;
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
// Initialize matrices a and b with random values between 0 and 99
Mat_init(h_a, n);
Mat_init(h_b, n);
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
// Threadblock size
int BLOCKS = 16;
// Grid size
int GRID = (int)ceil((double)n / BLOCKS); // avoid integer-division truncation before ceil
//use dim3 objects
dim3 grid(GRID, GRID);
dim3 threads(BLOCKS, BLOCKS);
// Launch kernel on default stream w/o shmem
hipLaunchKernelGGL(( MatrixMul) , dim3(grid), dim3(threads) , 0, 0, d_a, d_b, d_c, n);
//copy result back to host
hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost);
// Check result for errors
check_answer(h_a, h_b, h_c, n);
free(h_a);
free(h_b);
free(h_c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
printf("COMPLETED SUCCESFULLY\n");
return 0;
} | ff3ebdf211f5ae5da4b3d8dcca0b7267497eb281.cu | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <iostream>
// CUDA kernel for naive matrix multiplication
__global__ void MatrixMul(int* a, int* b, int* c, int n) {
// row
int row = (blockIdx.y * blockDim.y) + threadIdx.y;
//col
int col = (blockIdx.x * blockDim.x) + threadIdx.x;
int temp_sum = 0;
// boundary guard
if ((row < n) && (col < n)) {
for (int k = 0; k < n; k++)
{
temp_sum += a[row*n+k]*b[k*n+col];
}
c[row*n+col] = temp_sum;
}
}
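// Mapping note: each thread computes one element c[row*n + col] of the n x n
// product; with n = 1 << 10 and the 16 x 16 blocks configured in main() below,
// the launch uses a 64 x 64 grid of blocks.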
// Initialize
void Mat_init(int* a, int n) {
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
a[i * n + j] = rand() % 100;
}
}
}
// Check MatrixMul result against a CPU reference
void check_answer(int* a, int* b, int* c, int n) {
int* result = (int*)calloc(n * n, sizeof(int)); // zero-initialize so the += accumulation starts from 0
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
{
for (int k = 0; k < n; k++)
{
result[i * n + j] += a[i * n + k] * b[k * n + j];
}
}
}
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
{
assert(c[i * n + j] == result[i * n + j]);
}
}
}
int main() {
// matrix of size 1024 x 1024
int n = 1 << 10;
//host memory pointers
int* h_a, * h_b, * h_c;
// Allocation size for all vectors
size_t bytes = sizeof(int) * n * n;
h_a = (int*)malloc(bytes);
h_b = (int*)malloc(bytes);
h_c = (int*)malloc(bytes);
//device memory pointers
int* d_a, * d_b, * d_c;
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
// Initialize matrices a and b with random values between 0 and 99
Mat_init(h_a, n);
Mat_init(h_b, n);
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
// Threadblock size
int BLOCKS = 16;
// Grid size
int GRID = (int)ceil((double)n / BLOCKS); // avoid integer-division truncation before ceil
//use dim3 objects
dim3 grid(GRID, GRID);
dim3 threads(BLOCKS, BLOCKS);
// Launch kernel on default stream w/o shmem
MatrixMul <<<grid, threads >>> (d_a, d_b, d_c, n);
//copy result back to host
cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
// Check result for errors
check_answer(h_a, h_b, h_c, n);
free(h_a);
free(h_b);
free(h_c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
printf("COMPLETED SUCCESFULLY\n");
return 0;
} |
8aafd741e6d11bef2ee1bbb6ceb1576124214510.hip | // !!! This is a file automatically generated by hipify!!!
//============================================================================
// Name : hpc_final_projectSM.cu
// Author : Rashi Goyal
// Copyright : Your copyright notice
// Description : Color to Grayscale using CUDA & C++,
// To Run : nvcc hpc_final_projectSM.cu -lcublas -o hpc_final_projectSM.out
// Note : Please see report to understand how to run the code to get
// different outputs
//============================================================================
#include <iostream>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <ctime> // clock(), clock_t, CLOCKS_PER_SEC
// #include "main.h"
// CUDA runtime
#include <hip/hip_runtime.h>
#include <rocblas.h>
#define TILE_WIDTH 8
// kernel implementation for Matrix Multiplication Naive (Non Shared)
__global__ void Convert_to_Grey_2d( int *d_gpu_matrix_in , int *d_gpu_matrix_out , const int WIDTH ){
// calculate row & col values for current thread
unsigned int col = TILE_WIDTH*blockIdx.x + threadIdx.x ;
unsigned int row = TILE_WIDTH*blockIdx.y + threadIdx.y ;
// calculate index using naive logic
int index=col*WIDTH + row;
int rgbvalue=d_gpu_matrix_in[index];
// extract RGB values from the Pixel data matrix
int blue = rgbvalue % 1000;
int green = ((rgbvalue % 1000000)-blue)/1000;
int red = ((rgbvalue / 1000000)-1000);
// calculate grey scale value from RGB values
d_gpu_matrix_out[index]=(int)((red*.299f) + (green*.587f) + (blue*.114f));
}
// kernel implementation for Matrix Multiplication Naive (Shared)
__global__ void Convert_to_Grey_shared( int *d_gpu_matrix_in , int *d_gpu_matrix_out , const int WIDTH ){
	// one shared-memory slot per thread: a single shared int here would be written
	// concurrently by every thread in the block (a data race)
	__shared__ int rgbvalues[TILE_WIDTH*TILE_WIDTH];
// calculate row & col values for current thread
unsigned int col = TILE_WIDTH*blockIdx.x + threadIdx.x ;
unsigned int row = TILE_WIDTH*blockIdx.y + threadIdx.y ;
// calculate index using naive logic
int index=col*WIDTH + row;
	rgbvalues[threadIdx.y*TILE_WIDTH + threadIdx.x] = d_gpu_matrix_in[index];
	int rgbvalue = rgbvalues[threadIdx.y*TILE_WIDTH + threadIdx.x];
// extract RGB values from the Pixel data matrix
int blue = rgbvalue % 1000;
int green = ((rgbvalue % 1000000)-blue)/1000;
int red = ((rgbvalue / 1000000)-1000);
// calculate grey scale value from RGB values
d_gpu_matrix_out[index]=(int)((red*.299f) + (green*.587f) + (blue*.114f));
}
// structure to write data into file
struct lwrite
{
unsigned long value;
unsigned size;
lwrite( unsigned long value, unsigned size ):
value( value ), size( size )
{ }
};
// method to define operator for file operations
inline std::ostream& operator << ( std::ostream& outs, const lwrite& v )
{
unsigned long value = v.value;
for (unsigned cntr = 0; cntr < v.size; cntr++, value >>= 8)
outs.put( static_cast <char> (value & 0xFF) );
return outs;
}
// method to read data from fstream
template <typename Type>
void read(std::ifstream &fp, Type &result, std::size_t size) {
fp.read(reinterpret_cast<char*>(&result), size);
}
// Bitmap structure to store Bitmap Metadata
struct BMP
{
typedef int FXPT2DOT30;
typedef struct {
FXPT2DOT30 ciexyzX;
FXPT2DOT30 ciexyzY;
FXPT2DOT30 ciexyzZ;
} CIEXYZ;
typedef struct {
CIEXYZ ciexyzRed;
CIEXYZ ciexyzGreen;
CIEXYZ ciexyzBlue;
} CIEXYZTRIPLE;
// structure to store Bitmap Headers Metadata
struct {
unsigned short bfType; //type of format
unsigned int bfSize; //file size
unsigned short bfReserved1;
unsigned short bfReserved2;
unsigned int bfOffBits;
} BITMAPFILEHEADER;
// structure to store Bitmap Info Headers Metadata
struct {
unsigned int biSize;
unsigned int biWidth; //width of image
unsigned int biHeight; //height of image
unsigned short biPlanes;
unsigned short biBitCount;
unsigned int biCompression; //type of compression done
unsigned int biSizeImage;
unsigned int biXPelsPerMeter;
unsigned int biYPelsPerMeter;
unsigned int biClrUsed;
unsigned int biClrImportant;
unsigned int biRedMask; //Red value of pixel
unsigned int biGreenMask; //Green value of pixel
unsigned int biBlueMask; //Blue value of Pixel
unsigned int biAlphaMask; //alpha value of pixel
unsigned int biCSType;
CIEXYZTRIPLE biEndpoints;
unsigned int biGammaRed;
unsigned int biGammaGreen;
unsigned int biGammaBlue;
unsigned int biIntent;
unsigned int biProfileData;
unsigned int biProfileSize;
unsigned int biReserved;
} BITMAPINFOHEADER;
};
// structure to store RGBA values
typedef struct {
unsigned char rgbBlue;
unsigned char rgbGreen;
unsigned char rgbRed;
unsigned char rgbReserved;
} RGBQUAD;
// method to extract RGBA 8 bits from BITMAP Data
unsigned char bitextract(const unsigned int byte, const unsigned int mask) {
if (mask == 0) {
return 0;
}
int maskBufer = mask, maskPadding = 0;
while (!(maskBufer & 1)) {
maskBufer >>= 1;
maskPadding++;
}
return (byte & mask) >> maskPadding;
}
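// Illustrative sketch (not part of the original program): process_bitmap_file() below packs
// each pixel into a single int as the decimal number 1RRRGGGBBB (e.g. R=001 G=019 B=255 ->
// 1001019255), and the kernels above decode it with the same % and / arithmetic. These two
// helpers only restate that encoding; they are not called anywhere in the original code.
static inline int pack_rgb_decimal(int red, int green, int blue) {
	return 1000000000 + red * 1000000 + green * 1000 + blue;
}
static inline void unpack_rgb_decimal(int rgbValue, int &red, int &green, int &blue) {
	blue = rgbValue % 1000;
	green = ((rgbValue % 1000000) - blue) / 1000;
	red = (rgbValue / 1000000) - 1000;
}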
// method to process BITMAP Data
void process_bitmap_file()
{
std::cout<<std::endl<<std::endl<<std::endl<<"################### High Performance Computing Project!! (Colored to Greyscale Conversion) ###################"<<std::endl<<std::endl;
/* Read Bitmap file */
char *fileName = (char *)"bmp_image1.bmp";
/* Validate if file is opening or not */
std::ifstream fileStream(fileName, std::ifstream::binary);
if (!fileStream) {
std::cout << "Error opening file '" << fileName << "'." << std::endl;
}
BMP bmp;
/* Read BITMAP file headers */
read(fileStream, bmp.BITMAPFILEHEADER.bfType, sizeof(bmp.BITMAPFILEHEADER.bfType));
read(fileStream, bmp.BITMAPFILEHEADER.bfSize, sizeof(bmp.BITMAPFILEHEADER.bfSize));
read(fileStream, bmp.BITMAPFILEHEADER.bfReserved1, sizeof(bmp.BITMAPFILEHEADER.bfReserved1));
read(fileStream, bmp.BITMAPFILEHEADER.bfReserved2, sizeof(bmp.BITMAPFILEHEADER.bfReserved2));
read(fileStream, bmp.BITMAPFILEHEADER.bfOffBits, sizeof(bmp.BITMAPFILEHEADER.bfOffBits));
/* Validate if image is of BMP format */
if (bmp.BITMAPFILEHEADER.bfType != 0x4D42) {
std::cout << "Error: '" << fileName << "' is not BMP file." << std::endl;
}
/* Read BITMAP info headers */
read(fileStream, bmp.BITMAPINFOHEADER.biSize, sizeof(bmp.BITMAPINFOHEADER.biSize));
/* Read if BITMAP headers are greater than 12*/
if (bmp.BITMAPINFOHEADER.biSize >= 12) {
read(fileStream, bmp.BITMAPINFOHEADER.biWidth, sizeof(bmp.BITMAPINFOHEADER.biWidth));
read(fileStream, bmp.BITMAPINFOHEADER.biHeight, sizeof(bmp.BITMAPINFOHEADER.biHeight));
read(fileStream, bmp.BITMAPINFOHEADER.biPlanes, sizeof(bmp.BITMAPINFOHEADER.biPlanes));
read(fileStream, bmp.BITMAPINFOHEADER.biBitCount, sizeof(bmp.BITMAPINFOHEADER.biBitCount));
}
int colorsCount = bmp.BITMAPINFOHEADER.biBitCount >> 3;
if (colorsCount < 3) {
colorsCount = 3;
}
int bitsOnColor = bmp.BITMAPINFOHEADER.biBitCount / colorsCount;
int maskValue = (1 << bitsOnColor) - 1;
/* Read if BITMAP headers are greater than 40 (Bitmap V1)*/
if (bmp.BITMAPINFOHEADER.biSize >= 40) {
read(fileStream, bmp.BITMAPINFOHEADER.biCompression, sizeof(bmp.BITMAPINFOHEADER.biCompression));
read(fileStream, bmp.BITMAPINFOHEADER.biSizeImage, sizeof(bmp.BITMAPINFOHEADER.biSizeImage));
read(fileStream, bmp.BITMAPINFOHEADER.biXPelsPerMeter, sizeof(bmp.BITMAPINFOHEADER.biXPelsPerMeter));
read(fileStream, bmp.BITMAPINFOHEADER.biYPelsPerMeter, sizeof(bmp.BITMAPINFOHEADER.biYPelsPerMeter));
read(fileStream, bmp.BITMAPINFOHEADER.biClrUsed, sizeof(bmp.BITMAPINFOHEADER.biClrUsed));
read(fileStream, bmp.BITMAPINFOHEADER.biClrImportant, sizeof(bmp.BITMAPINFOHEADER.biClrImportant));
}
bmp.BITMAPINFOHEADER.biRedMask = 0;
bmp.BITMAPINFOHEADER.biGreenMask = 0;
bmp.BITMAPINFOHEADER.biBlueMask = 0;
/* Read if BITMAP headers are greater than 52 (Bitmap V2)*/
if (bmp.BITMAPINFOHEADER.biSize >= 52) {
read(fileStream, bmp.BITMAPINFOHEADER.biRedMask, sizeof(bmp.BITMAPINFOHEADER.biRedMask));
read(fileStream, bmp.BITMAPINFOHEADER.biGreenMask, sizeof(bmp.BITMAPINFOHEADER.biGreenMask));
read(fileStream, bmp.BITMAPINFOHEADER.biBlueMask, sizeof(bmp.BITMAPINFOHEADER.biBlueMask));
}
if (bmp.BITMAPINFOHEADER.biRedMask == 0 || bmp.BITMAPINFOHEADER.biGreenMask == 0 || bmp.BITMAPINFOHEADER.biBlueMask == 0) {
bmp.BITMAPINFOHEADER.biRedMask = maskValue << (bitsOnColor * 2);
bmp.BITMAPINFOHEADER.biGreenMask = maskValue << bitsOnColor;
bmp.BITMAPINFOHEADER.biBlueMask = maskValue;
}
/* Read if BITMAP headers are greater than 56 (Bitmap V3)*/
if (bmp.BITMAPINFOHEADER.biSize >= 56) {
read(fileStream, bmp.BITMAPINFOHEADER.biAlphaMask, sizeof(bmp.BITMAPINFOHEADER.biAlphaMask));
} else {
bmp.BITMAPINFOHEADER.biAlphaMask = maskValue << (bitsOnColor * 3);
}
/* Read if BITMAP headers are greater than 108 (Bitmap V4)*/
if (bmp.BITMAPINFOHEADER.biSize >= 108) {
read(fileStream, bmp.BITMAPINFOHEADER.biCSType, sizeof(bmp.BITMAPINFOHEADER.biCSType));
read(fileStream, bmp.BITMAPINFOHEADER.biEndpoints, sizeof(bmp.BITMAPINFOHEADER.biEndpoints));
read(fileStream, bmp.BITMAPINFOHEADER.biGammaRed, sizeof(bmp.BITMAPINFOHEADER.biGammaRed));
read(fileStream, bmp.BITMAPINFOHEADER.biGammaGreen, sizeof(bmp.BITMAPINFOHEADER.biGammaGreen));
read(fileStream, bmp.BITMAPINFOHEADER.biGammaBlue, sizeof(bmp.BITMAPINFOHEADER.biGammaBlue));
}
/* Read if BITMAP headers are greater than 108 (Bitmap V5)*/
if (bmp.BITMAPINFOHEADER.biSize >= 124) {
read(fileStream, bmp.BITMAPINFOHEADER.biIntent, sizeof(bmp.BITMAPINFOHEADER.biIntent));
read(fileStream, bmp.BITMAPINFOHEADER.biProfileData, sizeof(bmp.BITMAPINFOHEADER.biProfileData));
read(fileStream, bmp.BITMAPINFOHEADER.biProfileSize, sizeof(bmp.BITMAPINFOHEADER.biProfileSize));
read(fileStream, bmp.BITMAPINFOHEADER.biReserved, sizeof(bmp.BITMAPINFOHEADER.biReserved));
}
if (bmp.BITMAPINFOHEADER.biSize != 12 && bmp.BITMAPINFOHEADER.biSize != 40 && bmp.BITMAPINFOHEADER.biSize != 52 &&
bmp.BITMAPINFOHEADER.biSize != 56 && bmp.BITMAPINFOHEADER.biSize != 108 && bmp.BITMAPINFOHEADER.biSize != 124) {
std::cout << "Error: Unsupported BMP format." << std::endl;
}
if (bmp.BITMAPINFOHEADER.biBitCount != 16 && bmp.BITMAPINFOHEADER.biBitCount != 24 && bmp.BITMAPINFOHEADER.biBitCount != 32) {
std::cout << "Error: Unsupported BMP bit count." << std::endl;
}
if (bmp.BITMAPINFOHEADER.biCompression != 0 && bmp.BITMAPINFOHEADER.biCompression != 3) {
std::cout << "Error: Unsupported BMP compression." << std::endl;
}
/* Setting up rows & columns in Image data*/
int rows=bmp.BITMAPINFOHEADER.biHeight;
int columns=bmp.BITMAPINFOHEADER.biWidth;
std::cout<<"------------- IMAGE DETAILS ------------- "<<std::endl<<std::endl;
std::cout<<" Image Size (Height,Width) : ("<<rows<<","<<columns<<")"<<std::endl;
std::cout<<" Number of Pixels : "<<rows*columns<<std::endl;
std::cout<<" Image Format : BMP"<<std::endl<<std::endl;
/* Print BITMAP headers*/
std::cout<<"------------- BIT MAP HEADER ------------- "<<std::endl<<std::endl;
std::cout<<"bfType : "<<bmp.BITMAPFILEHEADER.bfType <<std::endl;
std::cout<<"bfSize : "<<bmp.BITMAPFILEHEADER.bfSize <<std::endl;
std::cout<<"bfReserved1 : "<<bmp.BITMAPFILEHEADER.bfReserved1 <<std::endl;
std::cout<<"bfReserved2 : "<<bmp.BITMAPFILEHEADER.bfReserved2 <<std::endl;
std::cout<<"bfOffBits : "<<bmp.BITMAPFILEHEADER.bfOffBits <<std::endl<<std::endl;
std::cout<<"------------- BIT INFO HEADER ------------- "<<std::endl<<std::endl;
std::cout<<"biSize : "<<bmp.BITMAPINFOHEADER.biSize <<std::endl;
std::cout<<"biWidth : "<<bmp.BITMAPINFOHEADER.biWidth <<std::endl;
std::cout<<"biHeight : "<<bmp.BITMAPINFOHEADER.biHeight <<std::endl;
std::cout<<"biPlanes : "<<bmp.BITMAPINFOHEADER.biPlanes <<std::endl;
std::cout<<"biBitCount : "<<bmp.BITMAPINFOHEADER.biBitCount <<std::endl;
std::cout<<"biCompression : "<<bmp.BITMAPINFOHEADER.biCompression <<std::endl;
std::cout<<"biSizeImage : "<<bmp.BITMAPINFOHEADER.biSizeImage <<std::endl;
std::cout<<"biXPelsPerMeter : "<<bmp.BITMAPINFOHEADER.biXPelsPerMeter <<std::endl;
std::cout<<"biYPelsPerMeter : "<<bmp.BITMAPINFOHEADER.biYPelsPerMeter <<std::endl;
std::cout<<"biClrUsed : "<<bmp.BITMAPINFOHEADER.biClrUsed <<std::endl;
std::cout<<"biClrImportant : "<<bmp.BITMAPINFOHEADER.biClrImportant <<std::endl;
std::cout<<"biRedMask : "<<bmp.BITMAPINFOHEADER.biRedMask <<std::endl;
std::cout<<"biGreenMask : "<<bmp.BITMAPINFOHEADER.biGreenMask <<std::endl;
std::cout<<"biBlueMask : "<<bmp.BITMAPINFOHEADER.biBlueMask <<std::endl;
std::cout<<"biAlphaMask : "<<bmp.BITMAPINFOHEADER.biAlphaMask <<std::endl;
std::cout<<"biCSType : "<<bmp.BITMAPINFOHEADER.biCSType <<std::endl;
// std::cout<<"biEndpoints : "<<bmp.BITMAPINFOHEADER.biEndpoints <<std::endl;
std::cout<<"biGammaRed : "<<bmp.BITMAPINFOHEADER.biGammaRed <<std::endl;
std::cout<<"biGammaGreen : "<<bmp.BITMAPINFOHEADER.biGammaGreen <<std::endl;
std::cout<<"biGammaBlue : "<<bmp.BITMAPINFOHEADER.biGammaBlue <<std::endl;
std::cout<<"biIntent : "<<bmp.BITMAPINFOHEADER.biIntent <<std::endl;
std::cout<<"biProfileData : "<<bmp.BITMAPINFOHEADER.biProfileData <<std::endl;
std::cout<<"biProfileSize : "<<bmp.BITMAPINFOHEADER.biProfileSize <<std::endl;
std::cout<<"biReserved : "<<bmp.BITMAPINFOHEADER.biReserved <<std::endl;
int linePadding = ((bmp.BITMAPINFOHEADER.biWidth * (bmp.BITMAPINFOHEADER.biBitCount / 8)) % 4) & 3;
std::cout<<"linePadding : "<<linePadding<<std::endl;
/* Setting up RGBA structure to store Image data*/
RGBQUAD **rgbInfo = new RGBQUAD*[rows];
for (unsigned int i = 0; i < rows; i++) {
rgbInfo[i] = new RGBQUAD[columns];
}
/* Setting up Matrixs(rows * columns) to store RGB values of image data*/
int in_img_data[rows][columns]; // used to store Original Image Data at host
int serial_img_data[rows][columns]; // used to store Converted Image Data at host
int out_img_data[rows][columns]; // used to store Converted Image Data at host
int *d_gpu_matrix_in; // used to store Original Image Data at device
int *d_gpu_matrix_out; // used to store Converted Image Data at device
/* create device array hipMalloc ( (void **)&array_name, sizeofmatrixinbytes) */
hipMalloc((void **) &d_gpu_matrix_in , rows*columns*sizeof(int) ) ;
hipMalloc((void **) &d_gpu_matrix_out , rows*columns*sizeof(int)) ;
/* variables for performance calculations */
clock_t start;
clock_t end;
unsigned int bufer;
/* Starting to read bitmap file for RGBA values */
for (unsigned int i = 0; i < bmp.BITMAPINFOHEADER.biHeight; i++) {
for (unsigned int j = 0; j < bmp.BITMAPINFOHEADER.biWidth; j++) {
read(fileStream, bufer, bmp.BITMAPINFOHEADER.biBitCount / 8);
rgbInfo[i][j].rgbRed = bitextract(bufer, bmp.BITMAPINFOHEADER.biRedMask);
rgbInfo[i][j].rgbGreen = bitextract(bufer, bmp.BITMAPINFOHEADER.biGreenMask);
rgbInfo[i][j].rgbBlue = bitextract(bufer, bmp.BITMAPINFOHEADER.biBlueMask);
rgbInfo[i][j].rgbReserved = bitextract(bufer, bmp.BITMAPINFOHEADER.biAlphaMask);
/* storing RGBA values as 1+R+G+B Example R=001 G=019 B=255 will compute RGB Value as rgbValue= 1001019255*/
int rgbValue= 1000000000;
rgbValue=rgbValue+(((int)rgbInfo[i][j].rgbRed) *1000000);
rgbValue=rgbValue+(((int)rgbInfo[i][j].rgbGreen) *1000);
rgbValue=rgbValue+((int)rgbInfo[i][j].rgbBlue);
/* storing data into input matrix for Kernel*/
in_img_data[i][j]=rgbValue;
}
fileStream.seekg(linePadding, std::ios_base::cur);
}
start=clock();
for (unsigned int i = 0; i < bmp.BITMAPINFOHEADER.biHeight; i++) {
for (unsigned int j = 0; j < bmp.BITMAPINFOHEADER.biWidth; j++) {
/* Code for serial execution of Grey Scale computation */
int rgbValue= in_img_data[i][j];
int blue = rgbValue % 1000;
int green = ((rgbValue % 1000000)-blue)/1000;
int red = ((rgbValue / 1000000)-1000);
serial_img_data[i][j] =(int)((red*.299f) + (green*.587f) + (blue*.114f));
}
}
end=clock();
/* Measuring Performance */
double dNumOps =rows*columns;
	double dSeconds = (double)(end-start)/CLOCKS_PER_SEC; // clock() ticks are CLOCKS_PER_SEC per second, not 1000
double gflops = 1.0e-9 * dNumOps/dSeconds;
/* Printing Performance */
std::cout<<"------------- Serial Implementation Performance ------------- "<<std::endl<<std::endl;
std::cout<<" Number of Operations : "<<dNumOps<<std::endl;
std::cout<<" Total time taken : "<<dSeconds*1000<<"ms"<<std::endl;
std::cout<<" GFlop per second : "<<gflops<<std::endl<<std::endl;
/* Starting of CUDA execution */
start=clock();
/* Define dimGrid & dimBlock */
dim3 dimGrid ( rows/TILE_WIDTH+1 , columns/TILE_WIDTH+1 ,1 ) ;
dim3 dimBlock( TILE_WIDTH, TILE_WIDTH, 1 ) ;
/* Transfer memory from Host to Device */
hipMemcpy ( d_gpu_matrix_in , in_img_data , rows*columns*sizeof(int) , hipMemcpyHostToDevice ) ;
/* Displaying Kernel Configurations */
std::cout<<"------------- Kernel Config ------------- "<<std::endl<<std::endl;
std::cout<<" Grid (x,y,z) : ("<<rows/TILE_WIDTH+1 <<","<<columns/TILE_WIDTH+1<<",1)"<<std::endl;
std::cout<<" Block (x,y,z) : ("<<TILE_WIDTH <<","<<TILE_WIDTH<<",1)"<<std::endl<<std::endl;
/* Kernel Execution */
hipLaunchKernelGGL(( Convert_to_Grey_2d) , dim3(dimGrid),dim3(dimBlock), 0, 0, d_gpu_matrix_in ,d_gpu_matrix_out , columns) ;
std::cout<<" Kernel running...."<<std::endl<<std::endl;
/* Transfer memory from Device to Host */
hipMemcpy(out_img_data , d_gpu_matrix_out , rows*columns*sizeof(int) ,hipMemcpyDeviceToHost) ;
/* End of CUDA execution */
end=clock();
/* Measuring Performance */
dNumOps =rows*columns;
	dSeconds = (double)(end-start)/CLOCKS_PER_SEC;
gflops = 1.0e-9 * dNumOps/dSeconds;
/* Printing Performance */
std::cout<<"-------------Kernel Performance ------------- "<<std::endl<<std::endl;
std::cout<<" Number of Operations : "<<dNumOps<<std::endl;
std::cout<<" Total time taken : "<<dSeconds*1000<<"ms"<<std::endl;
std::cout<<" GFlop per second : "<<gflops<<std::endl<<std::endl;
/* Starting of shared kernel CUDA execution */
start=clock();
/* Displaying Kernel Configurations */
std::cout<<"-------------Shared Kernel Config ------------- "<<std::endl<<std::endl;
std::cout<<" Grid (x,y,z) : ("<<rows/TILE_WIDTH+1 <<","<<columns/TILE_WIDTH+1<<",1)"<<std::endl;
std::cout<<" Block (x,y,z) : ("<<TILE_WIDTH <<","<<TILE_WIDTH<<",1)"<<std::endl<<std::endl;
/* Kernel Execution */
hipLaunchKernelGGL(( Convert_to_Grey_shared) , dim3(dimGrid),dim3(dimBlock), 0, 0, d_gpu_matrix_in ,d_gpu_matrix_out , columns) ;
std::cout<<"Shared Kernel running...."<<std::endl<<std::endl;
/* Transfer memory from Device to Host */
hipMemcpy(out_img_data , d_gpu_matrix_out , rows*columns*sizeof(int) ,hipMemcpyDeviceToHost) ;
/* End of CUDA execution */
end=clock();
/* Measuring Performance */
dNumOps =rows*columns;
	dSeconds = (double)(end-start)/CLOCKS_PER_SEC;
gflops = 1.0e-9 * dNumOps/dSeconds;
/* Printing Performance */
std::cout<<"-------------Shared Kernel Performance ------------- "<<std::endl<<std::endl;
std::cout<<" Number of Operations : "<<dNumOps<<std::endl;
std::cout<<" Total time taken : "<<dSeconds*1000<<"ms"<<std::endl;
std::cout<<" GFlop per second : "<<gflops<<std::endl<<std::endl;
/* Start converting to greyscale image in BMP format */
std::cout<<"Converting to GreyScale image"<<std::endl;
std::ofstream f( "grey_bmp_file.bmp",std::ios::out | std::ios::trunc | std::ios::binary );
/* setup variables for BITMAPFILEHEADER */
unsigned long headers_size = 14 // sizeof( BITMAPFILEHEADER )
+ 40; // sizeof( BITMAPINFOHEADER )
unsigned long padding_size = (4 - ((columns * 3) % 4)) % 4;
unsigned long pixel_data_size = rows * ((columns * 3) + padding_size);
/* Setup BITMAPFILEHEADER for grey image in BMP format */
f.put( 'B' ).put( 'M' ); // bfType
f << lwrite( headers_size + pixel_data_size, 4 ); // bfSize
f << lwrite( 0, 2 ); // bfReserved1
f << lwrite( 0, 2 ); // bfReserved2
f << lwrite( headers_size, 4 ); // bfOffBits
/* Setup BITMAPINFOHEADER for grey image in BMP format */
f << lwrite( 40, 4 ); // biSize
f << lwrite( columns, 4 ); // biWidth
f << lwrite( rows, 4 ); // biHeight
f << lwrite( 1, 2 ); // biPlanes
f << lwrite( 24, 2 ); // biBitCount
f << lwrite( 0, 4 ); // biCompression=BI_RGB
f << lwrite( pixel_data_size, 4 ); // biSizeImage
f << lwrite( 0, 4 ); // biXPelsPerMeter
f << lwrite( 0, 4 ); // biYPelsPerMeter
f << lwrite( 0, 4 ); // biClrUsed
f << lwrite( 0, 4 ); // biClrImportant
/* Writing pixel data of grey BMP image */
for (unsigned row = 0; row<rows; row++) // bottom-to-top
{
for (unsigned col = 0; col < columns; col++) // left-to-right
{
unsigned char red, green, blue;
red=(unsigned char)out_img_data[row][col];
green=(unsigned char)out_img_data[row][col];
blue=(unsigned char)out_img_data[row][col];
f.put( static_cast <char> (blue) )
.put( static_cast <char> (green) )
.put( static_cast <char> (red) );
}
if (linePadding) f << lwrite( 0, linePadding );
}
std::cout<<"------------- Processing Completed ------------- "<<std::endl<<std::endl;
}
int main()
{
process_bitmap_file();
}
| 8aafd741e6d11bef2ee1bbb6ceb1576124214510.cu | //============================================================================
// Name : hpc_final_projectSM.cu
// Author : Rashi Goyal
// Copyright : Your copyright notice
// Description : Color to Grayscale using CUDA & C++,
// To Run : nvcc hpc_final_projectSM.cu -lcublas -o hpc_final_projectSM.out
// Note : Please see report to understand how to run the code to get
// different outputs
//============================================================================
#include <iostream>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <ctime> // clock(), clock_t, CLOCKS_PER_SEC
// #include "main.h"
// CUDA runtime
#include <cuda_runtime.h>
#include <cublas_v2.h>
#define TILE_WIDTH 8
// kernel implementation for Matrix Multiplication Naive (Non Shared)
__global__ void Convert_to_Grey_2d( int *d_gpu_matrix_in , int *d_gpu_matrix_out , const int WIDTH ){
// calculate row & col values for current thread
unsigned int col = TILE_WIDTH*blockIdx.x + threadIdx.x ;
unsigned int row = TILE_WIDTH*blockIdx.y + threadIdx.y ;
// calculate index using naive logic
int index=col*WIDTH + row;
int rgbvalue=d_gpu_matrix_in[index];
// extract RGB values from the Pixel data matrix
int blue = rgbvalue % 1000;
int green = ((rgbvalue % 1000000)-blue)/1000;
int red = ((rgbvalue / 1000000)-1000);
// calculate grey scale value from RGB values
d_gpu_matrix_out[index]=(int)((red*.299f) + (green*.587f) + (blue*.114f));
}
// kernel implementation for Matrix Multiplication Naive (Shared)
__global__ void Convert_to_Grey_shared( int *d_gpu_matrix_in , int *d_gpu_matrix_out , const int WIDTH ){
	// one shared-memory slot per thread: a single shared int here would be written
	// concurrently by every thread in the block (a data race)
	__shared__ int rgbvalues[TILE_WIDTH*TILE_WIDTH];
// calculate row & col values for current thread
unsigned int col = TILE_WIDTH*blockIdx.x + threadIdx.x ;
unsigned int row = TILE_WIDTH*blockIdx.y + threadIdx.y ;
// calculate index using naive logic
int index=col*WIDTH + row;
	rgbvalues[threadIdx.y*TILE_WIDTH + threadIdx.x] = d_gpu_matrix_in[index];
	int rgbvalue = rgbvalues[threadIdx.y*TILE_WIDTH + threadIdx.x];
// extract RGB values from the Pixel data matrix
int blue = rgbvalue % 1000;
int green = ((rgbvalue % 1000000)-blue)/1000;
int red = ((rgbvalue / 1000000)-1000);
// calculate grey scale value from RGB values
d_gpu_matrix_out[index]=(int)((red*.299f) + (green*.587f) + (blue*.114f));
}
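// Illustrative sketch (not part of the original program): both kernels above weight the
// channels with the ITU-R BT.601 luma coefficients (0.299, 0.587, 0.114). A shared device
// helper like this one would let them reuse the conversion; it is only an assumed
// refactoring and is not called anywhere in this file.
__device__ __forceinline__ int rgb_to_grey601(int red, int green, int blue) {
	return (int)((red * .299f) + (green * .587f) + (blue * .114f));
}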
// structure to write data into file
struct lwrite
{
unsigned long value;
unsigned size;
lwrite( unsigned long value, unsigned size ):
value( value ), size( size )
{ }
};
// method to define operator for file operations
inline std::ostream& operator << ( std::ostream& outs, const lwrite& v )
{
unsigned long value = v.value;
for (unsigned cntr = 0; cntr < v.size; cntr++, value >>= 8)
outs.put( static_cast <char> (value & 0xFF) );
return outs;
}
// method to read data from fstream
template <typename Type>
void read(std::ifstream &fp, Type &result, std::size_t size) {
fp.read(reinterpret_cast<char*>(&result), size);
}
// Bitmap structure to store Bitmap Metadata
struct BMP
{
typedef int FXPT2DOT30;
typedef struct {
FXPT2DOT30 ciexyzX;
FXPT2DOT30 ciexyzY;
FXPT2DOT30 ciexyzZ;
} CIEXYZ;
typedef struct {
CIEXYZ ciexyzRed;
CIEXYZ ciexyzGreen;
CIEXYZ ciexyzBlue;
} CIEXYZTRIPLE;
// structure to store Bitmap Headers Metadata
struct {
unsigned short bfType; //type of format
unsigned int bfSize; //file size
unsigned short bfReserved1;
unsigned short bfReserved2;
unsigned int bfOffBits;
} BITMAPFILEHEADER;
// structure to store Bitmap Info Headers Metadata
struct {
unsigned int biSize;
unsigned int biWidth; //width of image
unsigned int biHeight; //height of image
unsigned short biPlanes;
unsigned short biBitCount;
unsigned int biCompression; //type of compression done
unsigned int biSizeImage;
unsigned int biXPelsPerMeter;
unsigned int biYPelsPerMeter;
unsigned int biClrUsed;
unsigned int biClrImportant;
unsigned int biRedMask; //Red value of pixel
unsigned int biGreenMask; //Green value of pixel
unsigned int biBlueMask; //Blue value of Pixel
unsigned int biAlphaMask; //alpha value of pixel
unsigned int biCSType;
CIEXYZTRIPLE biEndpoints;
unsigned int biGammaRed;
unsigned int biGammaGreen;
unsigned int biGammaBlue;
unsigned int biIntent;
unsigned int biProfileData;
unsigned int biProfileSize;
unsigned int biReserved;
} BITMAPINFOHEADER;
};
// structure to store RGBA values
typedef struct {
unsigned char rgbBlue;
unsigned char rgbGreen;
unsigned char rgbRed;
unsigned char rgbReserved;
} RGBQUAD;
// method to extract RGBA 8 bits from BITMAP Data
unsigned char bitextract(const unsigned int byte, const unsigned int mask) {
if (mask == 0) {
return 0;
}
int maskBufer = mask, maskPadding = 0;
while (!(maskBufer & 1)) {
maskBufer >>= 1;
maskPadding++;
}
return (byte & mask) >> maskPadding;
}
// method to process BITMAP Data
void process_bitmap_file()
{
std::cout<<std::endl<<std::endl<<std::endl<<"################### High Performance Computing Project!! (Colored to Greyscale Conversion) ###################"<<std::endl<<std::endl;
/* Read Bitmap file */
char *fileName = (char *)"bmp_image1.bmp";
/* Validate if file is opening or not */
std::ifstream fileStream(fileName, std::ifstream::binary);
if (!fileStream) {
std::cout << "Error opening file '" << fileName << "'." << std::endl;
}
BMP bmp;
/* Read BITMAP file headers */
read(fileStream, bmp.BITMAPFILEHEADER.bfType, sizeof(bmp.BITMAPFILEHEADER.bfType));
read(fileStream, bmp.BITMAPFILEHEADER.bfSize, sizeof(bmp.BITMAPFILEHEADER.bfSize));
read(fileStream, bmp.BITMAPFILEHEADER.bfReserved1, sizeof(bmp.BITMAPFILEHEADER.bfReserved1));
read(fileStream, bmp.BITMAPFILEHEADER.bfReserved2, sizeof(bmp.BITMAPFILEHEADER.bfReserved2));
read(fileStream, bmp.BITMAPFILEHEADER.bfOffBits, sizeof(bmp.BITMAPFILEHEADER.bfOffBits));
/* Validate if image is of BMP format */
if (bmp.BITMAPFILEHEADER.bfType != 0x4D42) {
std::cout << "Error: '" << fileName << "' is not BMP file." << std::endl;
}
/* Read BITMAP info headers */
read(fileStream, bmp.BITMAPINFOHEADER.biSize, sizeof(bmp.BITMAPINFOHEADER.biSize));
/* Read if BITMAP headers are greater than 12*/
if (bmp.BITMAPINFOHEADER.biSize >= 12) {
read(fileStream, bmp.BITMAPINFOHEADER.biWidth, sizeof(bmp.BITMAPINFOHEADER.biWidth));
read(fileStream, bmp.BITMAPINFOHEADER.biHeight, sizeof(bmp.BITMAPINFOHEADER.biHeight));
read(fileStream, bmp.BITMAPINFOHEADER.biPlanes, sizeof(bmp.BITMAPINFOHEADER.biPlanes));
read(fileStream, bmp.BITMAPINFOHEADER.biBitCount, sizeof(bmp.BITMAPINFOHEADER.biBitCount));
}
int colorsCount = bmp.BITMAPINFOHEADER.biBitCount >> 3;
if (colorsCount < 3) {
colorsCount = 3;
}
int bitsOnColor = bmp.BITMAPINFOHEADER.biBitCount / colorsCount;
int maskValue = (1 << bitsOnColor) - 1;
/* Read if BITMAP headers are greater than 40 (Bitmap V1)*/
if (bmp.BITMAPINFOHEADER.biSize >= 40) {
read(fileStream, bmp.BITMAPINFOHEADER.biCompression, sizeof(bmp.BITMAPINFOHEADER.biCompression));
read(fileStream, bmp.BITMAPINFOHEADER.biSizeImage, sizeof(bmp.BITMAPINFOHEADER.biSizeImage));
read(fileStream, bmp.BITMAPINFOHEADER.biXPelsPerMeter, sizeof(bmp.BITMAPINFOHEADER.biXPelsPerMeter));
read(fileStream, bmp.BITMAPINFOHEADER.biYPelsPerMeter, sizeof(bmp.BITMAPINFOHEADER.biYPelsPerMeter));
read(fileStream, bmp.BITMAPINFOHEADER.biClrUsed, sizeof(bmp.BITMAPINFOHEADER.biClrUsed));
read(fileStream, bmp.BITMAPINFOHEADER.biClrImportant, sizeof(bmp.BITMAPINFOHEADER.biClrImportant));
}
bmp.BITMAPINFOHEADER.biRedMask = 0;
bmp.BITMAPINFOHEADER.biGreenMask = 0;
bmp.BITMAPINFOHEADER.biBlueMask = 0;
/* Read if BITMAP headers are greater than 52 (Bitmap V2)*/
if (bmp.BITMAPINFOHEADER.biSize >= 52) {
read(fileStream, bmp.BITMAPINFOHEADER.biRedMask, sizeof(bmp.BITMAPINFOHEADER.biRedMask));
read(fileStream, bmp.BITMAPINFOHEADER.biGreenMask, sizeof(bmp.BITMAPINFOHEADER.biGreenMask));
read(fileStream, bmp.BITMAPINFOHEADER.biBlueMask, sizeof(bmp.BITMAPINFOHEADER.biBlueMask));
}
if (bmp.BITMAPINFOHEADER.biRedMask == 0 || bmp.BITMAPINFOHEADER.biGreenMask == 0 || bmp.BITMAPINFOHEADER.biBlueMask == 0) {
bmp.BITMAPINFOHEADER.biRedMask = maskValue << (bitsOnColor * 2);
bmp.BITMAPINFOHEADER.biGreenMask = maskValue << bitsOnColor;
bmp.BITMAPINFOHEADER.biBlueMask = maskValue;
}
/* Read if BITMAP headers are greater than 56 (Bitmap V3)*/
if (bmp.BITMAPINFOHEADER.biSize >= 56) {
read(fileStream, bmp.BITMAPINFOHEADER.biAlphaMask, sizeof(bmp.BITMAPINFOHEADER.biAlphaMask));
} else {
bmp.BITMAPINFOHEADER.biAlphaMask = maskValue << (bitsOnColor * 3);
}
/* Read if BITMAP headers are greater than 108 (Bitmap V4)*/
if (bmp.BITMAPINFOHEADER.biSize >= 108) {
read(fileStream, bmp.BITMAPINFOHEADER.biCSType, sizeof(bmp.BITMAPINFOHEADER.biCSType));
read(fileStream, bmp.BITMAPINFOHEADER.biEndpoints, sizeof(bmp.BITMAPINFOHEADER.biEndpoints));
read(fileStream, bmp.BITMAPINFOHEADER.biGammaRed, sizeof(bmp.BITMAPINFOHEADER.biGammaRed));
read(fileStream, bmp.BITMAPINFOHEADER.biGammaGreen, sizeof(bmp.BITMAPINFOHEADER.biGammaGreen));
read(fileStream, bmp.BITMAPINFOHEADER.biGammaBlue, sizeof(bmp.BITMAPINFOHEADER.biGammaBlue));
}
/* Read if BITMAP headers are greater than 108 (Bitmap V5)*/
if (bmp.BITMAPINFOHEADER.biSize >= 124) {
read(fileStream, bmp.BITMAPINFOHEADER.biIntent, sizeof(bmp.BITMAPINFOHEADER.biIntent));
read(fileStream, bmp.BITMAPINFOHEADER.biProfileData, sizeof(bmp.BITMAPINFOHEADER.biProfileData));
read(fileStream, bmp.BITMAPINFOHEADER.biProfileSize, sizeof(bmp.BITMAPINFOHEADER.biProfileSize));
read(fileStream, bmp.BITMAPINFOHEADER.biReserved, sizeof(bmp.BITMAPINFOHEADER.biReserved));
}
if (bmp.BITMAPINFOHEADER.biSize != 12 && bmp.BITMAPINFOHEADER.biSize != 40 && bmp.BITMAPINFOHEADER.biSize != 52 &&
bmp.BITMAPINFOHEADER.biSize != 56 && bmp.BITMAPINFOHEADER.biSize != 108 && bmp.BITMAPINFOHEADER.biSize != 124) {
std::cout << "Error: Unsupported BMP format." << std::endl;
}
if (bmp.BITMAPINFOHEADER.biBitCount != 16 && bmp.BITMAPINFOHEADER.biBitCount != 24 && bmp.BITMAPINFOHEADER.biBitCount != 32) {
std::cout << "Error: Unsupported BMP bit count." << std::endl;
}
if (bmp.BITMAPINFOHEADER.biCompression != 0 && bmp.BITMAPINFOHEADER.biCompression != 3) {
std::cout << "Error: Unsupported BMP compression." << std::endl;
}
/* Setting up rows & columns in Image data*/
int rows=bmp.BITMAPINFOHEADER.biHeight;
int columns=bmp.BITMAPINFOHEADER.biWidth;
std::cout<<"------------- IMAGE DETAILS ------------- "<<std::endl<<std::endl;
std::cout<<" Image Size (Height,Width) : ("<<rows<<","<<columns<<")"<<std::endl;
std::cout<<" Number of Pixels : "<<rows*columns<<std::endl;
std::cout<<" Image Format : BMP"<<std::endl<<std::endl;
/* Print BITMAP headers*/
std::cout<<"------------- BIT MAP HEADER ------------- "<<std::endl<<std::endl;
std::cout<<"bfType : "<<bmp.BITMAPFILEHEADER.bfType <<std::endl;
std::cout<<"bfSize : "<<bmp.BITMAPFILEHEADER.bfSize <<std::endl;
std::cout<<"bfReserved1 : "<<bmp.BITMAPFILEHEADER.bfReserved1 <<std::endl;
std::cout<<"bfReserved2 : "<<bmp.BITMAPFILEHEADER.bfReserved2 <<std::endl;
std::cout<<"bfOffBits : "<<bmp.BITMAPFILEHEADER.bfOffBits <<std::endl<<std::endl;
std::cout<<"------------- BIT INFO HEADER ------------- "<<std::endl<<std::endl;
std::cout<<"biSize : "<<bmp.BITMAPINFOHEADER.biSize <<std::endl;
std::cout<<"biWidth : "<<bmp.BITMAPINFOHEADER.biWidth <<std::endl;
std::cout<<"biHeight : "<<bmp.BITMAPINFOHEADER.biHeight <<std::endl;
std::cout<<"biPlanes : "<<bmp.BITMAPINFOHEADER.biPlanes <<std::endl;
std::cout<<"biBitCount : "<<bmp.BITMAPINFOHEADER.biBitCount <<std::endl;
std::cout<<"biCompression : "<<bmp.BITMAPINFOHEADER.biCompression <<std::endl;
std::cout<<"biSizeImage : "<<bmp.BITMAPINFOHEADER.biSizeImage <<std::endl;
std::cout<<"biXPelsPerMeter : "<<bmp.BITMAPINFOHEADER.biXPelsPerMeter <<std::endl;
std::cout<<"biYPelsPerMeter : "<<bmp.BITMAPINFOHEADER.biYPelsPerMeter <<std::endl;
std::cout<<"biClrUsed : "<<bmp.BITMAPINFOHEADER.biClrUsed <<std::endl;
std::cout<<"biClrImportant : "<<bmp.BITMAPINFOHEADER.biClrImportant <<std::endl;
std::cout<<"biRedMask : "<<bmp.BITMAPINFOHEADER.biRedMask <<std::endl;
std::cout<<"biGreenMask : "<<bmp.BITMAPINFOHEADER.biGreenMask <<std::endl;
std::cout<<"biBlueMask : "<<bmp.BITMAPINFOHEADER.biBlueMask <<std::endl;
std::cout<<"biAlphaMask : "<<bmp.BITMAPINFOHEADER.biAlphaMask <<std::endl;
std::cout<<"biCSType : "<<bmp.BITMAPINFOHEADER.biCSType <<std::endl;
// std::cout<<"biEndpoints : "<<bmp.BITMAPINFOHEADER.biEndpoints <<std::endl;
std::cout<<"biGammaRed : "<<bmp.BITMAPINFOHEADER.biGammaRed <<std::endl;
std::cout<<"biGammaGreen : "<<bmp.BITMAPINFOHEADER.biGammaGreen <<std::endl;
std::cout<<"biGammaBlue : "<<bmp.BITMAPINFOHEADER.biGammaBlue <<std::endl;
std::cout<<"biIntent : "<<bmp.BITMAPINFOHEADER.biIntent <<std::endl;
std::cout<<"biProfileData : "<<bmp.BITMAPINFOHEADER.biProfileData <<std::endl;
std::cout<<"biProfileSize : "<<bmp.BITMAPINFOHEADER.biProfileSize <<std::endl;
std::cout<<"biReserved : "<<bmp.BITMAPINFOHEADER.biReserved <<std::endl;
int linePadding = ((bmp.BITMAPINFOHEADER.biWidth * (bmp.BITMAPINFOHEADER.biBitCount / 8)) % 4) & 3;
std::cout<<"linePadding : "<<linePadding<<std::endl;
/* Setting up RGBA structure to store Image data*/
RGBQUAD **rgbInfo = new RGBQUAD*[rows];
for (unsigned int i = 0; i < rows; i++) {
rgbInfo[i] = new RGBQUAD[columns];
}
/* Setting up Matrixs(rows * columns) to store RGB values of image data*/
int in_img_data[rows][columns]; // used to store Original Image Data at host
int serial_img_data[rows][columns]; // used to store Converted Image Data at host
int out_img_data[rows][columns]; // used to store Converted Image Data at host
int *d_gpu_matrix_in; // used to store Original Image Data at device
int *d_gpu_matrix_out; // used to store Converted Image Data at device
/* create device array cudaMalloc ( (void **)&array_name, sizeofmatrixinbytes) */
cudaMalloc((void **) &d_gpu_matrix_in , rows*columns*sizeof(int) ) ;
cudaMalloc((void **) &d_gpu_matrix_out , rows*columns*sizeof(int)) ;
/* variables for performance calculations */
clock_t start;
clock_t end;
unsigned int bufer;
/* Starting to read bitmap file for RGBA values */
for (unsigned int i = 0; i < bmp.BITMAPINFOHEADER.biHeight; i++) {
for (unsigned int j = 0; j < bmp.BITMAPINFOHEADER.biWidth; j++) {
read(fileStream, bufer, bmp.BITMAPINFOHEADER.biBitCount / 8);
rgbInfo[i][j].rgbRed = bitextract(bufer, bmp.BITMAPINFOHEADER.biRedMask);
rgbInfo[i][j].rgbGreen = bitextract(bufer, bmp.BITMAPINFOHEADER.biGreenMask);
rgbInfo[i][j].rgbBlue = bitextract(bufer, bmp.BITMAPINFOHEADER.biBlueMask);
rgbInfo[i][j].rgbReserved = bitextract(bufer, bmp.BITMAPINFOHEADER.biAlphaMask);
/* storing RGBA values as 1+R+G+B Example R=001 G=019 B=255 will compute RGB Value as rgbValue= 1001019255*/
int rgbValue= 1000000000;
rgbValue=rgbValue+(((int)rgbInfo[i][j].rgbRed) *1000000);
rgbValue=rgbValue+(((int)rgbInfo[i][j].rgbGreen) *1000);
rgbValue=rgbValue+((int)rgbInfo[i][j].rgbBlue);
/* storing data into input matrix for Kernel*/
in_img_data[i][j]=rgbValue;
}
fileStream.seekg(linePadding, std::ios_base::cur);
}
start=clock();
for (unsigned int i = 0; i < bmp.BITMAPINFOHEADER.biHeight; i++) {
for (unsigned int j = 0; j < bmp.BITMAPINFOHEADER.biWidth; j++) {
/* Code for serial execution of Grey Scale computation */
int rgbValue= in_img_data[i][j];
int blue = rgbValue % 1000;
int green = ((rgbValue % 1000000)-blue)/1000;
int red = ((rgbValue / 1000000)-1000);
serial_img_data[i][j] =(int)((red*.299f) + (green*.587f) + (blue*.114f));
}
}
end=clock();
/* Measuring Performance */
double dNumOps =rows*columns;
	double dSeconds = (double)(end-start)/CLOCKS_PER_SEC; // clock() ticks are CLOCKS_PER_SEC per second, not 1000
double gflops = 1.0e-9 * dNumOps/dSeconds;
/* Printing Performance */
std::cout<<"------------- Serial Implementation Performance ------------- "<<std::endl<<std::endl;
std::cout<<" Number of Operations : "<<dNumOps<<std::endl;
std::cout<<" Total time taken : "<<dSeconds*1000<<"ms"<<std::endl;
std::cout<<" GFlop per second : "<<gflops<<std::endl<<std::endl;
/* Starting of CUDA execution */
start=clock();
/* Define dimGrid & dimBlock */
dim3 dimGrid ( rows/TILE_WIDTH+1 , columns/TILE_WIDTH+1 ,1 ) ;
dim3 dimBlock( TILE_WIDTH, TILE_WIDTH, 1 ) ;
/* Transfer memory from Host to Device */
cudaMemcpy ( d_gpu_matrix_in , in_img_data , rows*columns*sizeof(int) , cudaMemcpyHostToDevice ) ;
/* Displaying Kernel Configurations */
std::cout<<"------------- Kernel Config ------------- "<<std::endl<<std::endl;
std::cout<<" Grid (x,y,z) : ("<<rows/TILE_WIDTH+1 <<","<<columns/TILE_WIDTH+1<<",1)"<<std::endl;
std::cout<<" Block (x,y,z) : ("<<TILE_WIDTH <<","<<TILE_WIDTH<<",1)"<<std::endl<<std::endl;
/* Kernel Execution */
Convert_to_Grey_2d <<<dimGrid,dimBlock>>> ( d_gpu_matrix_in ,d_gpu_matrix_out , columns) ;
std::cout<<" Kernel running...."<<std::endl<<std::endl;
/* Transfer memory from Device to Host */
cudaMemcpy(out_img_data , d_gpu_matrix_out , rows*columns*sizeof(int) ,cudaMemcpyDeviceToHost) ;
/* End of CUDA execution */
end=clock();
/* Measuring Performance */
dNumOps =rows*columns;
	dSeconds = (double)(end-start)/CLOCKS_PER_SEC;
gflops = 1.0e-9 * dNumOps/dSeconds;
/* Printing Performance */
std::cout<<"-------------Kernel Performance ------------- "<<std::endl<<std::endl;
std::cout<<" Number of Operations : "<<dNumOps<<std::endl;
std::cout<<" Total time taken : "<<dSeconds*1000<<"ms"<<std::endl;
std::cout<<" GFlop per second : "<<gflops<<std::endl<<std::endl;
/* Starting of shared kernel CUDA execution */
start=clock();
/* Displaying Kernel Configurations */
std::cout<<"-------------Shared Kernel Config ------------- "<<std::endl<<std::endl;
std::cout<<" Grid (x,y,z) : ("<<rows/TILE_WIDTH+1 <<","<<columns/TILE_WIDTH+1<<",1)"<<std::endl;
std::cout<<" Block (x,y,z) : ("<<TILE_WIDTH <<","<<TILE_WIDTH<<",1)"<<std::endl<<std::endl;
/* Kernel Execution */
Convert_to_Grey_shared <<<dimGrid,dimBlock>>> ( d_gpu_matrix_in ,d_gpu_matrix_out , columns) ;
std::cout<<"Shared Kernel running...."<<std::endl<<std::endl;
/* Transfer memory from Device to Host */
cudaMemcpy(out_img_data , d_gpu_matrix_out , rows*columns*sizeof(int) ,cudaMemcpyDeviceToHost) ;
/* End of CUDA execution */
end=clock();
/* Measuring Performance */
dNumOps =rows*columns;
	dSeconds = (double)(end-start)/CLOCKS_PER_SEC;
gflops = 1.0e-9 * dNumOps/dSeconds;
/* Printing Performance */
std::cout<<"-------------Shared Kernel Performance ------------- "<<std::endl<<std::endl;
std::cout<<" Number of Operations : "<<dNumOps<<std::endl;
std::cout<<" Total time taken : "<<dSeconds*1000<<"ms"<<std::endl;
std::cout<<" GFlop per second : "<<gflops<<std::endl<<std::endl;
/* Start converting to greyscale image in BMP format */
std::cout<<"Converting to GreyScale image"<<std::endl;
std::ofstream f( "grey_bmp_file.bmp",std::ios::out | std::ios::trunc | std::ios::binary );
/* setup variables for BITMAPFILEHEADER */
unsigned long headers_size = 14 // sizeof( BITMAPFILEHEADER )
+ 40; // sizeof( BITMAPINFOHEADER )
unsigned long padding_size = (4 - ((columns * 3) % 4)) % 4;
unsigned long pixel_data_size = rows * ((columns * 3) + padding_size);
/* Setup BITMAPFILEHEADER for grey image in BMP format */
f.put( 'B' ).put( 'M' ); // bfType
f << lwrite( headers_size + pixel_data_size, 4 ); // bfSize
f << lwrite( 0, 2 ); // bfReserved1
f << lwrite( 0, 2 ); // bfReserved2
f << lwrite( headers_size, 4 ); // bfOffBits
/* Setup BITMAPINFOHEADER for grey image in BMP format */
f << lwrite( 40, 4 ); // biSize
f << lwrite( columns, 4 ); // biWidth
f << lwrite( rows, 4 ); // biHeight
f << lwrite( 1, 2 ); // biPlanes
f << lwrite( 24, 2 ); // biBitCount
f << lwrite( 0, 4 ); // biCompression=BI_RGB
f << lwrite( pixel_data_size, 4 ); // biSizeImage
f << lwrite( 0, 4 ); // biXPelsPerMeter
f << lwrite( 0, 4 ); // biYPelsPerMeter
f << lwrite( 0, 4 ); // biClrUsed
f << lwrite( 0, 4 ); // biClrImportant
/* Writing pixel data of grey BMP image */
for (unsigned row = 0; row<rows; row++) // bottom-to-top
{
for (unsigned col = 0; col < columns; col++) // left-to-right
{
unsigned char red, green, blue;
red=(unsigned char)out_img_data[row][col];
green=(unsigned char)out_img_data[row][col];
blue=(unsigned char)out_img_data[row][col];
f.put( static_cast <char> (blue) )
.put( static_cast <char> (green) )
.put( static_cast <char> (red) );
}
if (linePadding) f << lwrite( 0, linePadding );
}
std::cout<<"------------- Processing Completed ------------- "<<std::endl<<std::endl;
}
int main()
{
process_bitmap_file();
}
|
0fbad9a7f377c97aa6905e6697729db420069919.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "rtv.h"
#include "rply.h"
#include "timer.h"
#include <cfloat>
#include <cmath> // sqrtf, std::tan
#include <algorithm>
#include <iostream>
#include <iomanip>
#include <vector>
#include <fstream>
//---------------------------------------------------------------------------------------------------------------------
#define HIT_GPU
#define BLOCKSIZE 512
#ifdef HIT_CPU
#define TRI_LIMIT 8
#define RAY_LIMIT 8
#elif defined(HIT_GPU)
#define TRI_LIMIT 8192
#define RAY_LIMIT 32768
#endif
#define HIT_EPSILON 1e-6f
//---------------------------------------------------------------------------------------------------------------------
struct vec3
{
float x,y,z;
__host__ __device__ vec3() : x(0), y(0), z(0) {}
__host__ __device__ explicit vec3(float a) : x(a), y(a), z(a) {}
__host__ __device__ explicit vec3(float* p) : x(p[0]), y(p[1]), z(p[2]) {}
__host__ __device__ vec3(float a, float b, float c) : x(a), y(b), z(c) {}
__host__ __device__ float* ptr() { return &x; }
__host__ __device__ bool operator!=(const vec3& o) const { return x != o.x || y != o.y || z != o.z; }
__host__ __device__ vec3 operator+(const vec3& o) const { return vec3(x+o.x, y+o.y, z+o.z); }
__host__ __device__ vec3 operator-(const vec3& o) const { return vec3(x-o.x, y-o.y, z-o.z); }
__host__ __device__ vec3 operator-() const { return vec3(-x, -y, -z); }
__host__ __device__ void operator+=(const vec3& o) { x+=o.x; y+=o.y; z+=o.z; }
__host__ __device__ vec3 operator*(float a) const { return vec3(x*a, y*a, z*a); }
__host__ __device__ void operator*=(float a) { x*=a; y*=a; z*=a; }
__host__ __device__ float dot(const vec3& o) const { return x*o.x + y*o.y + z*o.z; }
__host__ __device__ vec3 cross(const vec3& o) const { return vec3(y*o.z - z*o.y, z*o.x - x*o.z, x*o.y - y*o.x); }
__host__ __device__ float length() const { return sqrtf(x*x + y*y + z*z); }
__host__ __device__ vec3 normalized() const
{
const float invLen = 1.0f / length();
return vec3(x*invLen, y*invLen, z*invLen);
}
__host__ __device__ vec3 reciprocal() const
{
return vec3(1.0f/x, 1.0f/y, 1.0f/z);
}
__host__ __device__ float operator[](int i) const { return (&x)[i]; }
__host__ __device__ float& operator[](int i) { return (&x)[i]; }
};
struct TriV
{
vec3 v0, v1, v2;
};
struct TriN
{
vec3 n0, n1, n2;
};
struct Ray
{
vec3 o;
vec3 d;
};
struct Box
{
vec3 min, max;
Box() : min(FLT_MAX), max(-FLT_MAX) {}
void expand(const vec3& v)
{
min.x = ::min(min.x, v.x);
min.y = ::min(min.y, v.y);
min.z = ::min(min.z, v.z);
max.x = ::max(max.x, v.x);
max.y = ::max(max.y, v.y);
max.z = ::max(max.z, v.z);
}
void expand(const TriV& tri)
{
expand(tri.v0);
expand(tri.v1);
expand(tri.v2);
}
};
struct Canvas
{
bool operator!=(const Canvas& o) { return w != o.w || h != o.h; }
int w;
int h;
};
struct LookAt
{
bool operator!=(const LookAt& o) { return eye != o.eye || center != o.center || up != o.up; }
vec3 eye;
vec3 center;
vec3 up;
};
struct Camera
{
vec3 position;
vec3 lowerLeftDir;
vec3 du;
vec3 dv;
int nu;
int nv;
};
struct TriSet
{
TriSet() : ids(0), tris(0), norms(0), count(0), end(0) {}
int* ids;
TriV* tris;
TriN* norms;
int count;
int end;
};
struct RaySet
{
RaySet() : ids(0), rays(0), tmaxs(0), hits(0), count(0), end(0) {}
int* ids;
Ray* rays;
float* tmaxs;
int* hits;
int count;
int end;
};
//---------------------------------------------------------------------------------------------------------------------
__global__ void kernel(TriV* tris, int* triIDs, int numTiles, int lastTileSize, Ray* rays, int* rayIDs, int numRays, float* tmaxs, int* hits)
{
// per-block shared memory
__shared__ char sh_tris_byte[BLOCKSIZE*sizeof(TriV)];
TriV* sh_tris = (TriV*)sh_tris_byte;
// compute this thread's global index
int gThreadIdx = blockIdx.x*blockDim.x + threadIdx.x;
// get this thread's ray information
Ray ray;
float tmax;
if(gThreadIdx < numRays)
{
ray = rays[rayIDs[gThreadIdx]];
tmax = tmaxs[gThreadIdx];
}
// ID of triangle hit, if any
int hit = -1;
// for each tile of triangles
for(int tile = 0; tile < numTiles; ++tile)
{
const bool isLastTile = (tile == numTiles-1);
// load next tile in parallel: each thread loads a separate triangle from global to shared memory
if(!isLastTile || threadIdx.x < lastTileSize)
{
sh_tris[threadIdx.x] = tris[triIDs[tile*blockDim.x + threadIdx.x]];
}
// make sure everything is loaded in shared memory
__syncthreads();
// ---------------------------------------------------------------------------------------------------
if(gThreadIdx < numRays)
{
// for each triangle in shared memory
const int limit = isLastTile ? lastTileSize : blockDim.x;
for(int tid = 0; tid < limit; ++tid)
{
const TriV tri = sh_tris[tid];
/* find vectors for two edges sharing vert0 */
const vec3 edge1 = tri.v1 - tri.v0;
const vec3 edge2 = tri.v2 - tri.v0;
/* begin calculating determinant - also used to calculate U parameter */
const vec3 pvec = ray.d.cross(edge2);
/* if determinant is near zero, ray lies in plane of triangle */
const float det = edge1.dot(pvec);
if(det > -HIT_EPSILON && det < HIT_EPSILON)
continue;
const float inv_det = 1.0f / det;
/* calculate distance from vert0 to ray origin */
const vec3 tvec = ray.o - tri.v0;
/* calculate U parameter and test bounds */
const float u = tvec.dot(pvec) * inv_det;
if(u < 0.0f || u > 1.0f)
continue;
/* prepare to test V parameter */
const vec3 qvec = tvec.cross(edge1);
/* calculate V parameter and test bounds */
const float v = ray.d.dot(qvec) * inv_det;
if(v < 0.0f || u + v > 1.0f)
continue;
/* calculate t, ray hits triangle */
const float f = edge2.dot(qvec) * inv_det;
if((f >= tmax) || (f < -HIT_EPSILON))
continue;
// Have a valid hit point here. Store it.
tmax = f;
hit = tile*blockDim.x + tid;
}
}
// wait for every thread to compute its intersections before looping back and loading new tile
__syncthreads();
// ---------------------------------------------------------------------------------------------------
}
// return final results
if(gThreadIdx < numRays)
{
tmaxs[gThreadIdx] = tmax;
hits[gThreadIdx] = hit;
}
}
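// Illustrative note (not part of the original program): the kernel above stages one TriV per
// thread in shared memory, i.e. BLOCKSIZE * sizeof(TriV) = 512 * 36 = 18432 bytes per block,
// which fits well under the usual 48 KB per-block shared-memory limit. A compile-time guard
// such as this (C++11 static_assert, assumed available) makes that budget explicit.
static_assert(BLOCKSIZE * sizeof(TriV) <= 48 * 1024, "triangle tile exceeds the assumed shared-memory budget");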
//---------------------------------------------------------------------------------------------------------------------
static timer s_t;
static double s_intersectTime = 0.0;
static double s_triSendTime = 0.0;
static double s_raySendTime = 0.0;
static double s_tmaxCopyTime = 0.0;
static double s_tmaxSendTime = 0.0;
static double s_kernelTime = 0.0;
static double s_tmaxReadTime = 0.0;
static double s_hitReadTime = 0.0;
static double s_hitUpdateTime = 0.0;
static double s_splitTime = 0.0;
static double s_partitionTriTime = 0.0;
static double s_partitionRayTime = 0.0;
#define TIMED_CALL(f, t) s_t.restart(); f; t += s_t.msec();
static int s_numKernelCalls = 0;
// GPU data
int* d_triIDs = 0;
TriV* d_tris = 0;
int* d_rayIDs = 0;
Ray* d_rays = 0;
float* d_tmaxs = 0;
int* d_hits = 0;
// Host data, used to read back results temporarily
float* h_tmaxs = 0;
int* h_hits = 0;
#ifdef HIT_GPU
void intersect(const TriSet& triSet, RaySet& raySet)
{
// send current IDs to GPU
TIMED_CALL(hipMemcpy(d_triIDs, triSet.ids, triSet.end*sizeof(int), hipMemcpyDefault), s_triSendTime);
TIMED_CALL(hipMemcpy(d_rayIDs, raySet.ids, raySet.end*sizeof(int), hipMemcpyDefault), s_raySendTime);
// send current tmax's to GPU
s_t.restart();
for(int i = 0; i < raySet.end; ++i)
{
h_tmaxs[i] = raySet.tmaxs[raySet.ids[i]];
}
s_tmaxCopyTime += s_t.msec();
TIMED_CALL(hipMemcpy(d_tmaxs, h_tmaxs, raySet.end*sizeof(float), hipMemcpyDefault), s_tmaxSendTime);
// determine parameters
const int numBlocks = ceilf((float)raySet.end / (float)BLOCKSIZE);
const int numTiles = ceilf((float)triSet.end / (float)BLOCKSIZE);
	const int lastTileSize = (triSet.end % BLOCKSIZE == 0) ? BLOCKSIZE : triSet.end % BLOCKSIZE; // an exact multiple of BLOCKSIZE must map to a full last tile, not an empty one
// launch kernel
s_t.restart();
hipLaunchKernelGGL(( kernel), dim3(numBlocks), dim3(BLOCKSIZE), 0, 0, d_tris, d_triIDs, numTiles, lastTileSize, d_rays, d_rayIDs, raySet.end, d_tmaxs, d_hits);
hipDeviceSynchronize();
s_kernelTime += s_t.msec();
++s_numKernelCalls;
// retrieve all results from GPU
TIMED_CALL(hipMemcpy(h_tmaxs, d_tmaxs, raySet.end*sizeof(float), hipMemcpyDefault), s_tmaxReadTime);
TIMED_CALL(hipMemcpy(h_hits, d_hits, raySet.end*sizeof(int), hipMemcpyDefault), s_hitReadTime);
// update data on CPU
s_t.restart();
for(int i = 0; i < raySet.end; ++i)
{
if(h_hits[i] >= 0)
{
const int rayID = raySet.ids[i];
raySet.tmaxs[rayID] = h_tmaxs[i];
raySet.hits[rayID] = triSet.ids[h_hits[i]];
}
}
s_hitUpdateTime += s_t.msec();
}
#endif
//---------------------------------------------------------------------------------------------------------------------
#ifdef HIT_CPU
void intersect(const TriSet& triSet, RaySet& raySet)
{
for(int t = 0; t < triSet.end; ++t)
{
const int triID = triSet.ids[t];
const TriV& tri = triSet.tris[triID];
for(int r = 0; r < raySet.end; ++r)
{
const int rayID = raySet.ids[r];
Ray& ray = raySet.rays[rayID];
/* find vectors for two edges sharing vert0 */
const vec3 edge1 = tri.v1 - tri.v0;
const vec3 edge2 = tri.v2 - tri.v0;
/* begin calculating determinant - also used to calculate U parameter */
const vec3 pvec = ray.d.cross(edge2);
/* if determinant is near zero, ray lies in plane of triangle */
const float det = edge1.dot(pvec);
if(det > -HIT_EPSILON && det < HIT_EPSILON)
continue;
const float inv_det = 1.0f / det;
/* calculate distance from vert0 to ray origin */
const vec3 tvec = ray.o - tri.v0;
/* calculate U parameter and test bounds */
const float u = tvec.dot(pvec) * inv_det;
if(u < 0.0f || u > 1.0f)
continue;
/* prepare to test V parameter */
const vec3 qvec = tvec.cross(edge1);
/* calculate V parameter and test bounds */
const float v = ray.d.dot(qvec) * inv_det;
if(v < 0.0f || u + v > 1.0f)
continue;
/* calculate t, ray hits triangle */
const float f = edge2.dot(qvec) * inv_det;
if((f >= raySet.tmaxs[rayID]) || (f < -HIT_EPSILON))
continue;
// Have a valid hit point here. Store it.
raySet.tmaxs[rayID] = f;
raySet.hits[rayID] = triID;
}
}
}
#endif
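// Note (added, not in the original source): both intersect() variants above (the GPU kernel's
// inner loop and this CPU fallback) implement the Moller-Trumbore ray/triangle test: build the
// two triangle edges, compute the determinant with a cross product, reject near-degenerate
// cases with HIT_EPSILON, derive the barycentric u and v plus the hit distance t from scalar
// triple products, and keep the hit only when t improves the ray's current tmax.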
void splitBox(const Box& triBox, const TriSet& triSet, const RaySet& raySet, Box& nearBox, Box& farBox)
{
const float dx = triBox.max.x - triBox.min.x;
const float dy = triBox.max.y - triBox.min.y;
const float dz = triBox.max.z - triBox.min.z;
const int axis = (dx > dy && dx > dz)? 0 : (dy > dz)? 1 : 2;
const float pos = (triBox.min[axis] + triBox.max[axis]) * 0.5f;
Box left = triBox;
Box right = triBox;
left.max[axis] = pos;
right.min[axis] = pos;
	const bool leftNear = raySet.rays[0].d[axis] >= 0.0f; // sign of the ray direction along the split axis decides near/far ordering
nearBox = leftNear? left : right;
farBox = leftNear? right : left;
}
void partitionTris(const Box& box, TriSet& triSet)
{
int newEnd = 0;
for(int i = 0; i < triSet.end; ++i)
{
Box triBox;
triBox.expand(triSet.tris[triSet.ids[i]]);
if(box.max.x < triBox.min.x) continue;
if(box.min.x > triBox.max.x) continue;
if(box.max.y < triBox.min.y) continue;
if(box.min.y > triBox.max.y) continue;
if(box.max.z < triBox.min.z) continue;
if(box.min.z > triBox.max.z) continue;
std::swap(triSet.ids[i], triSet.ids[newEnd]);
++newEnd;
}
triSet.end = newEnd;
}
void partitionRays(const Box& box, RaySet& raySet)
{
int newEnd = 0;
for(int i = 0; i < raySet.end; ++i)
{
const Ray& ray = raySet.rays[raySet.ids[i]];
const vec3 invDir = ray.d.reciprocal();
const float tx1 = (box.min.x - ray.o.x) * invDir.x;
const float tx2 = (box.max.x - ray.o.x) * invDir.x;
float tmin = ::min(tx1, tx2);
float tmax = ::max(tx1, tx2);
const float ty1 = (box.min.y - ray.o.y) * invDir.y;
const float ty2 = (box.max.y - ray.o.y) * invDir.y;
tmin = ::max(tmin, ::min(ty1, ty2));
tmax = ::min(tmax, ::max(ty1, ty2));
const float tz1 = (box.min.z - ray.o.z) * invDir.z;
const float tz2 = (box.max.z - ray.o.z) * invDir.z;
tmin = ::max(tmin, ::min(tz1, tz2));
tmax = ::min(raySet.tmaxs[raySet.ids[i]], ::min(tmax, ::max(tz1, tz2)));
if(tmin > tmax) continue;
std::swap(raySet.ids[i], raySet.ids[newEnd]);
++newEnd;
}
raySet.end = newEnd;
}
//---------------------------------------------------------------------------------------------------------------------
void reallocData(const Canvas& canvas, RaySet& raySet)
{
const int npixels = canvas.w * canvas.h;
raySet.count = npixels;
// ---------------- v CPU v ----------------
hipHostFree(raySet.ids);
hipHostMalloc(&raySet.ids, raySet.count*sizeof(int));
for(int i = 0; i < raySet.count; ++i)
{
raySet.ids[i] = i;
}
hipHostFree(raySet.rays);
hipHostMalloc(&raySet.rays, raySet.count*sizeof(Ray));
hipHostFree(raySet.tmaxs);
hipHostMalloc(&raySet.tmaxs, raySet.count*sizeof(float));
hipHostFree(h_tmaxs);
hipHostMalloc(&h_tmaxs, raySet.count*sizeof(float));
hipHostFree(raySet.hits);
hipHostMalloc(&raySet.hits, raySet.count*sizeof(int));
hipHostFree(h_hits);
hipHostMalloc(&h_hits, raySet.count*sizeof(int));
// ---------------- v GPU v ----------------
hipFree(d_rayIDs);
hipMalloc(&d_rayIDs, raySet.count*sizeof(int));
hipFree(d_rays);
hipMalloc(&d_rays, raySet.count*sizeof(Ray));
hipFree(d_tmaxs);
hipMalloc(&d_tmaxs, raySet.count*sizeof(float));
hipFree(d_hits);
hipMalloc(&d_hits, raySet.count*sizeof(int));
}
void updateCamera(const Canvas& canvas, const LookAt& lookAt, Camera& camera)
{
// store position
camera.position = lookAt.eye;
// pre-computations
float invHeight = 1.0f / canvas.h;
float invWidth = 1.0f / canvas.w;
// compute camera basis
vec3 axisW = (lookAt.eye - lookAt.center).normalized();
vec3 axisV = lookAt.up.normalized();
vec3 axisU = axisV.cross(axisW);
// compute half scale factors for each basis vector
float sw = canvas.w * 0.01f; // try to keep directions around zero in floating-point value
float sv = sw * std::tan(0.523598775f); // half 60o in radians
float su = sv * canvas.w * invHeight;
// scale each vector
axisW *= sw;
axisV *= sv;
axisU *= su;
// store final direction
camera.lowerLeftDir = - axisU - axisV - axisW;
// compute full scales
axisV *= 2.0f;
axisU *= 2.0f;
// interpolation deltas
camera.dv = axisV * invHeight - axisU; // also goes back to start of u-axis
camera.du = axisU * invWidth;
// number of pixels in U and V directions
camera.nu = canvas.w;
camera.nv = canvas.h;
}
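// Illustrative worked example (added, not in the original source), assuming a 640x480 canvas:
// sw = 640 * 0.01 = 6.4, sv = 6.4 * tan(30 deg) ~ 3.695, su = sv * 640/480 ~ 4.927, so the
// half-extent along U exceeds the one along V by exactly the aspect ratio, and lowerLeftDir
// points from the eye towards the lower-left corner of that 60-degree-vertical-FOV frustum.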
void generatePrimary(const Camera& camera, RaySet& raySet)
{
vec3 dir = camera.lowerLeftDir;
for(int v = 0; v < camera.nv; ++v)
{
for(int u = 0; u < camera.nu; ++u)
{
Ray& r = raySet.rays[v*camera.nu+u];
r.o = camera.position;
r.d = dir;
dir += camera.du;
}
dir += camera.dv;
}
for(int i = 0; i < raySet.count; ++i)
{
raySet.tmaxs[i] = FLT_MAX;
raySet.hits[i] = -1;
}
hipMemcpy(d_rays, raySet.rays, raySet.count*sizeof(Ray), hipMemcpyDefault);
}
void traceRays(const Box& triBox, TriSet& triSet, RaySet& raySet)
{
#ifdef HIT_GPU
if(triSet.end == 0 || raySet.end == 0)
{
return;
}
if(triSet.end < TRI_LIMIT && raySet.end < RAY_LIMIT)
#endif
#ifdef HIT_CPU
if(triSet.end < TRI_LIMIT || raySet.end < RAY_LIMIT)
#endif
{
static timer s_int_t;
s_int_t.restart();
intersect(triSet, raySet);
s_intersectTime += s_int_t.msec();
return;
}
Box nearBox;
Box farBox;
TIMED_CALL(splitBox(triBox, triSet, raySet, nearBox, farBox), s_splitTime);
int triEnd = triSet.end;
int rayEnd = raySet.end;
TIMED_CALL(partitionTris(nearBox, triSet), s_partitionTriTime);
TIMED_CALL(partitionRays(nearBox, raySet), s_partitionRayTime);
traceRays(nearBox, triSet, raySet);
triSet.end = triEnd;
raySet.end = rayEnd;
TIMED_CALL(partitionTris(farBox, triSet), s_partitionTriTime);
TIMED_CALL(partitionRays(farBox, raySet), s_partitionRayTime);
traceRays(farBox, triSet, raySet);
}
void doTraceRays(const Box& triBox, TriSet& triSet, RaySet& raySet)
{
triSet.end = triSet.count;
raySet.end = raySet.count;
traceRays(triBox, triSet, raySet);
}
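// Note (added, not in the original source): traceRays() is a divide-and-conquer tracer. It
// recursively splits the scene bounds along the longest axis, partitions the active triangle
// and ray ID ranges against each half (near half first, ordered by the first ray's direction),
// and only when an active set drops below TRI_LIMIT / RAY_LIMIT does it hand the brute-force
// triangle-vs-ray test to intersect(), on the GPU in this build.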
void shadePixels(const TriSet& triSet, const RaySet& raySet, unsigned char* pixels)
{
for(int i = 0; i < raySet.count; ++i)
{
unsigned char c = 0;
const int triID = raySet.hits[i];
if(triID >= 0)
{
// recompute hit position
const Ray& ray = raySet.rays[i];
const vec3 hitPos = ray.o + ray.d * raySet.tmaxs[i];
// recompute barycentric coordinates
const TriV& tri = triSet.tris[triID];
const vec3 e0 = tri.v1 - tri.v0;
const vec3 e1 = tri.v2 - tri.v0;
const vec3 e2 = hitPos - tri.v0;
const float d00 = e0.dot(e0);
const float d01 = e0.dot(e1);
const float d11 = e1.dot(e1);
const float d20 = e2.dot(e0);
const float d21 = e2.dot(e1);
const float invDenom = 1.0f / (d00 * d11 - d01 * d01);
const float v = (d11 * d20 - d01 * d21) * invDenom;
const float w = (d00 * d21 - d01 * d20) * invDenom;
const float u = 1.0f - v - w;
// lerp normal
const TriN& norm = triSet.norms[triID];
const vec3 lerpN = (norm.n0*u + norm.n1*v + norm.n2*w).normalized();
// compute final color
c = 255 * lerpN.dot(-ray.d.normalized());
}
pixels[i*3+0] = c;
pixels[i*3+1] = c;
pixels[i*3+2] = c;
}
}
//---------------------------------------------------------------------------------------------------------------------
static Canvas s_canvas;
static LookAt s_lookAt;
static Camera s_camera;
static TriSet s_triSet;
static Box s_triBox;
static RaySet s_raySet;
void reshape(int w, int h)
{
s_canvas.w = w;
s_canvas.h = h;
reallocData(s_canvas, s_raySet);
}
void camera(float* eye, float* center, float* up)
{
s_lookAt.eye = vec3(eye);
s_lookAt.center = vec3(center);
s_lookAt.up = vec3(up);
updateCamera(s_canvas, s_lookAt, s_camera);
}
void render(unsigned char* pixels)
{
timer t;
s_splitTime = 0.0;
s_intersectTime = 0.0;
s_partitionTriTime = 0.0;
s_partitionRayTime = 0.0;
s_triSendTime = 0.0;
s_raySendTime = 0.0;
s_tmaxCopyTime = 0.0;
s_tmaxSendTime = 0.0;
s_kernelTime = 0.0;
s_tmaxReadTime = 0.0;
s_hitReadTime = 0.0;
s_hitUpdateTime = 0.0;
s_numKernelCalls = 0;
std::cout << "---------------------------------" << std::endl;
t.restart();
generatePrimary(s_camera, s_raySet);
std::cout << "generate: " << std::setw(15) << (int)t.msec() << " ms" << std::endl;
t.restart();
doTraceRays(s_triBox, s_triSet, s_raySet);
std::cout << "trace: " << std::setw(15) << (int)t.msec() << " ms" << std::endl;
std::cout << " " << "split: " << std::setw(9) << (int)s_splitTime << " ms" << std::endl;
std::cout << " " << "intersect: " << std::setw(9) << (int)s_intersectTime << " ms" << std::endl;
std::cout << " " << "triSend: " << std::setw(9) << s_triSendTime << " ms" << std::endl;
std::cout << " " << "raySend: " << std::setw(9) << s_raySendTime << " ms" << std::endl;
std::cout << " " << "tmaxCopy: " << std::setw(9) << s_tmaxCopyTime << " ms" << std::endl;
std::cout << " " << "tmaxSend: " << std::setw(9) << s_tmaxSendTime << " ms" << std::endl;
std::cout << " " << "kernel: " << std::setw(9) << s_kernelTime << " ms (count: " << s_numKernelCalls << ", avg: " << s_kernelTime / s_numKernelCalls << " ms)" << std::endl;
std::cout << " " << "tmaxRead: " << std::setw(9) << s_tmaxReadTime << " ms" << std::endl;
std::cout << " " << "hitRead: " << std::setw(9) << s_hitReadTime << " ms" << std::endl;
std::cout << " " << "hitUpdate: " << std::setw(9) << s_hitUpdateTime << " ms" << std::endl;
std::cout << " " << "partitionTri: " << std::setw(9) << (int)s_partitionTriTime << " ms" << std::endl;
std::cout << " " << "partitionRay: " << std::setw(9) << (int)s_partitionRayTime << " ms" << std::endl;
t.restart();
shadePixels(s_triSet, s_raySet, pixels);
std::cout << "shade: " << std::setw(15) << (int)t.msec() << " ms" << std::endl;
}
//---------------------------------------------------------------------------------------------------------------------
void loadScene1tri(TriSet& triSet)
{
TriV tri;
tri.v0 = vec3(-1,-1,0);
tri.v1 = vec3(1,-1,0);
tri.v2 = vec3(0,1,0);
triSet.tris = new TriV[1];
triSet.tris[0] = tri;
TriN norm;
norm.n0 = (tri.v1 - tri.v0).cross(tri.v2-tri.v0).normalized();
norm.n1 = norm.n0;
norm.n2 = norm.n0;
triSet.norms = new TriN[1];
triSet.norms[0] = norm;
triSet.ids = new int[1];
triSet.ids[0] = 0;
triSet.count = 1;
}
float randf(float min, float max)
{
return min + (float)rand()/(float)RAND_MAX * (max - min);
}
void loadSceneRand(TriSet& triSet)
{
triSet.count = 100000;
triSet.tris = new TriV[triSet.count];
triSet.norms = new TriN[triSet.count];
int dst = 0;
srand(122);
for(int i = 0; i < triSet.count; ++i)
{
vec3 c(randf(-5.0f, 5.0f), randf(-5.0f, 5.0f), randf(-5.0f, 5.0f));
TriV tri;
tri.v0 = c + vec3(randf(0.1f, 0.5f), randf(0.1f, 0.5f), randf(0.1f, 0.5f));
tri.v1 = c + vec3(randf(0.1f, 0.5f), randf(0.1f, 0.5f), randf(0.1f, 0.5f));
tri.v2 = c + vec3(randf(0.1f, 0.5f), randf(0.1f, 0.5f), randf(0.1f, 0.5f));
triSet.tris[dst] = tri;
TriN norm;
norm.n0 = (tri.v1 - tri.v0).cross(tri.v2-tri.v0).normalized();
norm.n1 = norm.n0;
norm.n2 = norm.n0;
triSet.norms[dst] = norm;
++dst;
}
}
static std::vector<vec3> vertices;
static std::vector<int> elements;
static int vertex_cb(p_ply_argument argument)
{
long id;
ply_get_argument_user_data(argument, NULL, &id);
if(id == 0)
{
vertices.resize(vertices.size()+1);
}
vertices.back()[id] = ply_get_argument_value(argument) * 50;
return 1;
}
static int face_cb(p_ply_argument argument)
{
long length, value_index;
ply_get_argument_property(argument, NULL, &length, &value_index);
switch (value_index)
{
case 0:
case 1:
case 2:
elements.push_back(ply_get_argument_value(argument));
break;
default:
break;
}
return 1;
}
void loadSceneBunny(TriSet& triSet)
{
p_ply ply = ply_open("/home/potato/Downloads/bunny.ply", NULL, 0, NULL);
if (!ply) exit(1);
if (!ply_read_header(ply)) exit(1);
long nvertices = ply_set_read_cb(ply, "vertex", "x", vertex_cb, NULL, 0);
ply_set_read_cb(ply, "vertex", "y", vertex_cb, NULL, 1);
ply_set_read_cb(ply, "vertex", "z", vertex_cb, NULL, 2);
long ntriangles = ply_set_read_cb(ply, "face", "vertex_indices", face_cb, NULL, 0);
if (!ply_read(ply)) exit(1);
ply_close(ply);
triSet.count = elements.size()/3;
triSet.tris = new TriV[triSet.count];
triSet.norms = new TriN[triSet.count];
std::vector<vec3> normals(vertices.size());
for(unsigned int e = 0; e < elements.size(); e+=3)
{
int e0 = elements[e+0];
int e1 = elements[e+1];
int e2 = elements[e+2];
vec3 v0 = vertices[e0];
vec3 v1 = vertices[e1];
vec3 v2 = vertices[e2];
vec3 n = (v1-v0).cross(v2-v0);
normals[e0] += n;
normals[e1] += n;
normals[e2] += n;
}
int i = 0;
for(unsigned int e = 0; e < elements.size(); e+=3)
{
int e0 = elements[e+0];
int e1 = elements[e+1];
int e2 = elements[e+2];
TriV t;
t.v0 = vertices[e0];
t.v1 = vertices[e1];
t.v2 = vertices[e2];
triSet.tris[i] = t;
TriN n;
n.n0 = normals[e0].normalized();
n.n1 = normals[e1].normalized();
n.n2 = normals[e2].normalized();
triSet.norms[i] = n;
++i;
}
}
void commonSetup(TriSet& triSet, Box& triBox)
{
triSet.ids = new int[triSet.count];
for(int i = 0; i < triSet.count; ++i)
{
triSet.ids[i] = i;
triBox.expand(triSet.tris[i]);
}
hipMalloc(&d_triIDs, triSet.count*sizeof(int));
hipMemcpy(d_triIDs, triSet.ids, triSet.count*sizeof(int), hipMemcpyDefault);
hipMalloc(&d_tris, triSet.count*sizeof(TriV));
hipMemcpy(d_tris, triSet.tris, triSet.count*sizeof(TriV), hipMemcpyDefault);
vec3 center = (triBox.min + triBox.max) * 0.5f;
vec3 eye = center + vec3(0,0,10);
vec3 up(0,1,0);
rtvSetCamera(eye.ptr(), center.ptr(), up.ptr());
}
//---------------------------------------------------------------------------------------------------------------------
int main()
{
rtvInit(1024, 1024);
rtvSetReshapeCallback(reshape);
rtvSetCameraCallback(camera);
rtvSetRenderCallback(render);
// loadScene1tri(s_triSet);
// loadSceneRand(s_triSet);
loadSceneBunny(s_triSet);
commonSetup(s_triSet, s_triBox);
rtvExec();
return 0;
}
| 0fbad9a7f377c97aa6905e6697729db420069919.cu | #include "rtv.h"
#include "rply.h"
#include "timer.h"
#include <cfloat>
#include <algorithm>
#include <iostream>
#include <iomanip>
#include <vector>
#include <fstream>
//---------------------------------------------------------------------------------------------------------------------
#define HIT_GPU
#define BLOCKSIZE 512
#ifdef HIT_CPU
#define TRI_LIMIT 8
#define RAY_LIMIT 8
#elif defined(HIT_GPU)
#define TRI_LIMIT 8192
#define RAY_LIMIT 32768
#endif
#define HIT_EPSILON 1e-6f
//---------------------------------------------------------------------------------------------------------------------
struct vec3
{
float x,y,z;
__host__ __device__ vec3() : x(0), y(0), z(0) {}
__host__ __device__ explicit vec3(float a) : x(a), y(a), z(a) {}
__host__ __device__ explicit vec3(float* p) : x(p[0]), y(p[1]), z(p[2]) {}
__host__ __device__ vec3(float a, float b, float c) : x(a), y(b), z(c) {}
__host__ __device__ float* ptr() { return &x; }
__host__ __device__ bool operator!=(const vec3& o) const { return x != o.x || y != o.y || z != o.z; }
__host__ __device__ vec3 operator+(const vec3& o) const { return vec3(x+o.x, y+o.y, z+o.z); }
__host__ __device__ vec3 operator-(const vec3& o) const { return vec3(x-o.x, y-o.y, z-o.z); }
__host__ __device__ vec3 operator-() const { return vec3(-x, -y, -z); }
__host__ __device__ void operator+=(const vec3& o) { x+=o.x; y+=o.y; z+=o.z; }
__host__ __device__ vec3 operator*(float a) const { return vec3(x*a, y*a, z*a); }
__host__ __device__ void operator*=(float a) { x*=a; y*=a; z*=a; }
__host__ __device__ float dot(const vec3& o) const { return x*o.x + y*o.y + z*o.z; }
__host__ __device__ vec3 cross(const vec3& o) const { return vec3(y*o.z - z*o.y, z*o.x - x*o.z, x*o.y - y*o.x); }
__host__ __device__ float length() const { return sqrtf(x*x + y*y + z*z); }
__host__ __device__ vec3 normalized() const
{
const float invLen = 1.0f / length();
return vec3(x*invLen, y*invLen, z*invLen);
}
__host__ __device__ vec3 reciprocal() const
{
return vec3(1.0f/x, 1.0f/y, 1.0f/z);
}
__host__ __device__ float operator[](int i) const { return (&x)[i]; }
__host__ __device__ float& operator[](int i) { return (&x)[i]; }
};
struct TriV
{
vec3 v0, v1, v2;
};
struct TriN
{
vec3 n0, n1, n2;
};
struct Ray
{
vec3 o;
vec3 d;
};
struct Box
{
vec3 min, max;
Box() : min(FLT_MAX), max(-FLT_MAX) {}
void expand(const vec3& v)
{
min.x = std::min(min.x, v.x);
min.y = std::min(min.y, v.y);
min.z = std::min(min.z, v.z);
max.x = std::max(max.x, v.x);
max.y = std::max(max.y, v.y);
max.z = std::max(max.z, v.z);
}
void expand(const TriV& tri)
{
expand(tri.v0);
expand(tri.v1);
expand(tri.v2);
}
};
struct Canvas
{
bool operator!=(const Canvas& o) { return w != o.w || h != o.h; }
int w;
int h;
};
struct LookAt
{
bool operator!=(const LookAt& o) { return eye != o.eye || center != o.center || up != o.up; }
vec3 eye;
vec3 center;
vec3 up;
};
struct Camera
{
vec3 position;
vec3 lowerLeftDir;
vec3 du;
vec3 dv;
int nu;
int nv;
};
struct TriSet
{
TriSet() : ids(0), tris(0), norms(0), count(0), end(0) {}
int* ids;
TriV* tris;
TriN* norms;
int count;
int end;
};
struct RaySet
{
RaySet() : ids(0), rays(0), tmaxs(0), hits(0), count(0), end(0) {}
int* ids;
Ray* rays;
float* tmaxs;
int* hits;
int count;
int end;
};
//---------------------------------------------------------------------------------------------------------------------
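// Intersection kernel: one thread per active ray. Triangles are streamed through shared
// memory in BLOCKSIZE-sized tiles; each thread runs a Moller-Trumbore style ray/triangle
// test against every triangle in the tile and keeps the closest hit (smallest tmax).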
__global__ void kernel(TriV* tris, int* triIDs, int numTiles, int lastTileSize, Ray* rays, int* rayIDs, int numRays, float* tmaxs, int* hits)
{
// per-block shared memory
__shared__ char sh_tris_byte[BLOCKSIZE*sizeof(TriV)];
TriV* sh_tris = (TriV*)sh_tris_byte;
// compute this thread's global index
int gThreadIdx = blockIdx.x*blockDim.x + threadIdx.x;
// get this thread's ray information
Ray ray;
float tmax;
if(gThreadIdx < numRays)
{
ray = rays[rayIDs[gThreadIdx]];
tmax = tmaxs[gThreadIdx];
}
// ID of triangle hit, if any
int hit = -1;
// for each tile of triangles
for(int tile = 0; tile < numTiles; ++tile)
{
const bool isLastTile = (tile == numTiles-1);
// load next tile in parallel: each thread loads a separate triangle from global to shared memory
if(!isLastTile || threadIdx.x < lastTileSize)
{
sh_tris[threadIdx.x] = tris[triIDs[tile*blockDim.x + threadIdx.x]];
}
// make sure everything is loaded in shared memory
__syncthreads();
// ---------------------------------------------------------------------------------------------------
if(gThreadIdx < numRays)
{
// for each triangle in shared memory
const int limit = isLastTile ? lastTileSize : blockDim.x;
for(int tid = 0; tid < limit; ++tid)
{
const TriV tri = sh_tris[tid];
/* find vectors for two edges sharing vert0 */
const vec3 edge1 = tri.v1 - tri.v0;
const vec3 edge2 = tri.v2 - tri.v0;
/* begin calculating determinant - also used to calculate U parameter */
const vec3 pvec = ray.d.cross(edge2);
/* if determinant is near zero, ray lies in plane of triangle */
const float det = edge1.dot(pvec);
if(det > -HIT_EPSILON && det < HIT_EPSILON)
continue;
const float inv_det = 1.0f / det;
/* calculate distance from vert0 to ray origin */
const vec3 tvec = ray.o - tri.v0;
/* calculate U parameter and test bounds */
const float u = tvec.dot(pvec) * inv_det;
if(u < 0.0f || u > 1.0f)
continue;
/* prepare to test V parameter */
const vec3 qvec = tvec.cross(edge1);
/* calculate V parameter and test bounds */
const float v = ray.d.dot(qvec) * inv_det;
if(v < 0.0f || u + v > 1.0f)
continue;
/* calculate t, ray hits triangle */
const float f = edge2.dot(qvec) * inv_det;
if((f >= tmax) || (f < -HIT_EPSILON))
continue;
// Have a valid hit point here. Store it.
tmax = f;
hit = tile*blockDim.x + tid;
}
}
// wait for every thread to compute its intersections before looping back and loading new tile
__syncthreads();
// ---------------------------------------------------------------------------------------------------
}
// return final results
if(gThreadIdx < numRays)
{
tmaxs[gThreadIdx] = tmax;
hits[gThreadIdx] = hit;
}
}
//---------------------------------------------------------------------------------------------------------------------
static timer s_t;
static double s_intersectTime = 0.0;
static double s_triSendTime = 0.0;
static double s_raySendTime = 0.0;
static double s_tmaxCopyTime = 0.0;
static double s_tmaxSendTime = 0.0;
static double s_kernelTime = 0.0;
static double s_tmaxReadTime = 0.0;
static double s_hitReadTime = 0.0;
static double s_hitUpdateTime = 0.0;
static double s_splitTime = 0.0;
static double s_partitionTriTime = 0.0;
static double s_partitionRayTime = 0.0;
#define TIMED_CALL(f, t) s_t.restart(); f; t += s_t.msec();
static int s_numKernelCalls = 0;
// GPU data
int* d_triIDs = 0;
TriV* d_tris = 0;
int* d_rayIDs = 0;
Ray* d_rays = 0;
float* d_tmaxs = 0;
int* d_hits = 0;
// Host data, used to read back results temporarily
float* h_tmaxs = 0;
int* h_hits = 0;
#ifdef HIT_GPU
void intersect(const TriSet& triSet, RaySet& raySet)
{
// send current IDs to GPU
TIMED_CALL(cudaMemcpy(d_triIDs, triSet.ids, triSet.end*sizeof(int), cudaMemcpyDefault), s_triSendTime);
TIMED_CALL(cudaMemcpy(d_rayIDs, raySet.ids, raySet.end*sizeof(int), cudaMemcpyDefault), s_raySendTime);
// send current tmax's to GPU
s_t.restart();
for(int i = 0; i < raySet.end; ++i)
{
h_tmaxs[i] = raySet.tmaxs[raySet.ids[i]];
}
s_tmaxCopyTime += s_t.msec();
TIMED_CALL(cudaMemcpy(d_tmaxs, h_tmaxs, raySet.end*sizeof(float), cudaMemcpyDefault), s_tmaxSendTime);
// determine parameters
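// one block of BLOCKSIZE threads covers BLOCKSIZE rays; triangles are consumed in
// BLOCKSIZE-wide tiles, the last of which may be partial (note: lastTileSize assumes
// triSet.end is not an exact multiple of BLOCKSIZE)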
const int numBlocks = ceilf((float)raySet.end / (float)BLOCKSIZE);
const int numTiles = ceilf((float)triSet.end / (float)BLOCKSIZE);
const int lastTileSize = triSet.end < BLOCKSIZE? triSet.end : triSet.end % BLOCKSIZE;
// launch kernel
s_t.restart();
kernel<<<numBlocks, BLOCKSIZE>>>(d_tris, d_triIDs, numTiles, lastTileSize, d_rays, d_rayIDs, raySet.end, d_tmaxs, d_hits);
cudaDeviceSynchronize();
s_kernelTime += s_t.msec();
++s_numKernelCalls;
// retrieve all results from GPU
TIMED_CALL(cudaMemcpy(h_tmaxs, d_tmaxs, raySet.end*sizeof(float), cudaMemcpyDefault), s_tmaxReadTime);
TIMED_CALL(cudaMemcpy(h_hits, d_hits, raySet.end*sizeof(int), cudaMemcpyDefault), s_hitReadTime);
// update data on CPU
s_t.restart();
for(int i = 0; i < raySet.end; ++i)
{
if(h_hits[i] >= 0)
{
const int rayID = raySet.ids[i];
raySet.tmaxs[rayID] = h_tmaxs[i];
raySet.hits[rayID] = triSet.ids[h_hits[i]];
}
}
s_hitUpdateTime += s_t.msec();
}
#endif
//---------------------------------------------------------------------------------------------------------------------
#ifdef HIT_CPU
void intersect(const TriSet& triSet, RaySet& raySet)
{
for(int t = 0; t < triSet.end; ++t)
{
const int triID = triSet.ids[t];
const TriV& tri = triSet.tris[triID];
for(int r = 0; r < raySet.end; ++r)
{
const int rayID = raySet.ids[r];
Ray& ray = raySet.rays[rayID];
/* find vectors for two edges sharing vert0 */
const vec3 edge1 = tri.v1 - tri.v0;
const vec3 edge2 = tri.v2 - tri.v0;
/* begin calculating determinant - also used to calculate U parameter */
const vec3 pvec = ray.d.cross(edge2);
/* if determinant is near zero, ray lies in plane of triangle */
const float det = edge1.dot(pvec);
if(det > -HIT_EPSILON && det < HIT_EPSILON)
continue;
const float inv_det = 1.0f / det;
/* calculate distance from vert0 to ray origin */
const vec3 tvec = ray.o - tri.v0;
/* calculate U parameter and test bounds */
const float u = tvec.dot(pvec) * inv_det;
if(u < 0.0f || u > 1.0f)
continue;
/* prepare to test V parameter */
const vec3 qvec = tvec.cross(edge1);
/* calculate V parameter and test bounds */
const float v = ray.d.dot(qvec) * inv_det;
if(v < 0.0f || u + v > 1.0f)
continue;
/* calculate t, ray hits triangle */
const float f = edge2.dot(qvec) * inv_det;
if((f >= raySet.tmaxs[rayID]) || (f < -HIT_EPSILON))
continue;
// Have a valid hit point here. Store it.
raySet.tmaxs[rayID] = f;
raySet.hits[rayID] = triID;
}
}
}
#endif
void splitBox(const Box& triBox, const TriSet& triSet, const RaySet& raySet, Box& nearBox, Box& farBox)
{
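// Split the bounds at the midpoint of their longest axis; the child the rays travel
// into first (sign of the ray direction along that axis) becomes the near box.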
const float dx = triBox.max.x - triBox.min.x;
const float dy = triBox.max.y - triBox.min.y;
const float dz = triBox.max.z - triBox.min.z;
const int axis = (dx > dy && dx > dz)? 0 : (dy > dz)? 1 : 2;
const float pos = (triBox.min[axis] + triBox.max[axis]) * 0.5f;
Box left = triBox;
Box right = triBox;
left.max[axis] = pos;
right.min[axis] = pos;
const bool leftNear = raySet.rays[0].d[axis] >= 0.0f; // sign of the first ray's direction along the split axis
nearBox = leftNear? left : right;
farBox = leftNear? right : left;
}
void partitionTris(const Box& box, TriSet& triSet)
{
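// Compact to the front of the id list the triangles whose bounds overlap 'box';
// triSet.end becomes the length of that active prefix.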
int newEnd = 0;
for(int i = 0; i < triSet.end; ++i)
{
Box triBox;
triBox.expand(triSet.tris[triSet.ids[i]]);
if(box.max.x < triBox.min.x) continue;
if(box.min.x > triBox.max.x) continue;
if(box.max.y < triBox.min.y) continue;
if(box.min.y > triBox.max.y) continue;
if(box.max.z < triBox.min.z) continue;
if(box.min.z > triBox.max.z) continue;
std::swap(triSet.ids[i], triSet.ids[newEnd]);
++newEnd;
}
triSet.end = newEnd;
}
void partitionRays(const Box& box, RaySet& raySet)
{
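// Slab-test each active ray against 'box' (clipped to the ray's current tmax) and
// compact the surviving ray ids to the front; raySet.end becomes the active count.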
int newEnd = 0;
for(int i = 0; i < raySet.end; ++i)
{
const Ray& ray = raySet.rays[raySet.ids[i]];
const vec3 invDir = ray.d.reciprocal();
const float tx1 = (box.min.x - ray.o.x) * invDir.x;
const float tx2 = (box.max.x - ray.o.x) * invDir.x;
float tmin = std::min(tx1, tx2);
float tmax = std::max(tx1, tx2);
const float ty1 = (box.min.y - ray.o.y) * invDir.y;
const float ty2 = (box.max.y - ray.o.y) * invDir.y;
tmin = std::max(tmin, std::min(ty1, ty2));
tmax = std::min(tmax, std::max(ty1, ty2));
const float tz1 = (box.min.z - ray.o.z) * invDir.z;
const float tz2 = (box.max.z - ray.o.z) * invDir.z;
tmin = std::max(tmin, std::min(tz1, tz2));
tmax = std::min(raySet.tmaxs[raySet.ids[i]], std::min(tmax, std::max(tz1, tz2)));
if(tmin > tmax) continue;
std::swap(raySet.ids[i], raySet.ids[newEnd]);
++newEnd;
}
raySet.end = newEnd;
}
//---------------------------------------------------------------------------------------------------------------------
void reallocData(const Canvas& canvas, RaySet& raySet)
{
const int npixels = canvas.w * canvas.h;
raySet.count = npixels;
// ---------------- v CPU v ----------------
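// Host-side arrays use pinned (page-locked) memory so the per-frame host<->device
// copies can be serviced by DMA instead of going through pageable-memory staging.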
cudaFreeHost(raySet.ids);
cudaMallocHost(&raySet.ids, raySet.count*sizeof(int));
for(int i = 0; i < raySet.count; ++i)
{
raySet.ids[i] = i;
}
cudaFreeHost(raySet.rays);
cudaMallocHost(&raySet.rays, raySet.count*sizeof(Ray));
cudaFreeHost(raySet.tmaxs);
cudaMallocHost(&raySet.tmaxs, raySet.count*sizeof(float));
cudaFreeHost(h_tmaxs);
cudaMallocHost(&h_tmaxs, raySet.count*sizeof(float));
cudaFreeHost(raySet.hits);
cudaMallocHost(&raySet.hits, raySet.count*sizeof(int));
cudaFreeHost(h_hits);
cudaMallocHost(&h_hits, raySet.count*sizeof(int));
// ---------------- v GPU v ----------------
cudaFree(d_rayIDs);
cudaMalloc(&d_rayIDs, raySet.count*sizeof(int));
cudaFree(d_rays);
cudaMalloc(&d_rays, raySet.count*sizeof(Ray));
cudaFree(d_tmaxs);
cudaMalloc(&d_tmaxs, raySet.count*sizeof(float));
cudaFree(d_hits);
cudaMalloc(&d_hits, raySet.count*sizeof(int));
}
void updateCamera(const Canvas& canvas, const LookAt& lookAt, Camera& camera)
{
// store position
camera.position = lookAt.eye;
// pre-computations
float invHeight = 1.0f / canvas.h;
float invWidth = 1.0f / canvas.w;
// compute camera basis
vec3 axisW = (lookAt.eye - lookAt.center).normalized();
vec3 axisV = lookAt.up.normalized();
vec3 axisU = axisV.cross(axisW);
// compute half scale factors for each basis vector
float sw = canvas.w * 0.01f; // scale factor chosen to keep direction values near zero for floating-point precision
float sv = sw * std::tan(0.523598775f); // tan of half a 60-degree vertical field of view (0.5236 rad)
float su = sv * canvas.w * invHeight;
// scale each vector
axisW *= sw;
axisV *= sv;
axisU *= su;
// store final direction
camera.lowerLeftDir = - axisU - axisV - axisW;
// compute full scales
axisV *= 2.0f;
axisU *= 2.0f;
// interpolation deltas
camera.dv = axisV * invHeight - axisU; // also goes back to start of u-axis
camera.du = axisU * invWidth;
// number of pixels in U and V directions
camera.nu = canvas.w;
camera.nv = canvas.h;
}
void generatePrimary(const Camera& camera, RaySet& raySet)
{
vec3 dir = camera.lowerLeftDir;
for(int v = 0; v < camera.nv; ++v)
{
for(int u = 0; u < camera.nu; ++u)
{
Ray& r = raySet.rays[v*camera.nu+u];
r.o = camera.position;
r.d = dir;
dir += camera.du;
}
dir += camera.dv;
}
for(int i = 0; i < raySet.count; ++i)
{
raySet.tmaxs[i] = FLT_MAX;
raySet.hits[i] = -1;
}
cudaMemcpy(d_rays, raySet.rays, raySet.count*sizeof(Ray), cudaMemcpyDefault);
}
void traceRays(const Box& triBox, TriSet& triSet, RaySet& raySet)
{
#ifdef HIT_GPU
if(triSet.end == 0 || raySet.end == 0)
{
return;
}
if(triSet.end < TRI_LIMIT && raySet.end < RAY_LIMIT)
#endif
#ifdef HIT_CPU
if(triSet.end < TRI_LIMIT || raySet.end < RAY_LIMIT)
#endif
{
static timer s_int_t;
s_int_t.restart();
intersect(triSet, raySet);
s_intersectTime += s_int_t.msec();
return;
}
Box nearBox;
Box farBox;
TIMED_CALL(splitBox(triBox, triSet, raySet, nearBox, farBox), s_splitTime);
int triEnd = triSet.end;
int rayEnd = raySet.end;
TIMED_CALL(partitionTris(nearBox, triSet), s_partitionTriTime);
TIMED_CALL(partitionRays(nearBox, raySet), s_partitionRayTime);
traceRays(nearBox, triSet, raySet);
triSet.end = triEnd;
raySet.end = rayEnd;
TIMED_CALL(partitionTris(farBox, triSet), s_partitionTriTime);
TIMED_CALL(partitionRays(farBox, raySet), s_partitionRayTime);
traceRays(farBox, triSet, raySet);
}
void doTraceRays(const Box& triBox, TriSet& triSet, RaySet& raySet)
{
triSet.end = triSet.count;
raySet.end = raySet.count;
traceRays(triBox, triSet, raySet);
}
void shadePixels(const TriSet& triSet, const RaySet& raySet, unsigned char* pixels)
{
for(int i = 0; i < raySet.count; ++i)
{
unsigned char c = 0;
const int triID = raySet.hits[i];
if(triID >= 0)
{
// recompute hit position
const Ray& ray = raySet.rays[i];
const vec3 hitPos = ray.o + ray.d * raySet.tmaxs[i];
// recompute barycentric coordinates
const TriV& tri = triSet.tris[triID];
const vec3 e0 = tri.v1 - tri.v0;
const vec3 e1 = tri.v2 - tri.v0;
const vec3 e2 = hitPos - tri.v0;
const float d00 = e0.dot(e0);
const float d01 = e0.dot(e1);
const float d11 = e1.dot(e1);
const float d20 = e2.dot(e0);
const float d21 = e2.dot(e1);
const float invDenom = 1.0f / (d00 * d11 - d01 * d01);
const float v = (d11 * d20 - d01 * d21) * invDenom;
const float w = (d00 * d21 - d01 * d20) * invDenom;
const float u = 1.0f - v - w;
// lerp normal
const TriN& norm = triSet.norms[triID];
const vec3 lerpN = (norm.n0*u + norm.n1*v + norm.n2*w).normalized();
// compute final color
c = 255 * lerpN.dot(-ray.d.normalized());
}
pixels[i*3+0] = c;
pixels[i*3+1] = c;
pixels[i*3+2] = c;
}
}
//---------------------------------------------------------------------------------------------------------------------
static Canvas s_canvas;
static LookAt s_lookAt;
static Camera s_camera;
static TriSet s_triSet;
static Box s_triBox;
static RaySet s_raySet;
void reshape(int w, int h)
{
s_canvas.w = w;
s_canvas.h = h;
reallocData(s_canvas, s_raySet);
}
void camera(float* eye, float* center, float* up)
{
s_lookAt.eye = vec3(eye);
s_lookAt.center = vec3(center);
s_lookAt.up = vec3(up);
updateCamera(s_canvas, s_lookAt, s_camera);
}
void render(unsigned char* pixels)
{
timer t;
s_splitTime = 0.0;
s_intersectTime = 0.0;
s_partitionTriTime = 0.0;
s_partitionRayTime = 0.0;
s_triSendTime = 0.0;
s_raySendTime = 0.0;
s_tmaxCopyTime = 0.0;
s_tmaxSendTime = 0.0;
s_kernelTime = 0.0;
s_tmaxReadTime = 0.0;
s_hitReadTime = 0.0;
s_hitUpdateTime = 0.0;
s_numKernelCalls = 0;
std::cout << "---------------------------------" << std::endl;
t.restart();
generatePrimary(s_camera, s_raySet);
std::cout << "generate: " << std::setw(15) << (int)t.msec() << " ms" << std::endl;
t.restart();
doTraceRays(s_triBox, s_triSet, s_raySet);
std::cout << "trace: " << std::setw(15) << (int)t.msec() << " ms" << std::endl;
std::cout << " " << "split: " << std::setw(9) << (int)s_splitTime << " ms" << std::endl;
std::cout << " " << "intersect: " << std::setw(9) << (int)s_intersectTime << " ms" << std::endl;
std::cout << " " << "triSend: " << std::setw(9) << s_triSendTime << " ms" << std::endl;
std::cout << " " << "raySend: " << std::setw(9) << s_raySendTime << " ms" << std::endl;
std::cout << " " << "tmaxCopy: " << std::setw(9) << s_tmaxCopyTime << " ms" << std::endl;
std::cout << " " << "tmaxSend: " << std::setw(9) << s_tmaxSendTime << " ms" << std::endl;
std::cout << " " << "kernel: " << std::setw(9) << s_kernelTime << " ms (count: " << s_numKernelCalls << ", avg: " << s_kernelTime / s_numKernelCalls << " ms)" << std::endl;
std::cout << " " << "tmaxRead: " << std::setw(9) << s_tmaxReadTime << " ms" << std::endl;
std::cout << " " << "hitRead: " << std::setw(9) << s_hitReadTime << " ms" << std::endl;
std::cout << " " << "hitUpdate: " << std::setw(9) << s_hitUpdateTime << " ms" << std::endl;
std::cout << " " << "partitionTri: " << std::setw(9) << (int)s_partitionTriTime << " ms" << std::endl;
std::cout << " " << "partitionRay: " << std::setw(9) << (int)s_partitionRayTime << " ms" << std::endl;
t.restart();
shadePixels(s_triSet, s_raySet, pixels);
std::cout << "shade: " << std::setw(15) << (int)t.msec() << " ms" << std::endl;
}
//---------------------------------------------------------------------------------------------------------------------
void loadScene1tri(TriSet& triSet)
{
TriV tri;
tri.v0 = vec3(-1,-1,0);
tri.v1 = vec3(1,-1,0);
tri.v2 = vec3(0,1,0);
triSet.tris = new TriV[1];
triSet.tris[0] = tri;
TriN norm;
norm.n0 = (tri.v1 - tri.v0).cross(tri.v2-tri.v0).normalized();
norm.n1 = norm.n0;
norm.n2 = norm.n0;
triSet.norms = new TriN[1];
triSet.norms[0] = norm;
triSet.ids = new int[1];
triSet.ids[0] = 0;
triSet.count = 1;
}
float randf(float min, float max)
{
return min + (float)rand()/(float)RAND_MAX * (max - min);
}
void loadSceneRand(TriSet& triSet)
{
triSet.count = 100000;
triSet.tris = new TriV[triSet.count];
triSet.norms = new TriN[triSet.count];
int dst = 0;
srand(122);
for(int i = 0; i < triSet.count; ++i)
{
vec3 c(randf(-5.0f, 5.0f), randf(-5.0f, 5.0f), randf(-5.0f, 5.0f));
TriV tri;
tri.v0 = c + vec3(randf(0.1f, 0.5f), randf(0.1f, 0.5f), randf(0.1f, 0.5f));
tri.v1 = c + vec3(randf(0.1f, 0.5f), randf(0.1f, 0.5f), randf(0.1f, 0.5f));
tri.v2 = c + vec3(randf(0.1f, 0.5f), randf(0.1f, 0.5f), randf(0.1f, 0.5f));
triSet.tris[dst] = tri;
TriN norm;
norm.n0 = (tri.v1 - tri.v0).cross(tri.v2-tri.v0).normalized();
norm.n1 = norm.n0;
norm.n2 = norm.n0;
triSet.norms[dst] = norm;
++dst;
}
}
static std::vector<vec3> vertices;
static std::vector<int> elements;
static int vertex_cb(p_ply_argument argument)
{
long id;
ply_get_argument_user_data(argument, NULL, &id);
if(id == 0)
{
vertices.resize(vertices.size()+1);
}
vertices.back()[id] = ply_get_argument_value(argument) * 50;
return 1;
}
static int face_cb(p_ply_argument argument)
{
long length, value_index;
ply_get_argument_property(argument, NULL, &length, &value_index);
switch (value_index)
{
case 0:
case 1:
case 2:
elements.push_back(ply_get_argument_value(argument));
break;
default:
break;
}
return 1;
}
void loadSceneBunny(TriSet& triSet)
{
p_ply ply = ply_open("/home/potato/Downloads/bunny.ply", NULL, 0, NULL);
if (!ply) exit(1);
if (!ply_read_header(ply)) exit(1);
long nvertices = ply_set_read_cb(ply, "vertex", "x", vertex_cb, NULL, 0);
ply_set_read_cb(ply, "vertex", "y", vertex_cb, NULL, 1);
ply_set_read_cb(ply, "vertex", "z", vertex_cb, NULL, 2);
long ntriangles = ply_set_read_cb(ply, "face", "vertex_indices", face_cb, NULL, 0);
if (!ply_read(ply)) exit(1);
ply_close(ply);
triSet.count = elements.size()/3;
triSet.tris = new TriV[triSet.count];
triSet.norms = new TriN[triSet.count];
std::vector<vec3> normals(vertices.size());
for(unsigned int e = 0; e < elements.size(); e+=3)
{
int e0 = elements[e+0];
int e1 = elements[e+1];
int e2 = elements[e+2];
vec3 v0 = vertices[e0];
vec3 v1 = vertices[e1];
vec3 v2 = vertices[e2];
vec3 n = (v1-v0).cross(v2-v0);
normals[e0] += n;
normals[e1] += n;
normals[e2] += n;
}
int i = 0;
for(unsigned int e = 0; e < elements.size(); e+=3)
{
int e0 = elements[e+0];
int e1 = elements[e+1];
int e2 = elements[e+2];
TriV t;
t.v0 = vertices[e0];
t.v1 = vertices[e1];
t.v2 = vertices[e2];
triSet.tris[i] = t;
TriN n;
n.n0 = normals[e0].normalized();
n.n1 = normals[e1].normalized();
n.n2 = normals[e2].normalized();
triSet.norms[i] = n;
++i;
}
}
void commonSetup(TriSet& triSet, Box& triBox)
{
triSet.ids = new int[triSet.count];
for(int i = 0; i < triSet.count; ++i)
{
triSet.ids[i] = i;
triBox.expand(triSet.tris[i]);
}
cudaMalloc(&d_triIDs, triSet.count*sizeof(int));
cudaMemcpy(d_triIDs, triSet.ids, triSet.count*sizeof(int), cudaMemcpyDefault);
cudaMalloc(&d_tris, triSet.count*sizeof(TriV));
cudaMemcpy(d_tris, triSet.tris, triSet.count*sizeof(TriV), cudaMemcpyDefault);
vec3 center = (triBox.min + triBox.max) * 0.5f;
vec3 eye = center + vec3(0,0,10);
vec3 up(0,1,0);
rtvSetCamera(eye.ptr(), center.ptr(), up.ptr());
}
//---------------------------------------------------------------------------------------------------------------------
int main()
{
rtvInit(1024, 1024);
rtvSetReshapeCallback(reshape);
rtvSetCameraCallback(camera);
rtvSetRenderCallback(render);
// loadScene1tri(s_triSet);
// loadSceneRand(s_triSet);
loadSceneBunny(s_triSet);
commonSetup(s_triSet, s_triBox);
rtvExec();
return 0;
}
|
214c66c68c122135c3e38914f94c4d17059f9c65.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombination<
float, 1, int32_t, float, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, true,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 214c66c68c122135c3e38914f94c4d17059f9c65.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombination<
float, 1, int32_t, float, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, true,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
233f1a2d961e785970cae8126eea5771021f79b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
 * A simple example of using a structure of arrays to store data on the device.
* This example is used to study the impact on performance of data layout on the
* GPU.
*
* SoA: contiguous reads for x and y
*/
#define LEN 1 << 20
struct InnerArray
{
float x[LEN];
float y[LEN];
};
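// With this SoA layout, consecutive threads access consecutive x (and later y) elements,
// so global-memory loads and stores coalesce into full transactions; an AoS layout
// (struct {float x, y;} pts[LEN]) would interleave the fields and waste part of every
// memory transaction whenever a kernel touches only one of them.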
// functions for inner array outer struct
void initialInnerArray(InnerArray *ip, int size)
{
for (int i = 0; i < size; i++)
{
ip->x[i] = (float)( rand() & 0xFF ) / 100.0f;
ip->y[i] = (float)( rand() & 0xFF ) / 100.0f;
}
return;
}
void testInnerArrayHost(InnerArray *A, InnerArray *C, const int n)
{
for (int idx = 0; idx < n; idx++)
{
C->x[idx] = A->x[idx] + 10.f;
C->y[idx] = A->y[idx] + 20.f;
}
return;
}
void printfHostResult(InnerArray *C, const int n)
{
for (int idx = 0; idx < n; idx++)
{
printf("printout idx %d: x %f y %f\n", idx, C->x[idx], C->y[idx]);
}
return;
}
void checkInnerArray(InnerArray *hostRef, InnerArray *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef->x[i] - gpuRef->x[i]) > epsilon)
{
match = 0;
printf("different on x %dth element: host %f gpu %f\n", i,
hostRef->x[i], gpuRef->x[i]);
break;
}
if (abs(hostRef->y[i] - gpuRef->y[i]) > epsilon)
{
match = 0;
printf("different on y %dth element: host %f gpu %f\n", i,
hostRef->y[i], gpuRef->y[i]);
break;
}
}
if (!match) printf("Arrays do not match.\n\n");
}
__global__ void testInnerArray(InnerArray *data, InnerArray * result,
const int n)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.f;
tmpy += 20.f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
__global__ void warmup2(InnerArray *data, InnerArray * result, const int n)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.f;
tmpy += 20.f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
// test for struct of arrays
int main(int argc, char **argv)
{
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s test struct of array at ", argv[0]);
printf("device %d: %s \n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// allocate host memory
int nElem = LEN;
size_t nBytes = sizeof(InnerArray);
InnerArray *h_A = (InnerArray *)malloc(nBytes);
InnerArray *hostRef = (InnerArray *)malloc(nBytes);
InnerArray *gpuRef = (InnerArray *)malloc(nBytes);
// initialize host array
initialInnerArray(h_A, nElem);
testInnerArrayHost(h_A, hostRef, nElem);
// allocate device memory
InnerArray *d_A, *d_C;
CHECK(hipMalloc((InnerArray**)&d_A, nBytes));
CHECK(hipMalloc((InnerArray**)&d_C, nBytes));
// copy data from host to device
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
// set up offset for summary
int blocksize = 128;
if (argc > 1) blocksize = atoi(argv[1]);
// execution configuration
dim3 block (blocksize, 1);
dim3 grid ((nElem + block.x - 1) / block.x, 1);
// kernel 1:
double iStart = seconds();
hipLaunchKernelGGL(( warmup2), dim3(grid), dim3(block), 0, 0, d_A, d_C, nElem);
CHECK(hipDeviceSynchronize());
double iElaps = seconds() - iStart;
printf("warmup2 <<< %3d, %3d >>> elapsed %f sec\n", grid.x, block.x,
iElaps);
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
checkInnerArray(hostRef, gpuRef, nElem);
CHECK(hipGetLastError());
iStart = seconds();
hipLaunchKernelGGL(( testInnerArray), dim3(grid), dim3(block), 0, 0, d_A, d_C, nElem);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("innerarray <<< %3d, %3d >>> elapsed %f sec\n", grid.x, block.x,
iElaps);
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
checkInnerArray(hostRef, gpuRef, nElem);
CHECK(hipGetLastError());
CHECK(hipFree(d_A));
CHECK(hipFree(d_C));
free(h_A);
free(hostRef);
free(gpuRef);
// reset device
CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
| 233f1a2d961e785970cae8126eea5771021f79b3.cu | #include "../common/common.h"
#include <cuda_runtime.h>
#include <stdio.h>
/*
 * A simple example of using a structure of arrays to store data on the device.
* This example is used to study the impact on performance of data layout on the
* GPU.
*
* SoA: contiguous reads for x and y
*/
#define LEN 1 << 20
struct InnerArray
{
float x[LEN];
float y[LEN];
};
// functions for inner array outer struct
void initialInnerArray(InnerArray *ip, int size)
{
for (int i = 0; i < size; i++)
{
ip->x[i] = (float)( rand() & 0xFF ) / 100.0f;
ip->y[i] = (float)( rand() & 0xFF ) / 100.0f;
}
return;
}
void testInnerArrayHost(InnerArray *A, InnerArray *C, const int n)
{
for (int idx = 0; idx < n; idx++)
{
C->x[idx] = A->x[idx] + 10.f;
C->y[idx] = A->y[idx] + 20.f;
}
return;
}
void printfHostResult(InnerArray *C, const int n)
{
for (int idx = 0; idx < n; idx++)
{
printf("printout idx %d: x %f y %f\n", idx, C->x[idx], C->y[idx]);
}
return;
}
void checkInnerArray(InnerArray *hostRef, InnerArray *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef->x[i] - gpuRef->x[i]) > epsilon)
{
match = 0;
printf("different on x %dth element: host %f gpu %f\n", i,
hostRef->x[i], gpuRef->x[i]);
break;
}
if (abs(hostRef->y[i] - gpuRef->y[i]) > epsilon)
{
match = 0;
printf("different on y %dth element: host %f gpu %f\n", i,
hostRef->y[i], gpuRef->y[i]);
break;
}
}
if (!match) printf("Arrays do not match.\n\n");
}
__global__ void testInnerArray(InnerArray *data, InnerArray * result,
const int n)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.f;
tmpy += 20.f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
__global__ void warmup2(InnerArray *data, InnerArray * result, const int n)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.f;
tmpy += 20.f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
// test for struct of arrays
int main(int argc, char **argv)
{
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s test struct of array at ", argv[0]);
printf("device %d: %s \n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// allocate host memory
int nElem = LEN;
size_t nBytes = sizeof(InnerArray);
InnerArray *h_A = (InnerArray *)malloc(nBytes);
InnerArray *hostRef = (InnerArray *)malloc(nBytes);
InnerArray *gpuRef = (InnerArray *)malloc(nBytes);
// initialize host array
initialInnerArray(h_A, nElem);
testInnerArrayHost(h_A, hostRef, nElem);
// allocate device memory
InnerArray *d_A, *d_C;
CHECK(cudaMalloc((InnerArray**)&d_A, nBytes));
CHECK(cudaMalloc((InnerArray**)&d_C, nBytes));
// copy data from host to device
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
// set up offset for summary
int blocksize = 128;
if (argc > 1) blocksize = atoi(argv[1]);
// execution configuration
dim3 block (blocksize, 1);
dim3 grid ((nElem + block.x - 1) / block.x, 1);
// kernel 1:
double iStart = seconds();
warmup2<<<grid, block>>>(d_A, d_C, nElem);
CHECK(cudaDeviceSynchronize());
double iElaps = seconds() - iStart;
printf("warmup2 <<< %3d, %3d >>> elapsed %f sec\n", grid.x, block.x,
iElaps);
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
checkInnerArray(hostRef, gpuRef, nElem);
CHECK(cudaGetLastError());
iStart = seconds();
testInnerArray<<<grid, block>>>(d_A, d_C, nElem);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("innerarray <<< %3d, %3d >>> elapsed %f sec\n", grid.x, block.x,
iElaps);
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
checkInnerArray(hostRef, gpuRef, nElem);
CHECK(cudaGetLastError());
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_C));
free(h_A);
free(hostRef);
free(gpuRef);
// reset device
CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
c6b0b8ee39312b8fef3d601c0219e7a5365b693c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "math.h"
#include <string>
// Image IO
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <sstream>
#include "common_hip.cuh"
#include "triangle.cuh"
// Output generated from Teg
#include "quadratic.cuh"
//#include "linear.cuh"
int main(int argc, char** argv)
{
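// All buffers below use hipMallocManaged (unified memory): the host fills them directly
// and the kernels read them without explicit host-to-device copies.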
TriMesh* mesh;
hipMallocManaged(&mesh, sizeof(TriMesh));
mesh->num_vertices = 3;
hipMallocManaged(&(mesh->vertices), sizeof(Vertex) * mesh->num_vertices);
hipMallocManaged(&(mesh->weights), sizeof(float) * 3);
mesh->num_triangles = 1;
hipMallocManaged(&(mesh->triangles), sizeof(Triangle) * mesh->num_triangles);
mesh->vertices[0] = Vertex{100, 100};
mesh->vertices[1] = Vertex{400, 100};
mesh->vertices[2] = Vertex{100, 400};
mesh->triangles[0] = Triangle{0, 2, 1};
QuadraticFragment* colors;
hipMallocManaged(&colors, sizeof(QuadraticFragment) * 1);
colors[0] = QuadraticFragment{Color{0, 1, 0}, Color{0, 0, 1}, Color{1, 0, 0},
Color{1, 0, 1}, Color{1, 1, 0}, Color{0, 1, 1}};
/*
LinearFragment* colors;
hipMallocManaged(&colors, sizeof(LinearFragment) * 1);
colors[0] = LinearFragment{Color{0, 1, 0}, Color{0, 0, 1}, Color{1, 0, 0}};
*/
int h = 512;
int w = 512;
float* triangle_image;
char* triangle_bimage = (char*) malloc(w * h * 3);
hipMallocManaged(&triangle_image, sizeof(float) * w * h * 3);
int* tids;
int* pids;
int num_jobs = w * h;
hipMallocManaged(&tids, num_jobs * sizeof(int));
hipMallocManaged(&pids, num_jobs * sizeof(int));
for (int i = 0; i < h * w; i++) {
tids[i] = 0;
pids[i] = i;
}
hipLaunchKernelGGL(( set_zero<float>), dim3(((w * h * 3) / 256) + 1), dim3(256), 0, 0, triangle_image, (w * h * 3) );
// Render triangles to image.
hipLaunchKernelGGL(( quadratic_integral_kernel), dim3((num_jobs / 256) + 1), dim3(256), 0, 0,
tids,
pids,
num_jobs,
mesh,
colors,
triangle_image,
w, h);
hipDeviceSynchronize();
for(int idx = 0; idx < w * h * 3; idx ++){
int _val = (int)(triangle_image[idx] * 256);
triangle_bimage[idx] = (char) ((_val < 0) ? 0 : (_val > 255 ? 255 : _val));
}
std::stringstream ss;
ss << "test.png";
cv::imwrite(ss.str(), cv::Mat(w, h, CV_8UC3, triangle_bimage));
hipFree(triangle_image);
hipFree(tids);
hipFree(pids);
} | c6b0b8ee39312b8fef3d601c0219e7a5365b693c.cu | #include <stdio.h>
#include "math.h"
#include <string>
// Image IO
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <sstream>
#include "common.cuh"
#include "triangle.cuh"
// Output generated from Teg
#include "quadratic.cuh"
//#include "linear.cuh"
int main(int argc, char** argv)
{
TriMesh* mesh;
cudaMallocManaged(&mesh, sizeof(TriMesh));
mesh->num_vertices = 3;
cudaMallocManaged(&(mesh->vertices), sizeof(Vertex) * mesh->num_vertices);
cudaMallocManaged(&(mesh->weights), sizeof(float) * 3);
mesh->num_triangles = 1;
cudaMallocManaged(&(mesh->triangles), sizeof(Triangle) * mesh->num_triangles);
mesh->vertices[0] = Vertex{100, 100};
mesh->vertices[1] = Vertex{400, 100};
mesh->vertices[2] = Vertex{100, 400};
mesh->triangles[0] = Triangle{0, 2, 1};
QuadraticFragment* colors;
cudaMallocManaged(&colors, sizeof(QuadraticFragment) * 1);
colors[0] = QuadraticFragment{Color{0, 1, 0}, Color{0, 0, 1}, Color{1, 0, 0},
Color{1, 0, 1}, Color{1, 1, 0}, Color{0, 1, 1}};
/*
LinearFragment* colors;
cudaMallocManaged(&colors, sizeof(LinearFragment) * 1);
colors[0] = LinearFragment{Color{0, 1, 0}, Color{0, 0, 1}, Color{1, 0, 0}};
*/
int h = 512;
int w = 512;
float* triangle_image;
char* triangle_bimage = (char*) malloc(w * h * 3);
cudaMallocManaged(&triangle_image, sizeof(float) * w * h * 3);
int* tids;
int* pids;
int num_jobs = w * h;
cudaMallocManaged(&tids, num_jobs * sizeof(int));
cudaMallocManaged(&pids, num_jobs * sizeof(int));
for (int i = 0; i < h * w; i++) {
tids[i] = 0;
pids[i] = i;
}
set_zero<float><<<((w * h * 3) / 256) + 1, 256>>>(triangle_image, (w * h * 3) );
// Render triangles to image.
quadratic_integral_kernel<<<(num_jobs / 256) + 1, 256>>>(
tids,
pids,
num_jobs,
mesh,
colors,
triangle_image,
w, h);
cudaDeviceSynchronize();
for(int idx = 0; idx < w * h * 3; idx ++){
int _val = (int)(triangle_image[idx] * 256);
triangle_bimage[idx] = (char) ((_val < 0) ? 0 : (_val > 255 ? 255 : _val));
}
std::stringstream ss;
ss << "test.png";
cv::imwrite(ss.str(), cv::Mat(w, h, CV_8UC3, triangle_bimage));
cudaFree(triangle_image);
cudaFree(tids);
cudaFree(pids);
} |
b69768d1521fa8dee6d168b6bc5ac89bfcbb4b24.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// imgproc_main.cpp
//
//
// Created by Nathaniel Lewis on 3/8/12.
// Copyright (c) 2012 E1FTW Games. All rights reserved.
//
#include <iostream>
#include <string>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
// GPU constant memory to hold our kernels (extremely fast access time)
__constant__ float convolutionKernelStore[256];
/**
* Convolution function for cuda. Destination is expected to have the same width/height as source, but there will be a border
* of floor(kWidth/2) pixels left and right and floor(kHeight/2) pixels top and bottom
*
* @param source Source image host pinned memory pointer
* @param width Source image width
* @param height Source image height
* @param paddingX source image padding along x
* @param paddingY source image padding along y
* @param kOffset offset into kernel store constant memory
* @param kWidth kernel width
* @param kHeight kernel height
* @param destination Destination image host pinned memory pointer
*/
__global__ void convolve(unsigned char *source, int width, int height, int paddingX, int paddingY, ssize_t kOffset, int kWidth, int kHeight, unsigned char *destination)
{
// Calculate our pixel's location
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
float sum = 0.0;
int pWidth = kWidth/2;
int pHeight = kHeight/2;
// Only execute for valid pixels
if(x >= pWidth+paddingX &&
y >= pHeight+paddingY &&
x < (blockDim.x * gridDim.x)-pWidth-paddingX &&
y < (blockDim.y * gridDim.y)-pHeight-paddingY)
{
for(int j = -pHeight; j <= pHeight; j++)
{
for(int i = -pWidth; i <= pWidth; i++)
{
// Sample the weight for this location
int ki = (i+pWidth);
int kj = (j+pHeight);
float w = convolutionKernelStore[(kj * kWidth) + ki + kOffset];
sum += w * float(source[((y+j) * width) + (x+i)]);
}
}
}
// Average the sum
destination[(y * width) + x] = (unsigned char) sum;
}
// computes sqrt(a^2 + b^2) element-wise on the GPU (Pythagorean theorem; used here for the gradient magnitude)
__global__ void pythagoras(unsigned char *a, unsigned char *b, unsigned char *c)
{
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float af = float(a[idx]);
float bf = float(b[idx]);
c[idx] = (unsigned char) sqrtf(af*af + bf*bf);
}
// create an image buffer. return host ptr, pass out device pointer through pointer to pointer
unsigned char* createImageBuffer(unsigned int bytes, unsigned char **devicePtr)
{
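// Mapped ("zero-copy") pinned allocation: the same host buffer is exposed to the GPU via
// a device pointer, so the OpenCV Mats can wrap it while kernels read and write it directly.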
unsigned char *ptr = NULL;
hipSetDeviceFlags(hipDeviceMapHost);
hipHostMalloc(&ptr, bytes, hipHostMallocMapped);
hipHostGetDevicePointer(devicePtr, ptr, 0);
return ptr;
}
int main (int argc, char** argv)
{
// Open a webcamera
cv::VideoCapture camera(0);
cv::Mat frame;
if(!camera.isOpened())
return -1;
// Create the capture windows
cv::namedWindow("Source");
cv::namedWindow("Greyscale");
cv::namedWindow("Blurred");
cv::namedWindow("Sobel");
// Create the cuda event timers
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Create the gaussian kernel (sum = 159)
const float gaussianKernel5x5[25] =
{
2.f/159.f, 4.f/159.f, 5.f/159.f, 4.f/159.f, 2.f/159.f,
4.f/159.f, 9.f/159.f, 12.f/159.f, 9.f/159.f, 4.f/159.f,
5.f/159.f, 12.f/159.f, 15.f/159.f, 12.f/159.f, 5.f/159.f,
4.f/159.f, 9.f/159.f, 12.f/159.f, 9.f/159.f, 4.f/159.f,
2.f/159.f, 4.f/159.f, 5.f/159.f, 4.f/159.f, 2.f/159.f,
};
hipMemcpyToSymbol(convolutionKernelStore, gaussianKernel5x5, sizeof(gaussianKernel5x5), 0);
const ssize_t gaussianKernel5x5Offset = 0;
// Sobel gradient kernels
const float sobelGradientX[9] =
{
-1.f, 0.f, 1.f,
-2.f, 0.f, 2.f,
-1.f, 0.f, 1.f,
};
const float sobelGradientY[9] =
{
1.f, 2.f, 1.f,
0.f, 0.f, 0.f,
-1.f, -2.f, -1.f,
};
hipMemcpyToSymbol(convolutionKernelStore, sobelGradientX, sizeof(sobelGradientX), sizeof(gaussianKernel5x5));
hipMemcpyToSymbol(convolutionKernelStore, sobelGradientY, sizeof(sobelGradientY), sizeof(gaussianKernel5x5) + sizeof(sobelGradientX));
const ssize_t sobelGradientXOffset = sizeof(gaussianKernel5x5)/sizeof(float);
const ssize_t sobelGradientYOffset = sizeof(sobelGradientX)/sizeof(float) + sobelGradientXOffset;
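// Layout of convolutionKernelStore at this point: floats [0..24] hold the 5x5 gaussian,
// [25..33] the sobel X kernel and [34..42] the sobel Y kernel; the offsets above index into it.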
// Create CPU/GPU shared images - one for the initial and one for the result
camera >> frame;
unsigned char *sourceDataDevice, *blurredDataDevice, *edgesDataDevice;
cv::Mat source (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &sourceDataDevice));
cv::Mat blurred (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &blurredDataDevice));
cv::Mat edges (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &edgesDataDevice));
// Create two temporary images (for holding sobel gradients)
unsigned char *deviceGradientX, *deviceGradientY;
hipMalloc(&deviceGradientX, frame.size().width * frame.size().height);
hipMalloc(&deviceGradientY, frame.size().width * frame.size().height);
// Loop while capturing images
while(1)
{
// Capture the image and store a gray conversion to the gpu
camera >> frame;
cv::cvtColor(frame, source, CV_BGR2GRAY);
// Record the time it takes to process
hipEventRecord(start);
{
// convolution kernel launch parameters
dim3 cblocks (frame.size().width / 16, frame.size().height / 16);
dim3 cthreads(16, 16);
// pythagorean kernel launch parameters
dim3 pblocks (frame.size().width * frame.size().height / 256);
dim3 pthreads(256, 1);
// Perform the gaussian blur (first kernel in store @ 0)
hipLaunchKernelGGL(( convolve), dim3(cblocks),dim3(cthreads), 0, 0, sourceDataDevice, frame.size().width, frame.size().height, 0, 0, gaussianKernel5x5Offset, 5, 5, blurredDataDevice);
// Perform the sobel gradient convolutions (x&y padding is now 2 because there is a border of 2 around a 5x5 gaussian filtered image)
hipLaunchKernelGGL(( convolve), dim3(cblocks),dim3(cthreads), 0, 0, blurredDataDevice, frame.size().width, frame.size().height, 2, 2, sobelGradientXOffset, 3, 3, deviceGradientX);
hipLaunchKernelGGL(( convolve), dim3(cblocks),dim3(cthreads), 0, 0, blurredDataDevice, frame.size().width, frame.size().height, 2, 2, sobelGradientYOffset, 3, 3, deviceGradientY);
hipLaunchKernelGGL(( pythagoras), dim3(pblocks),dim3(pthreads), 0, 0, deviceGradientX, deviceGradientY, edgesDataDevice);
hipDeviceSynchronize();
}
hipEventRecord(stop);
// Display the elapsed time
float ms = 0.0f;
hipEventSynchronize(stop);
hipEventElapsedTime(&ms, start, stop);
std::cout << "Elapsed GPU time: " << ms << " milliseconds" << std::endl;
// Show the results
cv::imshow("Source", frame);
cv::imshow("Greyscale", source);
cv::imshow("Blurred", blurred);
cv::imshow("Sobel", edges);
// Spin
if(cv::waitKey(1) == 27) break;
}
// Exit
hipHostFree(source.data);
hipHostFree(blurred.data);
hipHostFree(edges.data);
hipFree(deviceGradientX);
hipFree(deviceGradientY);
return 0;
}
| b69768d1521fa8dee6d168b6bc5ac89bfcbb4b24.cu | //
// imgproc_main.cpp
//
//
// Created by Nathaniel Lewis on 3/8/12.
// Copyright (c) 2012 E1FTW Games. All rights reserved.
//
#include <iostream>
#include <string>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
// GPU constant memory to hold our kernels (extremely fast access time)
__constant__ float convolutionKernelStore[256];
/**
* Convolution function for cuda. Destination is expected to have the same width/height as source, but there will be a border
* of floor(kWidth/2) pixels left and right and floor(kHeight/2) pixels top and bottom
*
* @param source Source image host pinned memory pointer
* @param width Source image width
* @param height Source image height
* @param paddingX source image padding along x
* @param paddingY source image padding along y
* @param kOffset offset into kernel store constant memory
* @param kWidth kernel width
* @param kHeight kernel height
* @param destination Destination image host pinned memory pointer
*/
__global__ void convolve(unsigned char *source, int width, int height, int paddingX, int paddingY, ssize_t kOffset, int kWidth, int kHeight, unsigned char *destination)
{
// Calculate our pixel's location
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
float sum = 0.0;
int pWidth = kWidth/2;
int pHeight = kHeight/2;
// Only execute for valid pixels
if(x >= pWidth+paddingX &&
y >= pHeight+paddingY &&
x < (blockDim.x * gridDim.x)-pWidth-paddingX &&
y < (blockDim.y * gridDim.y)-pHeight-paddingY)
{
for(int j = -pHeight; j <= pHeight; j++)
{
for(int i = -pWidth; i <= pWidth; i++)
{
// Sample the weight for this location
int ki = (i+pWidth);
int kj = (j+pHeight);
float w = convolutionKernelStore[(kj * kWidth) + ki + kOffset];
sum += w * float(source[((y+j) * width) + (x+i)]);
}
}
}
// Average the sum
destination[(y * width) + x] = (unsigned char) sum;
}
// computes sqrt(a^2 + b^2) element-wise on the GPU (Pythagorean theorem; used here for the gradient magnitude)
__global__ void pythagoras(unsigned char *a, unsigned char *b, unsigned char *c)
{
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float af = float(a[idx]);
float bf = float(b[idx]);
c[idx] = (unsigned char) sqrtf(af*af + bf*bf);
}
// create an image buffer. return host ptr, pass out device pointer through pointer to pointer
unsigned char* createImageBuffer(unsigned int bytes, unsigned char **devicePtr)
{
unsigned char *ptr = NULL;
cudaSetDeviceFlags(cudaDeviceMapHost);
cudaHostAlloc(&ptr, bytes, cudaHostAllocMapped);
cudaHostGetDevicePointer(devicePtr, ptr, 0);
return ptr;
}
int main (int argc, char** argv)
{
// Open a webcamera
cv::VideoCapture camera(0);
cv::Mat frame;
if(!camera.isOpened())
return -1;
// Create the capture windows
cv::namedWindow("Source");
cv::namedWindow("Greyscale");
cv::namedWindow("Blurred");
cv::namedWindow("Sobel");
// Create the cuda event timers
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Create the gaussian kernel (sum = 159)
const float gaussianKernel5x5[25] =
{
2.f/159.f, 4.f/159.f, 5.f/159.f, 4.f/159.f, 2.f/159.f,
4.f/159.f, 9.f/159.f, 12.f/159.f, 9.f/159.f, 4.f/159.f,
5.f/159.f, 12.f/159.f, 15.f/159.f, 12.f/159.f, 5.f/159.f,
4.f/159.f, 9.f/159.f, 12.f/159.f, 9.f/159.f, 4.f/159.f,
2.f/159.f, 4.f/159.f, 5.f/159.f, 4.f/159.f, 2.f/159.f,
};
cudaMemcpyToSymbol(convolutionKernelStore, gaussianKernel5x5, sizeof(gaussianKernel5x5), 0);
const ssize_t gaussianKernel5x5Offset = 0;
// Sobel gradient kernels
const float sobelGradientX[9] =
{
-1.f, 0.f, 1.f,
-2.f, 0.f, 2.f,
-1.f, 0.f, 1.f,
};
const float sobelGradientY[9] =
{
1.f, 2.f, 1.f,
0.f, 0.f, 0.f,
-1.f, -2.f, -1.f,
};
cudaMemcpyToSymbol(convolutionKernelStore, sobelGradientX, sizeof(sobelGradientX), sizeof(gaussianKernel5x5));
cudaMemcpyToSymbol(convolutionKernelStore, sobelGradientY, sizeof(sobelGradientY), sizeof(gaussianKernel5x5) + sizeof(sobelGradientX));
const ssize_t sobelGradientXOffset = sizeof(gaussianKernel5x5)/sizeof(float);
const ssize_t sobelGradientYOffset = sizeof(sobelGradientX)/sizeof(float) + sobelGradientXOffset;
// Create CPU/GPU shared images - one for the initial and one for the result
camera >> frame;
unsigned char *sourceDataDevice, *blurredDataDevice, *edgesDataDevice;
cv::Mat source (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &sourceDataDevice));
cv::Mat blurred (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &blurredDataDevice));
cv::Mat edges (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &edgesDataDevice));
// Create two temporary images (for holding sobel gradients)
unsigned char *deviceGradientX, *deviceGradientY;
cudaMalloc(&deviceGradientX, frame.size().width * frame.size().height);
cudaMalloc(&deviceGradientY, frame.size().width * frame.size().height);
// Loop while capturing images
while(1)
{
// Capture the image and store a gray conversion to the gpu
camera >> frame;
cv::cvtColor(frame, source, CV_BGR2GRAY);
// Record the time it takes to process
cudaEventRecord(start);
{
// convolution kernel launch parameters
dim3 cblocks (frame.size().width / 16, frame.size().height / 16);
dim3 cthreads(16, 16);
// Pythagorean kernel launch parameters
dim3 pblocks (frame.size().width * frame.size().height / 256);
dim3 pthreads(256, 1);
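// Added note: these launch shapes assume the frame width and height are multiples of 16
// and that width*height is a multiple of 256; otherwise the trailing pixels are left
// unprocessed because the integer divisions above truncate.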
// Perform the gaussian blur (first kernel in store @ 0)
convolve<<<cblocks,cthreads>>>(sourceDataDevice, frame.size().width, frame.size().height, 0, 0, gaussianKernel5x5Offset, 5, 5, blurredDataDevice);
// Perform the sobel gradient convolutions (x&y padding is now 2 because there is a border of 2 around a 5x5 gaussian filtered image)
convolve<<<cblocks,cthreads>>>(blurredDataDevice, frame.size().width, frame.size().height, 2, 2, sobelGradientXOffset, 3, 3, deviceGradientX);
convolve<<<cblocks,cthreads>>>(blurredDataDevice, frame.size().width, frame.size().height, 2, 2, sobelGradientYOffset, 3, 3, deviceGradientY);
pythagoras<<<pblocks,pthreads>>>(deviceGradientX, deviceGradientY, edgesDataDevice);
cudaThreadSynchronize();
}
cudaEventRecord(stop);
// Display the elapsed time
float ms = 0.0f;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms, start, stop);
std::cout << "Elapsed GPU time: " << ms << " milliseconds" << std::endl;
// Show the results
cv::imshow("Source", frame);
cv::imshow("Greyscale", source);
cv::imshow("Blurred", blurred);
cv::imshow("Sobel", edges);
// Spin
if(cv::waitKey(1) == 27) break;
}
// Exit
cudaFreeHost(source.data);
cudaFreeHost(blurred.data);
cudaFreeHost(edges.data);
cudaFree(deviceGradientX);
cudaFree(deviceGradientY);
return 0;
}
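// Hypothetical build line (not part of the original source), assuming OpenCV 2.x with
// pkg-config support is installed:
//   nvcc -O2 -o cuda_sobel this_file.cu `pkg-config --cflags --libs opencv`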
|
dd2e1c36bd2a43410da8e352be939e39ac2dd5db.hip | // !!! This is a file automatically generated by hipify!!!
/************************************************************************/
/* 2012-3 Sheng Yang
/* Basic matrix operations, CUDA program (HIP port): kernel implementations and thread/block partitioning, etc.
/************************************************************************/
#include <hip/hip_runtime.h>
#include <cutil.h>
#include <iostream>
#include <cutil_inline.h>
#include <hipfft.h>
using namespace std;
#define MIN_DATA -999999999 // sentinel "minimum" used when searching for maxima
#define BLOCK_SIZE 512 // 1D thread-block size
#define MUL_BLOCK_SIZE 16 // 2D thread-block (tile) size
#define KAHAN_SUMMATION_FORMULA 0 // whether to use Kahan summation to reduce accumulated rounding error
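// Added explanatory sketch: Kahan (compensated) summation, as used in Mult_kernel below,
// carries a running compensation term alongside the sum. Scalar form of the update used there:
//   comp -= x;                 // fold the new term into the (negated) compensation
//   t     = sum - comp;        // add it to the running sum
//   comp  = (t - sum) + comp;  // recover the low-order bits lost in that addition
//   sum   = t;
// Folding comp back in each step keeps round-off error from accumulating linearly.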
// Row-wise maximum of a matrix
// Basic idea: each block handles one row, so global-memory loads are essentially coalesced
// (the other row-wise kernels below follow the same scheme and are not re-explained).
// Shared memory is used for a parallel pairwise reduction:
/************************************************************************/
/* A-------A------A-------A
/* \ / \ /
/* 2A 2A
/* \ /
/* 4A
/************************************************************************/
// The reductions mentioned below all follow this pattern.
//@param data_in input matrix
//@param row number of rows
//@param column number of columns
//@param index output: flat index of each row's maximum
//@param value output: each row's maximum value
__global__ void Matrix_MaxofRow(float *data_in, int row, int column, int *index, float *value)
{
__shared__ float max[BLOCK_SIZE];
__shared__ float ind[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
max[tid] = MIN_DATA;
ind[tid] = -1;
if( tid >= column || bid >= row)
return;
unsigned int begin_addr, end_addr;
begin_addr = bid * column;
end_addr = begin_addr + column;
begin_addr += tid;
while(begin_addr < end_addr)
{
float temp = data_in[begin_addr];
if(max[tid] < temp)
{
max[tid] = temp;
ind[tid] = begin_addr;
}
begin_addr += blockDim.x;
}
__syncthreads();
for(unsigned int s = blockDim.x/2; s > 0; s >>= 1)
{
if(tid < s)
{
if(max[tid] < max[tid+s])
{
max[tid] = max[tid+s];
ind[tid] = ind[tid+s];
}
}
__syncthreads();
}
if(tid == 0)
{
value[bid] = max[0];
index[bid] = ind[0];
}
}
// Column-wise maximum of a matrix
// Basic idea: to keep global-memory accesses coalesced, each block handles 128 columns (it could be fewer).
// The column-wise kernels below are written for BLOCK_SIZE = 512 only; changing BLOCK_SIZE requires changing the kernels.
// Within a group of 128 columns, for a thread with ID ida, the threads with IDs
// ida, (ida+128)%512, (ida+256)%512 and (ida+384)%512 work on the same column,
// and a final in-block reduction yields the maximum of each of the 128 columns.
//@param data_in input matrix
//@param row number of rows
//@param column number of columns
//@param index output: flat index of each column's maximum
//@param value output: each column's maximum value
__global__ void Matrix_MaxofCol(float *data_in, int row, int column, int *index, float *value)
{
__shared__ float max[BLOCK_SIZE];
__shared__ int ind[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
max[tid] = MIN_DATA;
ind[tid] = -1;
int res = column - (bid + 1)*128;
unsigned int begin_addr = 128*bid + (tid>>7) * column + (tid&127);
unsigned int end_addr = row * column;
if( (tid&127) >= (res + 128))
return;
while(begin_addr < end_addr )
{
float temp = data_in[begin_addr];
if(max[tid] < temp)
{
max[tid] = temp;
ind[tid] = begin_addr;
}
begin_addr += 4* column;
}
__syncthreads();
if(tid < 256)
{
if(max[tid] < max[tid + 256])
{
max[tid] = max[tid + 256];
ind[tid] = ind[tid + 256];
}
}
__syncthreads();
if(tid < 128)
{
if(max[tid] < max[tid + 128])
{
max[tid] = max[tid + 128];
ind[tid] = ind[tid + 128];
}
}
__syncthreads();
if(tid < res+128 && tid < 128){
value[tid + bid * 128] = max[tid];
index[tid + bid * 128] = ind[tid];
}
}
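// Added caveat: in the column-wise kernels, threads of the last, partial 128-column group
// return before the __syncthreads() calls that the remaining threads still execute. The
// CUDA/HIP programming guides treat a non-uniform __syncthreads() as undefined behavior,
// so this pattern relies on implementation-specific behavior of current hardware.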
// Row-wise sum (real)
//@param data_in input matrix
//@param row number of rows
//@param column number of columns
//@param value output: sum of each row
__global__ void Matrix_SumofRow(float *data_in, int row, int column, float *value)
{
__shared__ float sum[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
sum[tid] = 0;
if( tid >= column || bid >= row)
return;
unsigned int begin_addr, end_addr;
begin_addr = bid * column;
end_addr = begin_addr + column;
begin_addr += tid;
while(begin_addr < end_addr)
{
sum[tid] += data_in[begin_addr];
begin_addr += blockDim.x;
}
__syncthreads();
for(unsigned int s = blockDim.x/2; s > 0; s >>= 1)
{
if(tid < s)
{
sum[tid] += sum[tid + s];
}
__syncthreads();
}
if(tid == 0)
value[bid] = sum[0];
}
// Column-wise sum (real)
//@param data_in input matrix
//@param row number of rows
//@param column number of columns
//@param value output: sum of each column
__global__ void Matrix_SumofCol(float *data_in, int row, int column, float *value)
{
__shared__ float sum[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
sum[tid] = 0;
int res = column - (bid + 1)*128;
unsigned int begin_addr = 128*bid + (tid>>7) * column + (tid&127);
unsigned int end_addr = row * column;
if( (tid&127) >= (res + 128))
return;
while(begin_addr < end_addr )
{
sum[tid] += data_in[begin_addr];
begin_addr += 4* column;
}
__syncthreads();
if(tid < 256)
sum[tid] += sum[tid + 256];
__syncthreads();
if(tid < 128)
sum[tid] += sum[tid + 128];
__syncthreads();
if(tid < res+128 && tid < 128)
value[tid + bid * 128] = sum[tid];
}
// Total sum of a real matrix
// The matrix is first reduced row-wise (Matrix_SumofRow),
// then this kernel reduces the per-row partial sums within a single block.
//@param temp_value input: per-row partial sums
//@param num number of partial sums
//@param value output: total sum
__global__ void Matrix_SumofAll(float *temp_value, int num, float *value)
{
__shared__ float sum[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
sum[tid] = 0;
if( tid >= num)
return;
unsigned int begin_addr, end_addr;
begin_addr = 0;
end_addr = num;
begin_addr += tid;
while(begin_addr < end_addr)
{
sum[tid] += temp_value[begin_addr];
begin_addr += blockDim.x;
}
__syncthreads();
for(unsigned int s = blockDim.x/2; s > 0; s >>= 1)
{
if(tid < s)
{
sum[tid] += sum[tid + s];
}
__syncthreads();
}
if(tid == 0)
value[0] = sum[0];
}
// Row-wise sum (complex)
//@param data_in input matrix
//@param row number of rows
//@param column number of columns
//@param value output: sum of each row
__global__ void Matrix_SumofRow(float2 *data_in, int row, int column, float2 *value)
{
__shared__ float sum_R[BLOCK_SIZE];
__shared__ float sum_I[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
sum_R[tid] = 0;
sum_I[tid] = 0;
if( tid >= column || bid >= row)
return;
unsigned int begin_addr, end_addr;
begin_addr = bid * column;
end_addr = begin_addr + column;
begin_addr += tid;
while(begin_addr < end_addr)
{
float2 temp = data_in[begin_addr];
sum_R[tid] += temp.x;
sum_I[tid] += temp.y;
begin_addr += blockDim.x;
}
__syncthreads();
for(unsigned int s = blockDim.x/2; s > 0; s >>= 1)
{
if(tid < s)
{
sum_R[tid] += sum_R[tid + s];
sum_I[tid] += sum_I[tid + s];
}
__syncthreads();
}
if(tid == 0)
{
float2 temp;
temp.x = sum_R[0];
temp.y = sum_I[0];
value[bid] = temp;
}
}
// Column-wise sum (complex)
//@param data_in input matrix
//@param row number of rows
//@param column number of columns
//@param value output: sum of each column
__global__ void Matrix_SumofCol(float2 *data_in, int row, int column, float2 *value)
{
__shared__ float sum_I[BLOCK_SIZE];
__shared__ float sum_R[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
sum_I[tid] = 0;
sum_R[tid] = 0;
int res = column - (bid + 1)*128;
unsigned int begin_addr = 128*bid + (tid>>7) * column + (tid&127);
unsigned int end_addr = row * column;
if( (tid&127) >= (res + 128))
return;
while(begin_addr < end_addr )
{
float2 temp = data_in[begin_addr];
sum_R[tid] += temp.x;
sum_I[tid] += temp.y;
begin_addr += 4* column;
}
__syncthreads();
if(tid < 256){
sum_R[tid] += sum_R[tid + 256];
sum_I[tid] += sum_I[tid + 256];
}
__syncthreads();
if(tid < 128){
sum_R[tid] += sum_R[tid + 128];
sum_I[tid] += sum_I[tid + 128];
}
__syncthreads();
if(tid < res+128 && tid < 128){
float2 temp;
temp.x = sum_R[tid];
temp.y = sum_I[tid];
value[tid + bid * 128] = temp;
}
}
// Total sum of a complex matrix (reduces the per-row partial sums in one block)
//@param temp_value input: per-row partial sums
//@param num number of partial sums
//@param value output: total sum
__global__ void Matrix_SumofAll(float2 *temp_value, int num, float2 *value)
{
__shared__ float sum_I[BLOCK_SIZE];
__shared__ float sum_R[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
sum_R[tid] = 0;
sum_I[tid] = 0;
if( tid >= num)
return;
unsigned int begin_addr, end_addr;
begin_addr = 0;
end_addr = num;
begin_addr += tid;
while(begin_addr < end_addr)
{
float2 temp = temp_value[begin_addr];
sum_R[tid] += temp.x;
sum_I[tid] += temp.y;
begin_addr += blockDim.x;
}
__syncthreads();
for(unsigned int s = blockDim.x/2; s > 0; s >>= 1)
{
if(tid < s)
{
sum_R[tid] += sum_R[tid + s];
sum_I[tid] += sum_I[tid + s];
}
__syncthreads();
}
if(tid == 0){
float2 temp;
temp.x = sum_R[0];
temp.y = sum_I[0];
value[0] = temp;
}
}
// Real matrix multiplication
// Basic idea: each block computes one 16*16 tile of the output matrix,
// using shared memory so that global-memory accesses are coalesced.
//@param Matrix_a input matrix A
//@param row_a rows of A
//@param col_a columns of A
//@param lda leading dimension of A (actual row size in device memory, in elements)
//@param Matrix_b input matrix B
//@param row_b rows of B
//@param col_b columns of B
//@param ldb leading dimension of B
//@param Matrix_c output matrix A*B
//@param ldc leading dimension of C
__global__ static void Mult_kernel( const float *Matrix_a, int row_a, int col_a, size_t lda,
const float *Matrix_b, int row_b, int col_b, size_t ldb,
float *Matrix_c, size_t ldc)
{
__shared__ float matrix_a[MUL_BLOCK_SIZE][MUL_BLOCK_SIZE];
__shared__ float matrix_b[MUL_BLOCK_SIZE][MUL_BLOCK_SIZE];
unsigned int tidr = threadIdx.x;
unsigned int tidc = threadIdx.y;
unsigned int bidr = blockIdx.x * MUL_BLOCK_SIZE;
unsigned int bidc = blockIdx.y * MUL_BLOCK_SIZE;
int i,j;
float result = 0;
float comp = 0;
for(j = 0; j < col_a; j += MUL_BLOCK_SIZE)
{
if(tidr + bidr < row_a && tidc + j < col_a)
matrix_a[tidr][tidc] = Matrix_a[(tidr + bidr) * lda + tidc + j];
else
matrix_a[tidr][tidc] = 0;
if(tidr + j < row_b && tidc + bidc < col_b)
matrix_b[tidr][tidc] = Matrix_b[(tidr + j) * ldb + tidc + bidc];
else
matrix_b[tidr][tidc] = 0;
__syncthreads();
if(!KAHAN_SUMMATION_FORMULA)
for(i = 0; i < MUL_BLOCK_SIZE; i++)
{
result += matrix_a[tidr][i] * matrix_b[i][tidc];
}
else
for(i = 0; i < MUL_BLOCK_SIZE; i++)
{
float t;
comp -= matrix_a[tidr][i] * matrix_b[i][tidc];
t = result - comp;
comp = (t - result) + comp;
result = t;
}
__syncthreads();
}
if(tidr + bidr < row_a)
Matrix_c[(tidr + bidr) * ldc + tidc + bidc] = result;
}
// Complex matrix multiplication
// Basic idea: each block computes one 16*16 tile of the output matrix,
// using shared memory so that global-memory accesses are coalesced.
//@param Matrix_a input matrix A
//@param row_a rows of A
//@param col_a columns of A
//@param lda leading dimension of A (actual row size in device memory, in elements)
//@param Matrix_b input matrix B
//@param row_b rows of B
//@param col_b columns of B
//@param ldb leading dimension of B
//@param Matrix_c output matrix A*B
//@param ldc leading dimension of C
__global__ static void Mult_kernel( const float2 *Matrix_a, int row_a, int col_a, size_t lda,
const float2 *Matrix_b, int row_b, int col_b, size_t ldb,
float2 *Matrix_c, size_t ldc)
{
__shared__ float2 matrix_a[MUL_BLOCK_SIZE][MUL_BLOCK_SIZE];
__shared__ float2 matrix_b[MUL_BLOCK_SIZE][MUL_BLOCK_SIZE];
unsigned int tidr = threadIdx.x;
unsigned int tidc = threadIdx.y;
unsigned int bidr = blockIdx.x * MUL_BLOCK_SIZE;
unsigned int bidc = blockIdx.y * MUL_BLOCK_SIZE;
int i,j;
float2 result = make_float2(0,0);
float2 comp = make_float2(0,0);
for(j = 0; j < col_a; j += MUL_BLOCK_SIZE)
{
if(tidr + bidr < row_a && tidc + j < col_a)
matrix_a[tidr][tidc] = Matrix_a[(tidr + bidr) * lda + tidc + j];
else
matrix_a[tidr][tidc] = make_float2(0,0);
if(tidr + j < row_b && tidc + bidc < col_b)
matrix_b[tidr][tidc] = Matrix_b[(tidr + j) * ldb + tidc + bidc];
else
matrix_b[tidr][tidc] = make_float2(0,0);
__syncthreads();
if(!KAHAN_SUMMATION_FORMULA)
for(i = 0; i < MUL_BLOCK_SIZE; i++)
{
result.x+= matrix_a[tidr][i].x * matrix_b[i][tidc].x;
result.y+= matrix_a[tidr][i].y * matrix_b[i][tidc].y;
}
else
for(i = 0; i < MUL_BLOCK_SIZE; i++)
{
float2 t;
comp.x -= matrix_a[tidr][i].x * matrix_b[i][tidc].x;
comp.y -= matrix_a[tidr][i].y * matrix_b[i][tidc].y;
t.x = result.x - comp.x;
t.y = result.y - comp.y;
comp.x = (t.x - result.x) + comp.x;
comp.y = (t.y - result.y) + comp.y;
result = t;
}
__syncthreads();
}
if(tidr + bidr < row_a)
Matrix_c[(tidr + bidr) * ldc + tidc + bidc] = result;
}
// Matrix transpose
// Basic idea: use a 16*16 shared-memory tile so that both reads and writes are coalesced.
//@param T element type
//@param Matrix_in input matrix
//@param row number of rows
//@param col number of columns
//@param Matrix_out output: transposed matrix
template<class T>
__global__ static void Transpose_kernel(const T * Matrix_in, int row, int col, T * Matrix_out)
{
__shared__ T temp[MUL_BLOCK_SIZE][MUL_BLOCK_SIZE + 1];
unsigned int xIndex = blockIdx.x * MUL_BLOCK_SIZE + threadIdx.x;
unsigned int yIndex = blockIdx.y * MUL_BLOCK_SIZE + threadIdx.y;
if((xIndex < col) && (yIndex < row))
{
unsigned int index_in = yIndex * col + xIndex;
temp[threadIdx.y][threadIdx.x] = Matrix_in[index_in];
}
__syncthreads();
xIndex = MUL_BLOCK_SIZE*blockIdx.y + threadIdx.x;
yIndex = MUL_BLOCK_SIZE*blockIdx.x + threadIdx.y;
if((xIndex < row) && (yIndex < col))
{
unsigned int index_out = yIndex * row + xIndex;
Matrix_out[index_out] = temp[threadIdx.x][threadIdx.y];
}
}
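// Added note: the extra "+ 1" column in the shared-memory tile above pads each row so that
// the column-wise accesses performed during the transposed write fall into different
// shared-memory banks, avoiding bank conflicts.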
/************************************************************************/
/* Matrix maximum and its location; flag = 0: per row, flag = 1: per column
/* @param data_in : input, host matrix data stored contiguously in 1D
/* @param row : input, number of rows
/* @param col : input, number of columns
/* @param index : output, flat index of the maximum of each row/column
/* @param value : output, maximum of each row/column
/* @param flag : input, 0 = row-wise, 1 = column-wise
/************************************************************************/
extern "C"
void Matrix_Max(float *data_in, int row, int col,
int *index, float *value, int flag)
{
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
float memory;
float *d_data;
int *d_index;
float *d_value;
int res_num = 0;
if(flag == 0)
res_num = row;
else
res_num = col;
CUDA_SAFE_CALL(hipMalloc((void**)& d_data, sizeof(float)* row * col));
CUDA_SAFE_CALL(hipMalloc((void**)& d_index, sizeof(int)* res_num));
CUDA_SAFE_CALL(hipMalloc((void**)& d_value, sizeof(float)* res_num));
memory = sizeof(float)*row*col + sizeof(int)*res_num + sizeof(float)*res_num;
CUDA_SAFE_CALL(hipMemcpy(d_data, data_in,sizeof(float)*row*col, hipMemcpyHostToDevice));
if(flag == 0)
{
dim3 block_size(BLOCK_SIZE,1,1);
dim3 grid_size(row,1,1);
hipLaunchKernelGGL(( Matrix_MaxofRow), dim3(grid_size),dim3(block_size), 0, 0, d_data, row, col,d_index, d_value);
cutilCheckMsg("kernel launch failure");
}
else
{
dim3 block_size(BLOCK_SIZE,1,1);
dim3 gird_size(col/128+1,1,1);
hipLaunchKernelGGL(( Matrix_MaxofCol), dim3(gird_size), dim3(block_size), 0, 0, d_data, row, col, d_index, d_value);
cutilCheckMsg("kernel launch failure");
}
CUDA_SAFE_CALL(hipMemcpy(index, d_index, res_num * sizeof(int), hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemcpy(value, d_value, res_num * sizeof(float), hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(d_data));
CUDA_SAFE_CALL(hipFree(d_index));
CUDA_SAFE_CALL(hipFree(d_value));
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
cout<<": "<<time<<" ms"<<std::endl;
cout<<": "<<memory/1024<<" KB"<<endl;
}
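// Hypothetical host-side usage sketch (names and values are illustrative, not from the original source):
//   float h_data[2 * 3] = { 1, 5, 2,
//                           7, 0, 3 };     // 2x3 row-major matrix
//   int   h_index[2];
//   float h_value[2];
//   Matrix_Max(h_data, 2, 3, h_index, h_value, 0);   // flag = 0: per-row maxima
//   // expected: h_value = {5, 7}; h_index = {1, 3} (flat offsets into h_data)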
/************************************************************************/
/* Real matrix sum; flag = 0: per row, flag = 1: per column, flag = 2: sum of all elements
/*@param data_in : input, host matrix data stored contiguously in 1D
/*@param row : input, number of rows
/*@param col : input, number of columns
/*@param value : output, sum of each row/column (or the total sum)
/*@param flag : input, 0 = row-wise, 1 = column-wise, 2 = all elements
/************************************************************************/
extern "C"
void Matrix_Sum(float *data_in, int row, int col, float *value, int flag)
{
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
float memory;
float *d_data;
float *d_value;
int res_num = 0;
if(flag == 0)
res_num = row;
else if(flag == 1)
res_num = col;
else
res_num = 1;
CUDA_SAFE_CALL(hipMalloc((void**)& d_data, sizeof(float)* row * col));
CUDA_SAFE_CALL(hipMalloc((void**)& d_value, sizeof(float)* res_num));
memory = sizeof(float)*row*col + sizeof(float)*res_num;
CUDA_SAFE_CALL(hipMemcpy(d_data, data_in,sizeof(float)*row*col, hipMemcpyHostToDevice));
if(flag == 0)
{
dim3 block_size(BLOCK_SIZE,1,1);
dim3 grid_size(row,1,1);
hipLaunchKernelGGL(( Matrix_SumofRow), dim3(grid_size),dim3(block_size), 0, 0, d_data, row, col, d_value);
cutilCheckMsg("kernel launch failure");
}
else if(flag == 1)
{
dim3 block_size(BLOCK_SIZE,1,1);
dim3 gird_size(col/128+1,1,1);
hipLaunchKernelGGL(( Matrix_SumofCol), dim3(gird_size), dim3(block_size), 0, 0, d_data, row, col, d_value);
cutilCheckMsg("kernel launch failure");
}
else
{
dim3 block_size(BLOCK_SIZE,1,1);
dim3 gird_size(row,1,1);
float *temp_value;
hipMalloc((void**)&temp_value, sizeof(float)* row);
memory += sizeof(float)*row;
hipLaunchKernelGGL(( Matrix_SumofRow), dim3(gird_size), dim3(block_size), 0, 0, d_data, row, col, temp_value);
cutilCheckMsg("kernel launch failure");
hipLaunchKernelGGL(( Matrix_SumofAll), dim3(1),dim3(block_size), 0, 0, temp_value,row,d_value);
cutilCheckMsg("kernel launch failure");
}
CUDA_SAFE_CALL(hipMemcpy(value, d_value, res_num * sizeof(float), hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(d_data));
CUDA_SAFE_CALL(hipFree(d_value));
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
cout<<": "<<time<<" ms"<<std::endl;
cout<<": "<<memory/1024<<" KB"<<endl;
}
/************************************************************************/
/* Complex matrix sum; flag = 0: per row, flag = 1: per column, flag = 2: sum of all elements
/*@param data_in : input, host matrix data stored contiguously in 1D
/*@param row : input, number of rows
/*@param col : input, number of columns
/*@param value : output, sum of each row/column (or the total sum)
/*@param flag : input, 0 = row-wise, 1 = column-wise, 2 = all elements
/************************************************************************/
extern "C"
void Matrix_SumCom(hipComplex *data_in, int row, int col, hipComplex *value, int flag)
{
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
float memory;
float2 *d_data;
float2 *d_value;
int res_num = 0;
if(flag == 0)
res_num = row;
else if(flag == 1)
res_num = col;
else
res_num = 1;
CUDA_SAFE_CALL(hipMalloc((void**)& d_data, sizeof(float2)* row * col));
CUDA_SAFE_CALL(hipMalloc((void**)& d_value, sizeof(float2)* res_num));
memory = sizeof(float2)*row*col + sizeof(float2)*res_num;
CUDA_SAFE_CALL(hipMemcpy(d_data, data_in,sizeof(float2)*row*col, hipMemcpyHostToDevice));
if(flag == 0)
{
dim3 block_size(BLOCK_SIZE,1,1);
dim3 grid_size(row,1,1);
hipLaunchKernelGGL(( Matrix_SumofRow), dim3(grid_size),dim3(block_size), 0, 0, d_data, row, col, d_value);
cutilCheckMsg("kernel launch failure");
}
else if(flag == 1)
{
dim3 block_size(BLOCK_SIZE,1,1);
dim3 gird_size(col/128+1,1,1);
hipLaunchKernelGGL(( Matrix_SumofCol), dim3(gird_size), dim3(block_size), 0, 0, d_data, row, col, d_value);
cutilCheckMsg("kernel launch failure");
}
else
{
dim3 block_size(BLOCK_SIZE,1,1);
dim3 gird_size(row,1,1);
float2 *temp_value;
hipMalloc((void**)&temp_value, sizeof(float2)* row);
memory += sizeof(float2) * row;
hipLaunchKernelGGL(( Matrix_SumofRow), dim3(gird_size), dim3(block_size), 0, 0, d_data, row, col, temp_value);
cutilCheckMsg("kernel launch failure");
hipLaunchKernelGGL(( Matrix_SumofAll), dim3(1),dim3(block_size), 0, 0, temp_value,row,d_value);
cutilCheckMsg("kernel launch failure");
}
CUDA_SAFE_CALL(hipMemcpy(value, d_value, res_num * sizeof(float2), hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(d_data));
CUDA_SAFE_CALL(hipFree(d_value));
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
cout<<": "<<time<<" ms"<<std::endl;
cout<<": "<<memory/1024<<" KB"<<endl;
}
/************************************************************************/
/* Real matrix multiplication
/*@param Matrix_a : input, matrix A data (host, 1D contiguous)
/*@param row_a : input, rows of A
/*@param col_a : input, columns of A
/*@param Matrix_b : input, matrix B data (host, 1D contiguous)
/*@param row_b : input, rows of B
/*@param col_b : input, columns of B
/*@param Matrix_c : output, matrix C = A*B
/*@param flag : output, 0 = OK, 1 = inner dimensions of the factors do not match (not present in this signature)
/************************************************************************/
extern "C"
void Matrix_Multi(const float *Matrix_a, int row_a, int col_a, const float *Matrix_b, int row_b, int col_b, float *Matrix_c)
{
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
float memory = 0;
float *Matrix_da, *Matrix_db, *Matrix_dc;
dim3 threads(MUL_BLOCK_SIZE,MUL_BLOCK_SIZE);
int block_width = (row_a + MUL_BLOCK_SIZE -1)/MUL_BLOCK_SIZE;
int block_height = (col_b + MUL_BLOCK_SIZE -1)/MUL_BLOCK_SIZE;
dim3 blocks(block_width, block_height);
// Device allocation: to simplify the 16*16 tiled multiplication, pitched allocation (hipMallocPitch) is used to get aligned rows.
//hipMemcpy2D(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum hipMemcpyKind kind);
//dst: destination address, src: source address
//dpitch: actual row size (pitch) of the destination, in bytes
//spitch: actual row size (pitch) of the source, in bytes
//width: width of each copied row, in bytes
//height: number of rows to copy
//kind: direction of the copy
size_t pitch_a, pitch_b, pitch_c;
// Allocate pitched device memory so each row is aligned (padded to a multiple of 16 elements)
CUDA_SAFE_CALL(hipMallocPitch((void**) & Matrix_da, &pitch_a, sizeof(float) * col_a, row_a));
CUDA_SAFE_CALL(hipMallocPitch((void**) & Matrix_db, &pitch_b, sizeof(float) * col_b, row_b));
CUDA_SAFE_CALL(hipMallocPitch((void**) & Matrix_dc, &pitch_c, sizeof(float) * col_b, row_a));
memory += pitch_a * row_a + pitch_b * row_b + pitch_c * row_a;
/*cout<<"Pitch_a: "<<pitch_a<<endl;
cout<<"Pitch_b: "<<pitch_b<<endl;
cout<<"Pitch_c: "<<pitch_c<<endl*/;
CUDA_SAFE_CALL(hipMemcpy2D( Matrix_da, pitch_a, Matrix_a, sizeof(float)*col_a,
sizeof(float)*col_a, row_a, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy2D( Matrix_db, pitch_b, Matrix_b, sizeof(float)*col_b,
sizeof(float)*col_b, row_b, hipMemcpyHostToDevice));
//Kernel
hipLaunchKernelGGL(( Mult_kernel), dim3(blocks),dim3(threads), 0, 0, Matrix_da,row_a,col_a,pitch_a/sizeof(float),
Matrix_db,row_b,col_b,pitch_b/sizeof(float),
Matrix_dc,pitch_c/sizeof(float));
cutilCheckMsg("kernel launch failure");
// Copy the result back to the host
CUDA_SAFE_CALL(hipMemcpy2D(
Matrix_c, sizeof(float)*col_b, Matrix_dc, pitch_c,
sizeof(float)*col_b, row_a, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(Matrix_da));
CUDA_SAFE_CALL(hipFree(Matrix_db));
CUDA_SAFE_CALL(hipFree(Matrix_dc));
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
cout<<": "<<time<<" ms"<<std::endl;
cout<<": "<<memory/1024<<" KB"<<endl;
}
/************************************************************************/
/* Complex matrix multiplication
/*@param Matrix_a : input, matrix A data (host, 1D contiguous)
/*@param row_a : input, rows of A
/*@param col_a : input, columns of A
/*@param Matrix_b : input, matrix B data (host, 1D contiguous)
/*@param row_b : input, rows of B
/*@param col_b : input, columns of B
/*@param Matrix_c : output, matrix C = A*B
/************************************************************************/
extern "C"
void Matrix_MultiCom(const float2 *Matrix_a, int row_a, int col_a, const float2 *Matrix_b, int row_b, int col_b, float2 *Matrix_c)
{
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
float memory = 0;
float2 *Matrix_da, *Matrix_db, *Matrix_dc;
dim3 threads(MUL_BLOCK_SIZE,MUL_BLOCK_SIZE);
int block_width = (row_a + MUL_BLOCK_SIZE -1)/MUL_BLOCK_SIZE;
int block_height = (col_b + MUL_BLOCK_SIZE -1)/MUL_BLOCK_SIZE;
dim3 blocks(block_width, block_height);
// Device allocation: to simplify the 16*16 tiled multiplication, pitched allocation (hipMallocPitch) is used to get aligned rows.
//hipMemcpy2D(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum hipMemcpyKind kind);
//dst: destination address, src: source address
//dpitch: actual row size (pitch) of the destination, in bytes
//spitch: actual row size (pitch) of the source, in bytes
//width: width of each copied row, in bytes
//height: number of rows to copy
//kind: direction of the copy
size_t pitch_a, pitch_b, pitch_c;
// Allocate pitched device memory so each row is aligned (padded to a multiple of 16 elements)
CUDA_SAFE_CALL(hipMallocPitch((void**) & Matrix_da, &pitch_a, sizeof(float2) * col_a, row_a));
CUDA_SAFE_CALL(hipMallocPitch((void**) & Matrix_db, &pitch_b, sizeof(float2) * col_b, row_b));
CUDA_SAFE_CALL(hipMallocPitch((void**) & Matrix_dc, &pitch_c, sizeof(float2) * col_b, row_a));
memory += pitch_a * row_a + pitch_b * row_b + pitch_c * row_a;
//cout<<"Pitch_a: "<<pitch_a<<endl;
//cout<<"Pitch_b: "<<pitch_b<<endl;
//cout<<"Pitch_c: "<<pitch_c<<endl;
CUDA_SAFE_CALL(hipMemcpy2D( Matrix_da, pitch_a, Matrix_a, sizeof(float2)*col_a,
sizeof(float2)*col_a, row_a, hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy2D( Matrix_db, pitch_b, Matrix_b, sizeof(float2)*col_b,
sizeof(float2)*col_b, row_b, hipMemcpyHostToDevice));
//Kernel
hipLaunchKernelGGL(( Mult_kernel), dim3(blocks),dim3(threads), 0, 0, Matrix_da,row_a,col_a,pitch_a/sizeof(float2),
Matrix_db,row_b,col_b,pitch_b/sizeof(float2),
Matrix_dc,pitch_c/sizeof(float2));
cutilCheckMsg("kernel launch failure");
CUDA_SAFE_CALL(hipMemcpy2D(
Matrix_c, sizeof(float2)*col_b, Matrix_dc, pitch_c,
sizeof(float2)*col_b, row_a, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(Matrix_da));
CUDA_SAFE_CALL(hipFree(Matrix_db));
CUDA_SAFE_CALL(hipFree(Matrix_dc));
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
cout<<": "<<time<<" ms"<<std::endl;
cout<<": "<<memory/1024<<" KB"<<endl;
}
/************************************************************************/
/* Real matrix transpose
/*@param Matrix_in : input, matrix to transpose (host, 1D contiguous)
/*@param row : input, number of rows
/*@param col : input, number of columns
/*@param Matrix_out: output, transposed matrix
/************************************************************************/
extern "C"
void Matrix_Transpose(const float *Matrix_in, int row, int col, float *Matrix_out)
{
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
float memory = 0;
float* Matrix_din, *Matrix_dout;
dim3 threads(MUL_BLOCK_SIZE, MUL_BLOCK_SIZE);
int block_width = (row + MUL_BLOCK_SIZE -1)/MUL_BLOCK_SIZE;
int block_height = (col+ MUL_BLOCK_SIZE -1)/MUL_BLOCK_SIZE;
dim3 blocks(block_width,block_height);
CUDA_SAFE_CALL(hipMalloc((void**)&Matrix_dout,sizeof(float)*row*col));
CUDA_SAFE_CALL(hipMalloc((void**)&Matrix_din,sizeof(float)*row*col));
memory += 2 * sizeof(float)*row*col;
CUDA_SAFE_CALL(hipMemcpy(Matrix_din, Matrix_in, sizeof(float)*row*col,hipMemcpyHostToDevice));
hipLaunchKernelGGL(( Transpose_kernel<float>), dim3(blocks), dim3(threads), 0, 0, Matrix_din, row, col, Matrix_dout);
cutilCheckMsg("kernel launch failure");
CUDA_SAFE_CALL(hipMemcpy(Matrix_out, Matrix_dout, sizeof(float)*row*col,hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(Matrix_dout));
CUDA_SAFE_CALL(hipFree(Matrix_din));
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
cout<<": "<<time<<" ms"<<std::endl;
cout<<": "<<memory/1024<<" KB"<<endl;
}
/************************************************************************/
/* Complex matrix transpose
/*@param Matrix_in : input, matrix to transpose (host, 1D contiguous)
/*@param row : input, number of rows
/*@param col : input, number of columns
/*@param Matrix_out: output, transposed matrix
/************************************************************************/
extern "C"
void Matrix_TransposeCom(const float2 *Matrix_in, int row, int col, float2 *Matrix_out)
{
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
float memory = 0;
float2* Matrix_din, *Matrix_dout;
dim3 threads(MUL_BLOCK_SIZE, MUL_BLOCK_SIZE);
int block_width = (row + MUL_BLOCK_SIZE -1)/MUL_BLOCK_SIZE;
int block_height = (col+ MUL_BLOCK_SIZE -1)/MUL_BLOCK_SIZE;
dim3 blocks(block_width,block_height);
CUDA_SAFE_CALL(hipMalloc((void**)&Matrix_dout,sizeof(float2)*row*col));
CUDA_SAFE_CALL(hipMalloc((void**)&Matrix_din,sizeof(float2)*row*col));
memory += 2 * sizeof(float2)*row*col;
CUDA_SAFE_CALL(hipMemcpy(Matrix_din, Matrix_in, sizeof(float2)*row*col,hipMemcpyHostToDevice));
hipLaunchKernelGGL(( Transpose_kernel<float2>), dim3(blocks), dim3(threads), 0, 0, Matrix_din, row, col, Matrix_dout);
cutilCheckMsg("kernel launch failure");
CUDA_SAFE_CALL(hipMemcpy(Matrix_out, Matrix_dout, sizeof(float2)*row*col,hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(Matrix_dout));
CUDA_SAFE_CALL(hipFree(Matrix_din));
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
cout<<": "<<time<<" ms"<<std::endl;
cout<<": "<<memory/1024<<" KB"<<endl;
} | dd2e1c36bd2a43410da8e352be939e39ac2dd5db.cu | /************************************************************************/
/*2012-3 Sheng Yang
/*็ฉ้ตๅบๆฌๆไฝ CUDA ็จๅบ ๆ ธๅฝๆฐๅฎ็ฐ ๅ็บฟ็จๅๅๅ็ญ
/************************************************************************/
#include <cuda_runtime.h>
#include <cutil.h>
#include <iostream>
#include <cutil_inline.h>
#include <cufft.h>
using namespace std;
#define MIN_DATA -999999999 // sentinel "minimum" used when searching for maxima
#define BLOCK_SIZE 512 // 1D thread-block size
#define MUL_BLOCK_SIZE 16 // 2D thread-block (tile) size
#define KAHAN_SUMMATION_FORMULA 0 // whether to use Kahan summation to reduce accumulated rounding error
// Row-wise maximum of a matrix
// Basic idea: each block handles one row, so global-memory loads are essentially coalesced
// (the other row-wise kernels below follow the same scheme and are not re-explained).
// Shared memory is used for a parallel pairwise reduction:
/************************************************************************/
/* A-------A------A-------A
/* \ / \ /
/* 2A 2A
/* \ /
/* 4A
/************************************************************************/
// The reductions mentioned below all follow this pattern.
//@param data_in input matrix
//@param row number of rows
//@param column number of columns
//@param index output: flat index of each row's maximum
//@param value output: each row's maximum value
__global__ void Matrix_MaxofRow(float *data_in, int row, int column, int *index, float *value)
{
__shared__ float max[BLOCK_SIZE];
__shared__ float ind[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
max[tid] = MIN_DATA;
ind[tid] = -1;
if( tid >= column || bid >= row)
return;
unsigned int begin_addr, end_addr;
begin_addr = bid * column;
end_addr = begin_addr + column;
begin_addr += tid;
while(begin_addr < end_addr)
{
float temp = data_in[begin_addr];
if(max[tid] < temp)
{
max[tid] = temp;
ind[tid] = begin_addr;
}
begin_addr += blockDim.x;
}
__syncthreads();
for(unsigned int s = blockDim.x/2; s > 0; s >>= 1)
{
if(tid < s)
{
if(max[tid] < max[tid+s])
{
max[tid] = max[tid+s];
ind[tid] = ind[tid+s];
}
}
__syncthreads();
}
if(tid == 0)
{
value[bid] = max[0];
index[bid] = ind[0];
}
}
// Column-wise maximum of a matrix
// Basic idea: to keep global-memory accesses coalesced, each block handles 128 columns (it could be fewer).
// The column-wise kernels below are written for BLOCK_SIZE = 512 only; changing BLOCK_SIZE requires changing the kernels.
// Within a group of 128 columns, for a thread with ID ida, the threads with IDs
// ida, (ida+128)%512, (ida+256)%512 and (ida+384)%512 work on the same column,
// and a final in-block reduction yields the maximum of each of the 128 columns.
// The column-wise sum below uses the same idea and is not re-explained.
//@param data_in input matrix
//@param row number of rows
//@param column number of columns
//@param index output: flat index of each column's maximum
//@param value output: each column's maximum value
__global__ void Matrix_MaxofCol(float *data_in, int row, int column, int *index, float *value)
{
__shared__ float max[BLOCK_SIZE];
__shared__ int ind[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
max[tid] = MIN_DATA;
ind[tid] = -1;
int res = column - (bid + 1)*128;
unsigned int begin_addr = 128*bid + (tid>>7) * column + (tid&127);
unsigned int end_addr = row * column;
if( (tid&127) >= (res + 128))
return;
while(begin_addr < end_addr )
{
float temp = data_in[begin_addr];
if(max[tid] < temp)
{
max[tid] = temp;
ind[tid] = begin_addr;
}
begin_addr += 4* column;
}
__syncthreads();
if(tid < 256)
{
if(max[tid] < max[tid + 256])
{
max[tid] = max[tid + 256];
ind[tid] = ind[tid + 256];
}
}
__syncthreads();
if(tid < 128)
{
if(max[tid] < max[tid + 128])
{
max[tid] = max[tid + 128];
ind[tid] = ind[tid + 128];
}
}
__syncthreads();
if(tid < res+128 && tid < 128){
value[tid + bid * 128] = max[tid];
index[tid + bid * 128] = ind[tid];
}
}
// Row-wise sum (real)
//@param data_in input matrix
//@param row number of rows
//@param column number of columns
//@param value output: sum of each row
__global__ void Matrix_SumofRow(float *data_in, int row, int column, float *value)
{
__shared__ float sum[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
sum[tid] = 0;
if( tid >= column || bid >= row)
return;
unsigned int begin_addr, end_addr;
begin_addr = bid * column;
end_addr = begin_addr + column;
begin_addr += tid;
while(begin_addr < end_addr)
{
sum[tid] += data_in[begin_addr];
begin_addr += blockDim.x;
}
__syncthreads();
for(unsigned int s = blockDim.x/2; s > 0; s >>= 1)
{
if(tid < s)
{
sum[tid] += sum[tid + s];
}
__syncthreads();
}
if(tid == 0)
value[bid] = sum[0];
}
// Column-wise sum (real)
//@param data_in input matrix
//@param row number of rows
//@param column number of columns
//@param value output: sum of each column
__global__ void Matrix_SumofCol(float *data_in, int row, int column, float *value)
{
__shared__ float sum[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
sum[tid] = 0;
int res = column - (bid + 1)*128;
unsigned int begin_addr = 128*bid + (tid>>7) * column + (tid&127);
unsigned int end_addr = row * column;
if( (tid&127) >= (res + 128))
return;
while(begin_addr < end_addr )
{
sum[tid] += data_in[begin_addr];
begin_addr += 4* column;
}
__syncthreads();
if(tid < 256)
sum[tid] += sum[tid + 256];
__syncthreads();
if(tid < 128)
sum[tid] += sum[tid + 128];
__syncthreads();
if(tid < res+128 && tid < 128)
value[tid + bid * 128] = sum[tid];
}
// Total sum of a real matrix
// The matrix is first reduced row-wise (Matrix_SumofRow),
// then this kernel reduces the per-row partial sums within a single block.
//@param temp_value input: per-row partial sums
//@param num number of partial sums
//@param value output: total sum
__global__ void Matrix_SumofAll(float *temp_value, int num, float *value)
{
__shared__ float sum[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
sum[tid] = 0;
if( tid >= num)
return;
unsigned int begin_addr, end_addr;
begin_addr = 0;
end_addr = num;
begin_addr += tid;
while(begin_addr < end_addr)
{
sum[tid] += temp_value[begin_addr];
begin_addr += blockDim.x;
}
__syncthreads();
for(unsigned int s = blockDim.x/2; s > 0; s >>= 1)
{
if(tid < s)
{
sum[tid] += sum[tid + s];
}
__syncthreads();
}
if(tid == 0)
value[0] = sum[0];
}
// Row-wise sum (complex)
//@param data_in input matrix
//@param row number of rows
//@param column number of columns
//@param value output: sum of each row
__global__ void Matrix_SumofRow(float2 *data_in, int row, int column, float2 *value)
{
__shared__ float sum_R[BLOCK_SIZE];
__shared__ float sum_I[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
sum_R[tid] = 0;
sum_I[tid] = 0;
if( tid >= column || bid >= row)
return;
unsigned int begin_addr, end_addr;
begin_addr = bid * column;
end_addr = begin_addr + column;
begin_addr += tid;
while(begin_addr < end_addr)
{
float2 temp = data_in[begin_addr];
sum_R[tid] += temp.x;
sum_I[tid] += temp.y;
begin_addr += blockDim.x;
}
__syncthreads();
for(unsigned int s = blockDim.x/2; s > 0; s >>= 1)
{
if(tid < s)
{
sum_R[tid] += sum_R[tid + s];
sum_I[tid] += sum_I[tid + s];
}
__syncthreads();
}
if(tid == 0)
{
float2 temp;
temp.x = sum_R[0];
temp.y = sum_I[0];
value[bid] = temp;
}
}
// Column-wise sum (complex)
//@param data_in input matrix
//@param row number of rows
//@param column number of columns
//@param value output: sum of each column
__global__ void Matrix_SumofCol(float2 *data_in, int row, int column, float2 *value)
{
__shared__ float sum_I[BLOCK_SIZE];
__shared__ float sum_R[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
unsigned int bid = blockIdx.x;
sum_I[tid] = 0;
sum_R[tid] = 0;
int res = column - (bid + 1)*128;
unsigned int begin_addr = 128*bid + (tid>>7) * column + (tid&127);
unsigned int end_addr = row * column;
if( (tid&127) >= (res + 128))
return;
while(begin_addr < end_addr )
{
float2 temp = data_in[begin_addr];
sum_R[tid] += temp.x;
sum_I[tid] += temp.y;
begin_addr += 4* column;
}
__syncthreads();
if(tid < 256){
sum_R[tid] += sum_R[tid + 256];
sum_I[tid] += sum_I[tid + 256];
}
__syncthreads();
if(tid < 128){
sum_R[tid] += sum_R[tid + 128];
sum_I[tid] += sum_I[tid + 128];
}
__syncthreads();
if(tid < res+128 && tid < 128){
float2 temp;
temp.x = sum_R[tid];
temp.y = sum_I[tid];
value[tid + bid * 128] = temp;
}
}
// Total sum of a complex matrix (reduces the per-row partial sums in one block)
//@param temp_value input: per-row partial sums
//@param num number of partial sums
//@param value output: total sum
__global__ void Matrix_SumofAll(float2 *temp_value, int num, float2 *value)
{
__shared__ float sum_I[BLOCK_SIZE];
__shared__ float sum_R[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
sum_R[tid] = 0;
sum_I[tid] = 0;
if( tid >= num)
return;
unsigned int begin_addr, end_addr;
begin_addr = 0;
end_addr = num;
begin_addr += tid;
while(begin_addr < end_addr)
{
float2 temp = temp_value[begin_addr];
sum_R[tid] += temp.x;
sum_I[tid] += temp.y;
begin_addr += blockDim.x;
}
__syncthreads();
for(unsigned int s = blockDim.x/2; s > 0; s >>= 1)
{
if(tid < s)
{
sum_R[tid] += sum_R[tid + s];
sum_I[tid] += sum_I[tid + s];
}
__syncthreads();
}
if(tid == 0){
float2 temp;
temp.x = sum_R[0];
temp.y = sum_I[0];
value[0] = temp;
}
}
// Real matrix multiplication
// Basic idea: each block computes one 16*16 tile of the output matrix,
// using shared memory so that global-memory accesses are coalesced.
//@param Matrix_a input matrix A
//@param row_a rows of A
//@param col_a columns of A
//@param lda leading dimension of A (actual row size in device memory, in elements)
//@param Matrix_b input matrix B
//@param row_b rows of B
//@param col_b columns of B
//@param ldb leading dimension of B
//@param Matrix_c output matrix A*B
//@param ldc leading dimension of C
__global__ static void Mult_kernel( const float *Matrix_a, int row_a, int col_a, size_t lda,
const float *Matrix_b, int row_b, int col_b, size_t ldb,
float *Matrix_c, size_t ldc)
{
__shared__ float matrix_a[MUL_BLOCK_SIZE][MUL_BLOCK_SIZE];
__shared__ float matrix_b[MUL_BLOCK_SIZE][MUL_BLOCK_SIZE];
unsigned int tidr = threadIdx.x;
unsigned int tidc = threadIdx.y;
unsigned int bidr = blockIdx.x * MUL_BLOCK_SIZE;
unsigned int bidc = blockIdx.y * MUL_BLOCK_SIZE;
int i,j;
float result = 0;
float comp = 0;
for(j = 0; j < col_a; j += MUL_BLOCK_SIZE)
{
if(tidr + bidr < row_a && tidc + j < col_a)
matrix_a[tidr][tidc] = Matrix_a[(tidr + bidr) * lda + tidc + j];
else
matrix_a[tidr][tidc] = 0;
if(tidr + j < row_b && tidc + bidc < col_b)
matrix_b[tidr][tidc] = Matrix_b[(tidr + j) * ldb + tidc + bidc];
else
matrix_b[tidr][tidc] = 0;
__syncthreads();
if(!KAHAN_SUMMATION_FORMULA)
for(i = 0; i < MUL_BLOCK_SIZE; i++)
{
result += matrix_a[tidr][i] * matrix_b[i][tidc];
}
else
for(i = 0; i < MUL_BLOCK_SIZE; i++)
{
float t;
comp -= matrix_a[tidr][i] * matrix_b[i][tidc];
t = result - comp;
comp = (t - result) + comp;
result = t;
}
__syncthreads();
}
if(tidr + bidr < row_a)
Matrix_c[(tidr + bidr) * ldc + tidc + bidc] = result;
}
// Complex matrix multiplication
// Basic idea: each block computes one 16*16 tile of the output matrix,
// using shared memory so that global-memory accesses are coalesced.
//@param Matrix_a input matrix A
//@param row_a rows of A
//@param col_a columns of A
//@param lda leading dimension of A (actual row size in device memory, in elements)
//@param Matrix_b input matrix B
//@param row_b rows of B
//@param col_b columns of B
//@param ldb leading dimension of B
//@param Matrix_c output matrix A*B
//@param ldc leading dimension of C
__global__ static void Mult_kernel( const float2 *Matrix_a, int row_a, int col_a, size_t lda,
const float2 *Matrix_b, int row_b, int col_b, size_t ldb,
float2 *Matrix_c, size_t ldc)
{
__shared__ float2 matrix_a[MUL_BLOCK_SIZE][MUL_BLOCK_SIZE];
__shared__ float2 matrix_b[MUL_BLOCK_SIZE][MUL_BLOCK_SIZE];
unsigned int tidr = threadIdx.x;
unsigned int tidc = threadIdx.y;
unsigned int bidr = blockIdx.x * MUL_BLOCK_SIZE;
unsigned int bidc = blockIdx.y * MUL_BLOCK_SIZE;
int i,j;
float2 result = make_float2(0,0);
float2 comp = make_float2(0,0);
for(j = 0; j < col_a; j += MUL_BLOCK_SIZE)
{
if(tidr + bidr < row_a && tidc + j < col_a)
matrix_a[tidr][tidc] = Matrix_a[(tidr + bidr) * lda + tidc + j];
else
matrix_a[tidr][tidc] = make_float2(0,0);
if(tidr + j < row_b && tidc + bidc < col_b)
matrix_b[tidr][tidc] = Matrix_b[(tidr + j) * ldb + tidc + bidc];
else
matrix_b[tidr][tidc] = make_float2(0,0);
__syncthreads();
if(!KAHAN_SUMMATION_FORMULA)
for(i = 0; i < MUL_BLOCK_SIZE; i++)
{
result.x+= matrix_a[tidr][i].x * matrix_b[i][tidc].x;
result.y+= matrix_a[tidr][i].y * matrix_b[i][tidc].y;
}
else
for(i = 0; i < MUL_BLOCK_SIZE; i++)
{
float2 t;
comp.x -= matrix_a[tidr][i].x * matrix_b[i][tidc].x;
comp.y -= matrix_a[tidr][i].y * matrix_b[i][tidc].y;
t.x = result.x - comp.x;
t.y = result.y - comp.y;
comp.x = (t.x - result.x) + comp.x;
comp.y = (t.y - result.y) + comp.y;
result = t;
}
__syncthreads();
}
if(tidr + bidr < row_a)
Matrix_c[(tidr + bidr) * ldc + tidc + bidc] = result;
}
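// Added note: the float2 kernel above accumulates the products component-wise
// (result.x += a.x*b.x, result.y += a.y*b.y); it does not perform a full complex
// multiplication, which would be (a.x*b.x - a.y*b.y, a.x*b.y + a.y*b.x).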
// Matrix transpose
// Basic idea: use a 16*16 shared-memory tile so that both reads and writes are coalesced.
//@param T element type
//@param Matrix_in input matrix
//@param row number of rows
//@param col number of columns
//@param Matrix_out output: transposed matrix
template<class T>
__global__ static void Transpose_kernel(const T * Matrix_in, int row, int col, T * Matrix_out)
{
__shared__ T temp[MUL_BLOCK_SIZE][MUL_BLOCK_SIZE + 1];
unsigned int xIndex = blockIdx.x * MUL_BLOCK_SIZE + threadIdx.x;
unsigned int yIndex = blockIdx.y * MUL_BLOCK_SIZE + threadIdx.y;
if((xIndex < col) && (yIndex < row))
{
unsigned int index_in = yIndex * col + xIndex;
temp[threadIdx.y][threadIdx.x] = Matrix_in[index_in];
}
__syncthreads();
xIndex = MUL_BLOCK_SIZE*blockIdx.y + threadIdx.x;
yIndex = MUL_BLOCK_SIZE*blockIdx.x + threadIdx.y;
if((xIndex < row) && (yIndex < col))
{
unsigned int index_out = yIndex * row + xIndex;
Matrix_out[index_out] = temp[threadIdx.x][threadIdx.y];
}
}
/************************************************************************/
/* Matrix maximum and its location; flag = 0: per row, flag = 1: per column
/* @param data_in : input, host matrix data stored contiguously in 1D
/* @param row : input, number of rows
/* @param col : input, number of columns
/* @param index : output, flat index of the maximum of each row/column
/* @param value : output, maximum of each row/column
/* @param flag : input, 0 = row-wise, 1 = column-wise
/************************************************************************/
extern "C"
void Matrix_Max(float *data_in, int row, int col,
int *index, float *value, int flag)
{
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
float memory;
float *d_data;
int *d_index;
float *d_value;
int res_num = 0;
if(flag == 0)
res_num = row;
else
res_num = col;
CUDA_SAFE_CALL(cudaMalloc((void**)& d_data, sizeof(float)* row * col));
CUDA_SAFE_CALL(cudaMalloc((void**)& d_index, sizeof(int)* res_num));
CUDA_SAFE_CALL(cudaMalloc((void**)& d_value, sizeof(float)* res_num));
memory = sizeof(float)*row*col + sizeof(int)*res_num + sizeof(float)*res_num;
CUDA_SAFE_CALL(cudaMemcpy(d_data, data_in,sizeof(float)*row*col, cudaMemcpyHostToDevice));
if(flag == 0)
{
dim3 block_size(BLOCK_SIZE,1,1);
dim3 grid_size(row,1,1);
Matrix_MaxofRow<<<grid_size,block_size>>>(d_data, row, col,d_index, d_value);
cutilCheckMsg("kernel launch failure");
}
else
{
dim3 block_size(BLOCK_SIZE,1,1);
dim3 gird_size(col/128+1,1,1);
Matrix_MaxofCol<<<gird_size, block_size>>>(d_data, row, col, d_index, d_value);
cutilCheckMsg("kernel launch failure");
}
CUDA_SAFE_CALL(cudaMemcpy(index, d_index, res_num * sizeof(int), cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(value, d_value, res_num * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(d_data));
CUDA_SAFE_CALL(cudaFree(d_index));
CUDA_SAFE_CALL(cudaFree(d_value));
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cout<<"ๅฝๆฐ่ฟ่กๆถ้ด: "<<time<<" ms"<<std::endl;
cout<<"ๅฝๆฐไฝฟ็จๆพๅญ: "<<memory/1024<<" KB"<<endl;
}
/************************************************************************/
/* Real matrix sum; flag = 0: per row, flag = 1: per column, flag = 2: sum of all elements
/*@param data_in : input, host matrix data stored contiguously in 1D
/*@param row : input, number of rows
/*@param col : input, number of columns
/*@param value : output, sum of each row/column (or the total sum)
/*@param flag : input, 0 = row-wise, 1 = column-wise, 2 = all elements
/************************************************************************/
extern "C"
void Matrix_Sum(float *data_in, int row, int col, float *value, int flag)
{
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
float memory;
float *d_data;
float *d_value;
int res_num = 0;
if(flag == 0)
res_num = row;
else if(flag == 1)
res_num = col;
else
res_num = 1;
CUDA_SAFE_CALL(cudaMalloc((void**)& d_data, sizeof(float)* row * col));
CUDA_SAFE_CALL(cudaMalloc((void**)& d_value, sizeof(float)* res_num));
memory = sizeof(float)*row*col + sizeof(float)*res_num;
CUDA_SAFE_CALL(cudaMemcpy(d_data, data_in,sizeof(float)*row*col, cudaMemcpyHostToDevice));
if(flag == 0)
{
dim3 block_size(BLOCK_SIZE,1,1);
dim3 grid_size(row,1,1);
Matrix_SumofRow<<<grid_size,block_size>>>(d_data, row, col, d_value);
cutilCheckMsg("kernel launch failure");
}
else if(flag == 1)
{
dim3 block_size(BLOCK_SIZE,1,1);
dim3 gird_size(col/128+1,1,1);
Matrix_SumofCol<<<gird_size, block_size>>>(d_data, row, col, d_value);
cutilCheckMsg("kernel launch failure");
}
else
{
dim3 block_size(BLOCK_SIZE,1,1);
dim3 gird_size(row,1,1);
float *temp_value;
cudaMalloc((void**)&temp_value, sizeof(float)* row);
memory += sizeof(float)*row;
Matrix_SumofRow<<<gird_size, block_size>>>(d_data, row, col, temp_value);
cutilCheckMsg("kernel launch failure");
Matrix_SumofAll<<<1,block_size>>>(temp_value,row,d_value);
cutilCheckMsg("kernel launch failure");
}
CUDA_SAFE_CALL(cudaMemcpy(value, d_value, res_num * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(d_data));
CUDA_SAFE_CALL(cudaFree(d_value));
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cout<<"ๅฝๆฐ่ฟ่กๆถ้ด: "<<time<<" ms"<<std::endl;
cout<<"ๅฝๆฐไฝฟ็จๆพๅญ: "<<memory/1024<<" KB"<<endl;
}
/************************************************************************/
/* Complex matrix sum; flag = 0: per row, flag = 1: per column, flag = 2: sum of all elements
/*@param data_in : input, host matrix data stored contiguously in 1D
/*@param row : input, number of rows
/*@param col : input, number of columns
/*@param value : output, sum of each row/column (or the total sum)
/*@param flag : input, 0 = row-wise, 1 = column-wise, 2 = all elements
/************************************************************************/
extern "C"
void Matrix_SumCom(cuComplex *data_in, int row, int col, cuComplex *value, int flag)
{
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
float memory;
float2 *d_data;
float2 *d_value;
int res_num = 0;
if(flag == 0)
res_num = row;
else if(flag == 1)
res_num = col;
else
res_num = 1;
CUDA_SAFE_CALL(cudaMalloc((void**)& d_data, sizeof(float2)* row * col));
CUDA_SAFE_CALL(cudaMalloc((void**)& d_value, sizeof(float2)* res_num));
memory = sizeof(float2)*row*col + sizeof(float2)*res_num;
CUDA_SAFE_CALL(cudaMemcpy(d_data, data_in,sizeof(float2)*row*col, cudaMemcpyHostToDevice));
if(flag == 0)
{
dim3 block_size(BLOCK_SIZE,1,1);
dim3 grid_size(row,1,1);
Matrix_SumofRow<<<grid_size,block_size>>>(d_data, row, col, d_value);
cutilCheckMsg("kernel launch failure");
}
else if(flag == 1)
{
dim3 block_size(BLOCK_SIZE,1,1);
dim3 gird_size(col/128+1,1,1);
Matrix_SumofCol<<<gird_size, block_size>>>(d_data, row, col, d_value);
cutilCheckMsg("kernel launch failure");
}
else
{
dim3 block_size(BLOCK_SIZE,1,1);
dim3 gird_size(row,1,1);
float2 *temp_value;
cudaMalloc((void**)&temp_value, sizeof(float2)* row);
memory += sizeof(float2) * row;
Matrix_SumofRow<<<gird_size, block_size>>>(d_data, row, col, temp_value);
cutilCheckMsg("kernel launch failure");
Matrix_SumofAll<<<1,block_size>>>(temp_value,row,d_value);
cutilCheckMsg("kernel launch failure");
}
CUDA_SAFE_CALL(cudaMemcpy(value, d_value, res_num * sizeof(float2), cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(d_data));
CUDA_SAFE_CALL(cudaFree(d_value));
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cout<<"ๅฝๆฐ่ฟ่กๆถ้ด: "<<time<<" ms"<<std::endl;
cout<<"ๅฝๆฐไฝฟ็จๆพๅญ: "<<memory/1024<<" KB"<<endl;
}
/************************************************************************/
/* Real matrix multiplication
/*@param Matrix_a : input, matrix A data (host, 1D contiguous)
/*@param row_a : input, rows of A
/*@param col_a : input, columns of A
/*@param Matrix_b : input, matrix B data (host, 1D contiguous)
/*@param row_b : input, rows of B
/*@param col_b : input, columns of B
/*@param Matrix_c : output, matrix C = A*B
/*@param flag : output, 0 = OK, 1 = inner dimensions of the factors do not match (not present in this signature)
/************************************************************************/
extern "C"
void Matrix_Multi(const float *Matrix_a, int row_a, int col_a, const float *Matrix_b, int row_b, int col_b, float *Matrix_c)
{
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
float memory = 0;
float *Matrix_da, *Matrix_db, *Matrix_dc;
dim3 threads(MUL_BLOCK_SIZE,MUL_BLOCK_SIZE);
int block_width = (row_a + MUL_BLOCK_SIZE -1)/MUL_BLOCK_SIZE;
int block_height = (col_b + MUL_BLOCK_SIZE -1)/MUL_BLOCK_SIZE;
dim3 blocks(block_width, block_height);
// Device allocation: to simplify the 16*16 tiled multiplication, cudaMallocPitch is used to get aligned rows.
//cudaMemcpy2D(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind);
//dst: destination address, src: source address
//dpitch: actual row size (pitch) of the destination, in bytes
//spitch: actual row size (pitch) of the source, in bytes
//width: width of each copied row, in bytes
//height: number of rows to copy
//kind: direction of the copy
size_t pitch_a, pitch_b, pitch_c;
// For alignment (rows padded to a multiple of 16 elements), allocate pitched device memory
CUDA_SAFE_CALL(cudaMallocPitch((void**) & Matrix_da, &pitch_a, sizeof(float) * col_a, row_a));
CUDA_SAFE_CALL(cudaMallocPitch((void**) & Matrix_db, &pitch_b, sizeof(float) * col_b, row_b));
CUDA_SAFE_CALL(cudaMallocPitch((void**) & Matrix_dc, &pitch_c, sizeof(float) * col_b, row_a));
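// Added illustration (values are hypothetical): with col_a = 100 floats a row occupies
// 400 bytes, but cudaMallocPitch typically rounds pitch_a up to an alignment boundary
// such as 512 bytes, so the kernel receives lda = pitch_a / sizeof(float) = 128 rather than 100.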
memory += pitch_a * row_a + pitch_b * row_b + pitch_c * row_a;
/*cout<<"Pitch_a: "<<pitch_a<<endl;
cout<<"Pitch_b: "<<pitch_b<<endl;
cout<<"Pitch_c: "<<pitch_c<<endl*/;
CUDA_SAFE_CALL(cudaMemcpy2D( Matrix_da, pitch_a, Matrix_a, sizeof(float)*col_a,
sizeof(float)*col_a, row_a, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy2D( Matrix_db, pitch_b, Matrix_b, sizeof(float)*col_b,
sizeof(float)*col_b, row_b, cudaMemcpyHostToDevice));
//Kernel
Mult_kernel<<<blocks,threads>>>(Matrix_da,row_a,col_a,pitch_a/sizeof(float),
Matrix_db,row_b,col_b,pitch_b/sizeof(float),
Matrix_dc,pitch_c/sizeof(float));
cutilCheckMsg("kernel launch failure");
// Copy the result back to the host
CUDA_SAFE_CALL(cudaMemcpy2D(
Matrix_c, sizeof(float)*col_b, Matrix_dc, pitch_c,
sizeof(float)*col_b, row_a, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(Matrix_da));
CUDA_SAFE_CALL(cudaFree(Matrix_db));
CUDA_SAFE_CALL(cudaFree(Matrix_dc));
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cout<<"ๅฝๆฐ่ฟ่กๆถ้ด: "<<time<<" ms"<<std::endl;
cout<<"ๅฝๆฐไฝฟ็จๆพๅญ: "<<memory/1024<<" KB"<<endl;
}
/************************************************************************/
/* Complex matrix multiplication
/*@param Matrix_a : input, matrix A data (host, 1D contiguous)
/*@param row_a : input, rows of A
/*@param col_a : input, columns of A
/*@param Matrix_b : input, matrix B data (host, 1D contiguous)
/*@param row_b : input, rows of B
/*@param col_b : input, columns of B
/*@param Matrix_c : output, matrix C = A*B
/************************************************************************/
extern "C"
void Matrix_MultiCom(const float2 *Matrix_a, int row_a, int col_a, const float2 *Matrix_b, int row_b, int col_b, float2 *Matrix_c)
{
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
float memory = 0;
float2 *Matrix_da, *Matrix_db, *Matrix_dc;
dim3 threads(MUL_BLOCK_SIZE,MUL_BLOCK_SIZE);
int block_width = (row_a + MUL_BLOCK_SIZE -1)/MUL_BLOCK_SIZE;
int block_height = (col_b + MUL_BLOCK_SIZE -1)/MUL_BLOCK_SIZE;
dim3 blocks(block_width, block_height);
// Device allocation: to simplify the 16*16 tiled multiplication, cudaMallocPitch is used to get aligned rows.
//cudaMemcpy2D(void *dst, size_t dpitch, const void *src, size_t spitch, size_t width, size_t height, enum cudaMemcpyKind kind);
//dst: destination address, src: source address
//dpitch: actual row size (pitch) of the destination, in bytes
//spitch: actual row size (pitch) of the source, in bytes
//width: width of each copied row, in bytes
//height: number of rows to copy
//kind: direction of the copy
size_t pitch_a, pitch_b, pitch_c;
// For alignment (rows padded to a multiple of 16 elements), allocate pitched device memory
CUDA_SAFE_CALL(cudaMallocPitch((void**) & Matrix_da, &pitch_a, sizeof(float2) * col_a, row_a));
CUDA_SAFE_CALL(cudaMallocPitch((void**) & Matrix_db, &pitch_b, sizeof(float2) * col_b, row_b));
CUDA_SAFE_CALL(cudaMallocPitch((void**) & Matrix_dc, &pitch_c, sizeof(float2) * col_b, row_a));
memory += pitch_a * row_a + pitch_b * row_b + pitch_c * row_a;
//cout<<"Pitch_a: "<<pitch_a<<endl;
//cout<<"Pitch_b: "<<pitch_b<<endl;
//cout<<"Pitch_c: "<<pitch_c<<endl;
CUDA_SAFE_CALL(cudaMemcpy2D( Matrix_da, pitch_a, Matrix_a, sizeof(float2)*col_a,
sizeof(float2)*col_a, row_a, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy2D( Matrix_db, pitch_b, Matrix_b, sizeof(float2)*col_b,
sizeof(float2)*col_b, row_b, cudaMemcpyHostToDevice));
//Kernel
Mult_kernel<<<blocks,threads>>>(Matrix_da,row_a,col_a,pitch_a/sizeof(float2),
Matrix_db,row_b,col_b,pitch_b/sizeof(float2),
Matrix_dc,pitch_c/sizeof(float2));
cutilCheckMsg("kernel launch failure");
CUDA_SAFE_CALL(cudaMemcpy2D(
Matrix_c, sizeof(float2)*col_b, Matrix_dc, pitch_c,
sizeof(float2)*col_b, row_a, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(Matrix_da));
CUDA_SAFE_CALL(cudaFree(Matrix_db));
CUDA_SAFE_CALL(cudaFree(Matrix_dc));
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cout<<"ๅฝๆฐ่ฟ่กๆถ้ด: "<<time<<" ms"<<std::endl;
cout<<"ๅฝๆฐไฝฟ็จๆพๅญ: "<<memory/1024<<" KB"<<endl;
}
/************************************************************************/
/* Real matrix transpose
/*@param Matrix_in : input, matrix to transpose (host, 1D contiguous)
/*@param row : input, number of rows
/*@param col : input, number of columns
/*@param Matrix_out: output, transposed matrix
/************************************************************************/
extern "C"
void Matrix_Transpose(const float *Matrix_in, int row, int col, float *Matrix_out)
{
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
float memory = 0;
float* Matrix_din, *Matrix_dout;
dim3 threads(MUL_BLOCK_SIZE, MUL_BLOCK_SIZE);
int block_width = (row + MUL_BLOCK_SIZE -1)/MUL_BLOCK_SIZE;
int block_height = (col+ MUL_BLOCK_SIZE -1)/MUL_BLOCK_SIZE;
dim3 blocks(block_width,block_height);
CUDA_SAFE_CALL(cudaMalloc((void**)&Matrix_dout,sizeof(float)*row*col));
CUDA_SAFE_CALL(cudaMalloc((void**)&Matrix_din,sizeof(float)*row*col));
memory += 2 * sizeof(float)*row*col;
CUDA_SAFE_CALL(cudaMemcpy(Matrix_din, Matrix_in, sizeof(float)*row*col,cudaMemcpyHostToDevice));
Transpose_kernel<float><<<blocks, threads>>>(Matrix_din, row, col, Matrix_dout);
cutilCheckMsg("kernel launch failure");
CUDA_SAFE_CALL(cudaMemcpy(Matrix_out, Matrix_dout, sizeof(float)*row*col,cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(Matrix_dout));
CUDA_SAFE_CALL(cudaFree(Matrix_din));
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cout<<"ๅฝๆฐ่ฟ่กๆถ้ด: "<<time<<" ms"<<std::endl;
cout<<"ๅฝๆฐไฝฟ็จๆพๅญ: "<<memory/1024<<" KB"<<endl;
}
/************************************************************************/
/* Complex matrix transpose
/*@param Matrix_in : input, matrix to transpose (host, 1D contiguous)
/*@param row : input, number of rows
/*@param col : input, number of columns
/*@param Matrix_out: output, transposed matrix
/************************************************************************/
extern "C"
void Matrix_TransposeCom(const float2 *Matrix_in, int row, int col, float2 *Matrix_out)
{
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
float memory = 0;
float2* Matrix_din, *Matrix_dout;
dim3 threads(MUL_BLOCK_SIZE, MUL_BLOCK_SIZE);
int block_width = (row + MUL_BLOCK_SIZE -1)/MUL_BLOCK_SIZE;
int block_height = (col+ MUL_BLOCK_SIZE -1)/MUL_BLOCK_SIZE;
dim3 blocks(block_width,block_height);
CUDA_SAFE_CALL(cudaMalloc((void**)&Matrix_dout,sizeof(float2)*row*col));
CUDA_SAFE_CALL(cudaMalloc((void**)&Matrix_din,sizeof(float2)*row*col));
memory += 2 * sizeof(float2)*row*col;
CUDA_SAFE_CALL(cudaMemcpy(Matrix_din, Matrix_in, sizeof(float2)*row*col,cudaMemcpyHostToDevice));
Transpose_kernel<float2><<<blocks, threads>>>(Matrix_din, row, col, Matrix_dout);
cutilCheckMsg("kernel launch failure");
CUDA_SAFE_CALL(cudaMemcpy(Matrix_out, Matrix_dout, sizeof(float2)*row*col,cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(Matrix_dout));
CUDA_SAFE_CALL(cudaFree(Matrix_din));
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cout<<"Function execution time: "<<time<<" ms"<<std::endl;
cout<<"Function device memory usage: "<<memory/1024<<" KB"<<endl;
} |
728b01d73dbf6b300342a5b42c5d937c62115ccc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
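  Dividing that cdf by its final entry (one common normalization) gives
  [4/14 11/14 14/14] ~= [0.29 0.79 1.00], i.e. values already compressed
  into the [0, 1] range described above.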
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "utils.h"
#include <limits.h>
#include <float.h>
#include <math.h>
#include <stdio.h>
__global__ void reduce_min_kernel(const float* const d_curr_in,
float* d_curr_out,
const size_t size)
{
extern __shared__ float sdata[];
int myID = threadIdx.x + blockIdx.x * blockDim.x;
int tid = threadIdx.x;
if(myID < size) {
sdata[tid] = d_curr_in[myID];
} else {
        // pad inactive slots with +FLT_MAX so they cannot win the min reduction
        sdata[tid] = FLT_MAX;
}
__syncthreads();
if(myID >= size){
return;
}
for(unsigned int s = blockDim.x / 2 ; s>0; s /= 2){
if(tid<s){
sdata[tid] = min(sdata[tid],sdata[tid + s]);
}
__syncthreads(); //ensure all adds at one iteration are done
}
if (tid == 0){
d_curr_out[blockIdx.x] = sdata[0];
}
}
__global__ void reduce_max_kernel(const float* const d_curr_in,
float* d_curr_out,
const size_t size)
{
extern __shared__ float sdata[];
int myID = threadIdx.x + blockIdx.x * blockDim.x;
int tid = threadIdx.x;
if(myID < size) {
sdata[tid] = d_curr_in[myID];
} else {
sdata[tid] = -FLT_MAX;
}
__syncthreads();
if(myID >= size){
return;
}
for(unsigned int s = blockDim.x / 2 ; s>0; s /= 2){
if(tid<s){
sdata[tid] = max(sdata[tid],sdata[tid + s]);
}
__syncthreads(); //ensure all adds at one iteration are done
}
if (tid == 0){
d_curr_out[blockIdx.x] = sdata[0];
}
}
int get_max_size(int n, int d) {
return (int)ceil( (float)n/(float)d ) + 1;
}
float reduce_minmax(const float* const d_in,
const size_t numRows,
const size_t numCols,
int minmax)
{
const dim3 blockSize(32);
size_t size = numRows * numCols;
float* d_curr_in;
float* d_curr_out;
checkCudaErrors(hipMalloc(&d_curr_in, sizeof(float) * size));
checkCudaErrors(hipMemcpy(d_curr_in, d_in, sizeof(float) * size, hipMemcpyHostToDevice));
size_t current_size = size;
dim3 gridSize(get_max_size(current_size,blockSize.x));
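    // repeatedly launch block-level reductions, shrinking the problem each pass,
    // until the remaining partial results fit inside a single block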
while(1){
checkCudaErrors(hipMalloc(&d_curr_out, sizeof(float) * gridSize.x));
if (minmax == 1){
hipLaunchKernelGGL(( reduce_max_kernel), dim3(gridSize), dim3(blockSize), sizeof(float) * blockSize.x, 0,
d_curr_in,
d_curr_out,
current_size);
}else{
hipLaunchKernelGGL(( reduce_min_kernel), dim3(gridSize), dim3(blockSize), sizeof(float) * blockSize.x, 0,
d_curr_in,
d_curr_out,
current_size);
}
using namespace std;
std::cout << current_size << " " << gridSize.x << std::endl;
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(d_curr_in));
d_curr_in = d_curr_out;
if (current_size < blockSize.x){
break;
}
current_size = get_max_size(current_size,blockSize.x);
gridSize.x = get_max_size(current_size,blockSize.x);
}
float h_out;
hipMemcpy(&h_out, d_curr_out, sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_curr_out);
return h_out;
}
__global__
void histogram_kernel(unsigned int* d_bins, const float* d_in, const int bin_count, const float lum_min, const float lum_max, const int size) {
int mid = threadIdx.x + blockDim.x * blockIdx.x;
if(mid >= size)
return;
float lum_range = lum_max - lum_min;
    int bin = ((d_in[mid]-lum_min) / lum_range) * bin_count;
    // clamp: the element equal to lum_max would otherwise index one past the last bin
    if (bin >= bin_count) bin = bin_count - 1;
    atomicAdd(&d_bins[bin], 1);
}
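// scan_kernel below is a Hillis-Steele style inclusive prefix sum done in place;
// it is only correct when all numBins elements live in a single thread block,
// since __syncthreads() does not synchronize across blocks.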
__global__
void scan_kernel(unsigned int* d_bins, int size) {
int mid = threadIdx.x + blockDim.x * blockIdx.x;
if(mid >= size)
return;
for(int s = 1; s <= size; s *= 2) {
int spot = mid - s;
unsigned int val = 0;
if(spot >= 0)
val = d_bins[spot];
__syncthreads();
if(spot >= 0)
d_bins[mid] += val;
__syncthreads();
}
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
min_logLum = reduce_minmax(d_logLuminance, numRows, numCols, 0);
max_logLum = reduce_minmax(d_logLuminance, numRows, numCols, 1);
size_t size = numRows*numCols;
unsigned int* d_bins;
size_t histo_size = sizeof(unsigned int)*numBins;
checkCudaErrors(hipMalloc(&d_bins, histo_size));
checkCudaErrors(hipMemset(d_bins, 0, histo_size));
dim3 thread_dim(1024);
dim3 hist_block_dim(get_max_size(size, thread_dim.x));
hipLaunchKernelGGL(( histogram_kernel), dim3(hist_block_dim), dim3(thread_dim), 0, 0, d_bins, d_logLuminance, numBins, min_logLum, max_logLum, size);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
unsigned int h_out[100];
hipMemcpy(&h_out, d_bins, sizeof(unsigned int)*100, hipMemcpyDeviceToHost);
for(int i = 0; i < 100; i++)
printf("hist out %d\n", h_out[i]);
dim3 scan_block_dim(get_max_size(numBins, thread_dim.x));
hipLaunchKernelGGL(( scan_kernel), dim3(scan_block_dim), dim3(thread_dim), 0, 0, d_bins, numBins);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipMemcpy(&h_out, d_bins, sizeof(unsigned int)*100, hipMemcpyDeviceToHost);
for(int i = 0; i < 100; i++)
printf("cdf out %d\n", h_out[i]);
hipMemcpy(d_cdf, d_bins, histo_size, hipMemcpyDeviceToDevice);
checkCudaErrors(hipFree(d_bins));
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
}
| 728b01d73dbf6b300342a5b42c5d937c62115ccc.cu | /* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
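  Dividing that cdf by its final entry (one common normalization) gives
  [4/14 11/14 14/14] ~= [0.29 0.79 1.00], i.e. values already compressed
  into the [0, 1] range described above.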
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "utils.h"
#include <limits.h>
#include <float.h>
#include <math.h>
#include <stdio.h>
__global__ void reduce_min_kernel(const float* const d_curr_in,
float* d_curr_out,
const size_t size)
{
extern __shared__ float sdata[];
int myID = threadIdx.x + blockIdx.x * blockDim.x;
int tid = threadIdx.x;
if(myID < size) {
sdata[tid] = d_curr_in[myID];
} else {
        // pad inactive slots with +FLT_MAX so they cannot win the min reduction
        sdata[tid] = FLT_MAX;
}
__syncthreads();
if(myID >= size){
return;
}
for(unsigned int s = blockDim.x / 2 ; s>0; s /= 2){
if(tid<s){
sdata[tid] = min(sdata[tid],sdata[tid + s]);
}
__syncthreads(); //ensure all adds at one iteration are done
}
if (tid == 0){
d_curr_out[blockIdx.x] = sdata[0];
}
}
__global__ void reduce_max_kernel(const float* const d_curr_in,
float* d_curr_out,
const size_t size)
{
extern __shared__ float sdata[];
int myID = threadIdx.x + blockIdx.x * blockDim.x;
int tid = threadIdx.x;
if(myID < size) {
sdata[tid] = d_curr_in[myID];
} else {
sdata[tid] = -FLT_MAX;
}
__syncthreads();
if(myID >= size){
return;
}
for(unsigned int s = blockDim.x / 2 ; s>0; s /= 2){
if(tid<s){
sdata[tid] = max(sdata[tid],sdata[tid + s]);
}
__syncthreads(); //ensure all adds at one iteration are done
}
if (tid == 0){
d_curr_out[blockIdx.x] = sdata[0];
}
}
int get_max_size(int n, int d) {
return (int)ceil( (float)n/(float)d ) + 1;
}
float reduce_minmax(const float* const d_in,
const size_t numRows,
const size_t numCols,
int minmax)
{
const dim3 blockSize(32);
size_t size = numRows * numCols;
float* d_curr_in;
float* d_curr_out;
checkCudaErrors(cudaMalloc(&d_curr_in, sizeof(float) * size));
checkCudaErrors(cudaMemcpy(d_curr_in, d_in, sizeof(float) * size, cudaMemcpyHostToDevice));
size_t current_size = size;
dim3 gridSize(get_max_size(current_size,blockSize.x));
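    // repeatedly launch block-level reductions, shrinking the problem each pass,
    // until the remaining partial results fit inside a single block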
while(1){
checkCudaErrors(cudaMalloc(&d_curr_out, sizeof(float) * gridSize.x));
if (minmax == 1){
reduce_max_kernel<<<gridSize, blockSize, sizeof(float) * blockSize.x>>>(
d_curr_in,
d_curr_out,
current_size);
}else{
reduce_min_kernel<<<gridSize, blockSize, sizeof(float) * blockSize.x>>>(
d_curr_in,
d_curr_out,
current_size);
}
using namespace std;
std::cout << current_size << " " << gridSize.x << std::endl;
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(d_curr_in));
d_curr_in = d_curr_out;
if (current_size < blockSize.x){
break;
}
current_size = get_max_size(current_size,blockSize.x);
gridSize.x = get_max_size(current_size,blockSize.x);
}
float h_out;
cudaMemcpy(&h_out, d_curr_out, sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_curr_out);
return h_out;
}
__global__
void histogram_kernel(unsigned int* d_bins, const float* d_in, const int bin_count, const float lum_min, const float lum_max, const int size) {
int mid = threadIdx.x + blockDim.x * blockIdx.x;
if(mid >= size)
return;
float lum_range = lum_max - lum_min;
    int bin = ((d_in[mid]-lum_min) / lum_range) * bin_count;
    // clamp: the element equal to lum_max would otherwise index one past the last bin
    if (bin >= bin_count) bin = bin_count - 1;
    atomicAdd(&d_bins[bin], 1);
}
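// scan_kernel below is a Hillis-Steele style inclusive prefix sum done in place;
// it is only correct when all numBins elements live in a single thread block,
// since __syncthreads() does not synchronize across blocks.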
__global__
void scan_kernel(unsigned int* d_bins, int size) {
int mid = threadIdx.x + blockDim.x * blockIdx.x;
if(mid >= size)
return;
for(int s = 1; s <= size; s *= 2) {
int spot = mid - s;
unsigned int val = 0;
if(spot >= 0)
val = d_bins[spot];
__syncthreads();
if(spot >= 0)
d_bins[mid] += val;
__syncthreads();
}
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
min_logLum = reduce_minmax(d_logLuminance, numRows, numCols, 0);
max_logLum = reduce_minmax(d_logLuminance, numRows, numCols, 1);
size_t size = numRows*numCols;
unsigned int* d_bins;
size_t histo_size = sizeof(unsigned int)*numBins;
checkCudaErrors(cudaMalloc(&d_bins, histo_size));
checkCudaErrors(cudaMemset(d_bins, 0, histo_size));
dim3 thread_dim(1024);
dim3 hist_block_dim(get_max_size(size, thread_dim.x));
histogram_kernel<<<hist_block_dim, thread_dim>>>(d_bins, d_logLuminance, numBins, min_logLum, max_logLum, size);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
unsigned int h_out[100];
cudaMemcpy(&h_out, d_bins, sizeof(unsigned int)*100, cudaMemcpyDeviceToHost);
for(int i = 0; i < 100; i++)
printf("hist out %d\n", h_out[i]);
dim3 scan_block_dim(get_max_size(numBins, thread_dim.x));
scan_kernel<<<scan_block_dim, thread_dim>>>(d_bins, numBins);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
cudaMemcpy(&h_out, d_bins, sizeof(unsigned int)*100, cudaMemcpyDeviceToHost);
for(int i = 0; i < 100; i++)
printf("cdf out %d\n", h_out[i]);
cudaMemcpy(d_cdf, d_bins, histo_size, cudaMemcpyDeviceToDevice);
checkCudaErrors(cudaFree(d_bins));
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
}
|
6c3544d33c7ce8290e0385979f17c92fb288adf0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cudaKernel(int n, double* gpuWeights, int* gpuG, int* gpuTempGrid, int* flag)
{
// Moment's coordinates in the grid //
int momentRow = blockIdx.y*blockDim.y + threadIdx.y;
int momentCol = blockIdx.x*blockDim.x + threadIdx.x;
int gridRowIdx, gridColIdx;
// Variable storing the total neighbourhood influence //
double weightFactor = 0.0;
// Check if coordinates are valid //
if(momentRow < n && momentCol < n){
// Read 24 neighbours of every moment and calculate their total influence //
for(int row=0; row<5; row++)
{
for(int col=0; col<5; col++)
{
if(row==2 && col==2)
continue;
// Calculate neighbour's coordinates in G //
// using modulus to satisfy boundary conditions //
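// e.g. row = 0 at momentRow = 0 gives (0 - 2 + 0 + n) % n = n - 2, wrapping to the opposite edge //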
gridRowIdx = (row - 2 + momentRow + n) % n;
gridColIdx = (col - 2 + momentCol + n) % n;
weightFactor+= gpuG[gridRowIdx * n + gridColIdx] * gpuWeights[row*5+col];
}
}
// Update moment's atomic spin //
// Set flag if a spin value transition has been done //
if(weightFactor < 0.0001 && weightFactor > -0.0001)
{
gpuTempGrid[n*momentRow+momentCol] = gpuG[n*momentRow+momentCol];
}else if(weightFactor > 0.00001)
{
gpuTempGrid[n*momentRow+momentCol] = 1;
if (gpuG[n*momentRow+momentCol] == -1)
{
*flag = 1;
}
}else
{
gpuTempGrid[n*momentRow+momentCol] = -1;
if (gpuG[n*momentRow+momentCol] == 1)
{
*flag = 1;
}
}
}
} | 6c3544d33c7ce8290e0385979f17c92fb288adf0.cu | #include "includes.h"
__global__ void cudaKernel(int n, double* gpuWeights, int* gpuG, int* gpuTempGrid, int* flag)
{
// Moment's coordinates in the grid //
int momentRow = blockIdx.y*blockDim.y + threadIdx.y;
int momentCol = blockIdx.x*blockDim.x + threadIdx.x;
int gridRowIdx, gridColIdx;
// Variable storing the total neighbourhood influence //
double weightFactor = 0.0;
// Check if coordinates are valid //
if(momentRow < n && momentCol < n){
// Read 24 neighbours of every moment and calculate their total influence //
for(int row=0; row<5; row++)
{
for(int col=0; col<5; col++)
{
if(row==2 && col==2)
continue;
// Calculate neighbour's coordinates in G //
// using modulus to satisfy boundary conditions //
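// e.g. row = 0 at momentRow = 0 gives (0 - 2 + 0 + n) % n = n - 2, wrapping to the opposite edge //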
gridRowIdx = (row - 2 + momentRow + n) % n;
gridColIdx = (col - 2 + momentCol + n) % n;
weightFactor+= gpuG[gridRowIdx * n + gridColIdx] * gpuWeights[row*5+col];
}
}
// Update moment's atomic spin //
// Set flag if a spin value transition has been done //
if(weightFactor < 0.0001 && weightFactor > -0.0001)
{
gpuTempGrid[n*momentRow+momentCol] = gpuG[n*momentRow+momentCol];
}else if(weightFactor > 0.00001)
{
gpuTempGrid[n*momentRow+momentCol] = 1;
if (gpuG[n*momentRow+momentCol] == -1)
{
*flag = 1;
}
}else
{
gpuTempGrid[n*momentRow+momentCol] = -1;
if (gpuG[n*momentRow+momentCol] == 1)
{
*flag = 1;
}
}
}
} |
407f8d0dc05abc3768e42f83ffb046437b61f434.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include "matrix.h"
#include <hipfft.h>
template<typename T> __global__ void normalize_ifft(T* data, double divider, int length);
template<typename T> __global__ void psf_from_fft(hipfftDoubleComplex* v, int N, int n, T* real_fft, T* result);
template<typename T> __global__ void add_and_divide_cut_complex(hipfftDoubleComplex* Numer1, hipfftDoubleComplex* fft2_rhs, T* Denom, int fft2_rows, int fft2_cols, hipfftDoubleComplex* copyArray);
void check_cufft(hipfftResult status);
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]){
mxGPUArray *Numer1, *rhs, *Denom, *U, *NrhsD;
hipfftDoubleComplex* d_Numer1, *d_NrhsD;
double *d_rhs, *d_Denom, *d_U;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
mxInitGPU();
if ((nrhs < 4) || !(mxIsGPUArray(prhs[0])) || !(mxIsGPUArray(prhs[1])) || !(mxIsGPUArray(prhs[2]))) {
mexErrMsgIdAndTxt(errId, errMsg);
}
Numer1 = mxGPUCopyFromMxArray (prhs[0]);
rhs = mxGPUCopyFromMxArray (prhs[1]);
Denom = mxGPUCopyFromMxArray (prhs[2]);
int N = mxGetScalar(prhs[3]);
if ((mxGPUGetClassID(Numer1) != mxDOUBLE_CLASS) ||(mxGPUGetClassID(rhs) != mxDOUBLE_CLASS) || (mxGPUGetClassID(Denom) != mxDOUBLE_CLASS)) {
mexErrMsgIdAndTxt(errId, errMsg);
}
d_Numer1 = (hipfftDoubleComplex *)(mxGPUGetData(Numer1));
d_rhs = (double *)(mxGPUGetData(rhs));
d_Denom = (double *)(mxGPUGetData(Denom));
U = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(Denom), mxGPUGetDimensions(Denom), mxGPUGetClassID(Denom), mxGPUGetComplexity(Denom),
MX_GPU_DO_NOT_INITIALIZE);
d_U = (double *)(mxGPUGetData(U));
NrhsD = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(Numer1), mxGPUGetDimensions(Numer1), mxGPUGetClassID(Numer1), mxGPUGetComplexity(Numer1),
MX_GPU_INITIALIZE_VALUES);
d_NrhsD = (hipfftDoubleComplex *)(mxGPUGetData(NrhsD));
int threads, blocks;
hipError_t status;
// --------------- REC BREGMAN METHODS STARTS HERE -------------------------------------------
/*
hipfftHandle plan;
hipfftDoubleComplex *output;
double prd[2] = {0.57, -0.57};
double * input, *temp_worksapce;
int n = (N/2)+1;
//---------------------- MEMORY ALLOCATION
checkCudaErrors(hipMalloc((void**)&output, sizeof(hipfftDoubleComplex)*n));
hipMalloc((void**)&input, sizeof(double)*N);
hipMemset(input, 0, sizeof(double)*N);
hipMalloc((void**)&temp_worksapce, sizeof(double)*N);
hipMemcpy(input, &prd[0], sizeof(double)*2, hipMemcpyHostToDevice);
// cufft plan initialization
if (hipfftPlan1d(&plan, N, HIPFFT_D2Z, 1) != HIPFFT_SUCCESS){ // N - number of input samples (length of input data)
mexErrMsgIdAndTxt(errId, "plan initialization failed, cufft error code\n");
}
// exec
if (hipfftExecD2Z(plan, input, output) != HIPFFT_SUCCESS){
mexErrMsgIdAndTxt(errId, "cufft exec failed, cufft error code\n");
}
threads = N;
blocks = N;
int shared_mem_size = N*sizeof(double);
psf_from_fft<<<threads, blocks, shared_mem_size>>>(output, N, n, temp_worksapce, d_U);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
*/
// -------------------------------------- U = IFFT2((Numer1 + FFT2(rhs))./Denom) --------------------------------------------
// --------------------------------------- CUFFT PLAN INITIALIZATION -------------------------------
hipfftHandle plan2d, plan_ifft;
int fft2_output_size = N*(N/2+1);
hipfftDoubleComplex* fft2_rhs;
hipMalloc((void**)&fft2_rhs, sizeof(hipfftDoubleComplex)*fft2_output_size);
if(hipfftPlan2d(&plan2d, N, N, HIPFFT_D2Z) != HIPFFT_SUCCESS){
mexErrMsgIdAndTxt(errId, "plan2d initialization failed, cufft error code\n");
}
if(hipfftPlan2d(&plan_ifft, N, N, HIPFFT_Z2D) != HIPFFT_SUCCESS){
mexErrMsgIdAndTxt(errId, "ifft plan initialization failed, cufft error code\n");
}
// --------------------------------------- FFT2 EXECUTION -------------------------------
// fft2(rhs)
if(hipfftExecD2Z(plan2d, d_rhs, fft2_rhs) != HIPFFT_SUCCESS){
mexErrMsgIdAndTxt(errId, "cufft fft2 exec failed, cufft error code\n");
}
//hipMemcpy(d_NrhsD, fft2_rhs, sizeof(hipfftDoubleComplex)*fft2_output_size, hipMemcpyDeviceToDevice);
// ------------------------------------- (Numer1 + fft2(rhs))./Denom
threads = N;
blocks = N/2+1;
hipLaunchKernelGGL(( add_and_divide_cut_complex), dim3(blocks), dim3(threads), 0, 0, d_Numer1, fft2_rhs, d_Denom, N, N/2+1, d_NrhsD);
hipDeviceSynchronize();
status = hipGetLastError();
if(status != hipSuccess){
mexErrMsgIdAndTxt(errId, "cuda kernel error code %d\n", status);
}
// inverse fft ifft2((Numer1 + fft2(rhs))./Denom)
if(hipfftExecZ2D(plan_ifft, fft2_rhs, d_U) != HIPFFT_SUCCESS){
mexErrMsgIdAndTxt(errId, "cufft exec failed, cufft error code\n");
}
// normalize ifft2
threads = N; blocks = N;
hipLaunchKernelGGL(( normalize_ifft), dim3(threads), dim3(blocks), 0, 0, d_U, N*N, N*N);
hipDeviceSynchronize();
status = hipGetLastError();
if(status != hipSuccess){
mexErrMsgIdAndTxt(errId, "cuda error while normalize ifft2 code %d\n", status);
}
// END OF MY CODE
plhs[0] = mxGPUCreateMxArrayOnGPU(U);
plhs[1] = mxGPUCreateMxArrayOnGPU(NrhsD);
mxGPUDestroyGPUArray(Numer1);
mxGPUDestroyGPUArray(rhs);
mxGPUDestroyGPUArray(Denom);
mxGPUDestroyGPUArray(U);
mxGPUDestroyGPUArray(NrhsD);
hipfftDestroy(plan2d);
hipfftDestroy(plan_ifft);
hipFree(fft2_rhs);
/*
hipfftDestroy(plan);
hipFree(output);
hipFree(input);
hipFree(temp_worksapce);
*/
}
template<typename T> __global__ void normalize_ifft(T* data, double divider, int length){
int index = threadIdx.x + blockIdx.x*blockDim.x;
if (index < length){
data[index] /= divider;
}
}
template<typename T>
__global__ void psf_from_fft(hipfftDoubleComplex* v, int N, int n, T* real_fft, T* result){
// blocks are rows which cover the whole length, usually the matrix is of size 512x512, the NVIDIA GPU's allows maximum number
// of threads per block of 1024
if (threadIdx.x >= n && threadIdx.x < N){
v[threadIdx.x] = v[N-threadIdx.x];
}
if (threadIdx.x < N){
hipfftDoubleComplex a = v[threadIdx.x];
real_fft[threadIdx.x] = pow(a.x, 2.0) + pow(a.y, 2.0);
}
if (blockIdx.x < N && threadIdx.x < N){
extern __shared__ double v_shared[];
v_shared[threadIdx.x] = real_fft[threadIdx.x];
result[threadIdx.x+blockIdx.x*N] = v_shared[threadIdx.x] + v_shared[blockIdx.x];
}
}
template<typename T>
__global__ void add_and_divide_cut_complex(hipfftDoubleComplex* Numer1, hipfftDoubleComplex* fft2_rhs, T* Denom, int fft2_rows, int fft2_cols, hipfftDoubleComplex* copyArray){
// IMPORTANT: Numer1 and Denom are matrices of size rows x cols, but fft2_rhs was computed by cufft in R2C plan so its total length is N*(N/2+1);
// operation: (Numer1 + fft2(rhs))./Denom
/*
Similar to the one-dimensional case, the frequency domain representation of real-valued
input data satisfies Hermitian symmetry, defined as: X(n1, n2, ..., nd) = X*(N1-n1,N2-n2,...,Nd-nd)
for two dimensional fft, i.e. fft2 on NxM matrix indexing is the following: X(n,m) = X*(N-n, M-m);
the length of fft2 done by cufft from NxM is: N*(M/2+1);
kernel run configuration should be fitted to this size N*(M/2+1)
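Example: with N = M = 512 the R2C output holds 512*(512/2+1) = 512*257 complex values,
while Numer1 and Denom remain full rows x cols arrays; that size mismatch is why index2
is computed separately from index below.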
*/
//if (threadIdx.x < fft2_rows && blockIdx.x < fft2_cols){
int difference = fft2_rows - fft2_cols;
int index = threadIdx.x + blockIdx.x*fft2_rows;
int index2 = index + index/fft2_cols*difference;//difference*blockIdx.x + difference*(threadIdx.x >= fft2_cols);
hipfftDoubleComplex t = fft2_rhs[index];
hipfftDoubleComplex n = Numer1[index2];
copyArray[index2] = t;
t.x += n.x;
t.y += n.y;
// NOTE: what happens if there is a division by zero?
double divider = Denom[index2];
if (divider != 0.0){
t.x /= divider;
t.y /= divider;
fft2_rhs[index] = t;
}
//}
}
void check_cufft(hipfftResult status){
char const * const errId = "parallel:gpu:mexGPUExample:CufftError";
if (status != HIPFFT_SUCCESS){
mexErrMsgIdAndTxt(errId, "cufft error code %d\n", status);
}
} | 407f8d0dc05abc3768e42f83ffb046437b61f434.cu |
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include "matrix.h"
#include <cufft.h>
template<typename T> __global__ void normalize_ifft(T* data, double divider, int length);
template<typename T> __global__ void psf_from_fft(cufftDoubleComplex* v, int N, int n, T* real_fft, T* result);
template<typename T> __global__ void add_and_divide_cut_complex(cufftDoubleComplex* Numer1, cufftDoubleComplex* fft2_rhs, T* Denom, int fft2_rows, int fft2_cols, cufftDoubleComplex* copyArray);
void check_cufft(cufftResult status);
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]){
mxGPUArray *Numer1, *rhs, *Denom, *U, *NrhsD;
cufftDoubleComplex* d_Numer1, *d_NrhsD;
double *d_rhs, *d_Denom, *d_U;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
mxInitGPU();
if ((nrhs < 4) || !(mxIsGPUArray(prhs[0])) || !(mxIsGPUArray(prhs[1])) || !(mxIsGPUArray(prhs[2]))) {
mexErrMsgIdAndTxt(errId, errMsg);
}
Numer1 = mxGPUCopyFromMxArray (prhs[0]);
rhs = mxGPUCopyFromMxArray (prhs[1]);
Denom = mxGPUCopyFromMxArray (prhs[2]);
int N = mxGetScalar(prhs[3]);
if ((mxGPUGetClassID(Numer1) != mxDOUBLE_CLASS) ||(mxGPUGetClassID(rhs) != mxDOUBLE_CLASS) || (mxGPUGetClassID(Denom) != mxDOUBLE_CLASS)) {
mexErrMsgIdAndTxt(errId, errMsg);
}
d_Numer1 = (cufftDoubleComplex *)(mxGPUGetData(Numer1));
d_rhs = (double *)(mxGPUGetData(rhs));
d_Denom = (double *)(mxGPUGetData(Denom));
U = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(Denom), mxGPUGetDimensions(Denom), mxGPUGetClassID(Denom), mxGPUGetComplexity(Denom),
MX_GPU_DO_NOT_INITIALIZE);
d_U = (double *)(mxGPUGetData(U));
NrhsD = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(Numer1), mxGPUGetDimensions(Numer1), mxGPUGetClassID(Numer1), mxGPUGetComplexity(Numer1),
MX_GPU_INITIALIZE_VALUES);
d_NrhsD = (cufftDoubleComplex *)(mxGPUGetData(NrhsD));
int threads, blocks;
cudaError_t status;
// --------------- REC BREGMAN METHODS STARTS HERE -------------------------------------------
/*
cufftHandle plan;
cufftDoubleComplex *output;
double prd[2] = {0.57, -0.57};
double * input, *temp_worksapce;
int n = (N/2)+1;
//---------------------- MEMORY ALLOCATION
checkCudaErrors(cudaMalloc((void**)&output, sizeof(cufftDoubleComplex)*n));
cudaMalloc((void**)&input, sizeof(double)*N);
cudaMemset(input, 0, sizeof(double)*N);
cudaMalloc((void**)&temp_worksapce, sizeof(double)*N);
cudaMemcpy(input, &prd[0], sizeof(double)*2, cudaMemcpyHostToDevice);
// cufft plan initialization
if (cufftPlan1d(&plan, N, CUFFT_D2Z, 1) != CUFFT_SUCCESS){ // N - number of input samples (length of input data)
mexErrMsgIdAndTxt(errId, "plan initialization failed, cufft error code\n");
}
// exec
if (cufftExecD2Z(plan, input, output) != CUFFT_SUCCESS){
mexErrMsgIdAndTxt(errId, "cufft exec failed, cufft error code\n");
}
threads = N;
blocks = N;
int shared_mem_size = N*sizeof(double);
psf_from_fft<<<threads, blocks, shared_mem_size>>>(output, N, n, temp_worksapce, d_U);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
*/
// -------------------------------------- U = IFFT2((Numer1 + FFT2(rhs))./Denom) --------------------------------------------
// --------------------------------------- CUFFT PLAN INITIALIZATION -------------------------------
cufftHandle plan2d, plan_ifft;
int fft2_output_size = N*(N/2+1);
cufftDoubleComplex* fft2_rhs;
cudaMalloc((void**)&fft2_rhs, sizeof(cufftDoubleComplex)*fft2_output_size);
if(cufftPlan2d(&plan2d, N, N, CUFFT_D2Z) != CUFFT_SUCCESS){
mexErrMsgIdAndTxt(errId, "plan2d initialization failed, cufft error code\n");
}
if(cufftPlan2d(&plan_ifft, N, N, CUFFT_Z2D) != CUFFT_SUCCESS){
mexErrMsgIdAndTxt(errId, "ifft plan initialization failed, cufft error code\n");
}
// --------------------------------------- FFT2 EXECUTION -------------------------------
// fft2(rhs)
if(cufftExecD2Z(plan2d, d_rhs, fft2_rhs) != CUFFT_SUCCESS){
mexErrMsgIdAndTxt(errId, "cufft fft2 exec failed, cufft error code\n");
}
//cudaMemcpy(d_NrhsD, fft2_rhs, sizeof(cufftDoubleComplex)*fft2_output_size, cudaMemcpyDeviceToDevice);
// ------------------------------------- (Numer1 + fft2(rhs))./Denom
threads = N;
blocks = N/2+1;
add_and_divide_cut_complex<<<blocks, threads>>>(d_Numer1, fft2_rhs, d_Denom, N, N/2+1, d_NrhsD);
cudaDeviceSynchronize();
status = cudaGetLastError();
if(status != cudaSuccess){
mexErrMsgIdAndTxt(errId, "cuda kernel error code %d\n", status);
}
// inverse fft ifft2((Numer1 + fft2(rhs))./Denom)
if(cufftExecZ2D(plan_ifft, fft2_rhs, d_U) != CUFFT_SUCCESS){
mexErrMsgIdAndTxt(errId, "cufft exec failed, cufft error code\n");
}
// normalize ifft2
threads = N; blocks = N;
normalize_ifft<<<threads, blocks>>>(d_U, N*N, N*N);
cudaDeviceSynchronize();
status = cudaGetLastError();
if(status != cudaSuccess){
mexErrMsgIdAndTxt(errId, "cuda error while normalize ifft2 code %d\n", status);
}
// END OF MY CODE
plhs[0] = mxGPUCreateMxArrayOnGPU(U);
plhs[1] = mxGPUCreateMxArrayOnGPU(NrhsD);
mxGPUDestroyGPUArray(Numer1);
mxGPUDestroyGPUArray(rhs);
mxGPUDestroyGPUArray(Denom);
mxGPUDestroyGPUArray(U);
mxGPUDestroyGPUArray(NrhsD);
cufftDestroy(plan2d);
cufftDestroy(plan_ifft);
cudaFree(fft2_rhs);
/*
cufftDestroy(plan);
cudaFree(output);
cudaFree(input);
cudaFree(temp_worksapce);
*/
}
template<typename T> __global__ void normalize_ifft(T* data, double divider, int length){
int index = threadIdx.x + blockIdx.x*blockDim.x;
if (index < length){
data[index] /= divider;
}
}
template<typename T>
__global__ void psf_from_fft(cufftDoubleComplex* v, int N, int n, T* real_fft, T* result){
// blocks are rows which cover the whole length, usually the matrix is of size 512x512, the NVIDIA GPU's allows maximum number
// of threads per block of 1024
if (threadIdx.x >= n && threadIdx.x < N){
v[threadIdx.x] = v[N-threadIdx.x];
}
if (threadIdx.x < N){
cufftDoubleComplex a = v[threadIdx.x];
real_fft[threadIdx.x] = pow(a.x, 2.0) + pow(a.y, 2.0);
}
if (blockIdx.x < N && threadIdx.x < N){
extern __shared__ double v_shared[];
v_shared[threadIdx.x] = real_fft[threadIdx.x];
result[threadIdx.x+blockIdx.x*N] = v_shared[threadIdx.x] + v_shared[blockIdx.x];
}
}
template<typename T>
__global__ void add_and_divide_cut_complex(cufftDoubleComplex* Numer1, cufftDoubleComplex* fft2_rhs, T* Denom, int fft2_rows, int fft2_cols, cufftDoubleComplex* copyArray){
// IMPORTANT: Numer1 and Denom are matrices of size rows x cols, but fft2_rhs was computed by cufft in R2C plan so its total length is N*(N/2+1);
// operation: (Numer1 + fft2(rhs))./Denom
/*
Similar to the one-dimensional case, the frequency domain representation of real-valued
input data satisfies Hermitian symmetry, defined as: X(n1, n2, ..., nd) = X*(N1-n1,N2-n2,...,Nd-nd)
for two dimensional fft, i.e. fft2 on NxM matrix indexing is the following: X(n,m) = X*(N-n, M-m);
the length of fft2 done by cufft from NxM is: N*(M/2+1);
kernel run configuration should be fitted to this size N*(M/2+1)
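Example: with N = M = 512 the R2C output holds 512*(512/2+1) = 512*257 complex values,
while Numer1 and Denom remain full rows x cols arrays; that size mismatch is why index2
is computed separately from index below.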
*/
//if (threadIdx.x < fft2_rows && blockIdx.x < fft2_cols){
int difference = fft2_rows - fft2_cols;
int index = threadIdx.x + blockIdx.x*fft2_rows;
int index2 = index + index/fft2_cols*difference;//difference*blockIdx.x + difference*(threadIdx.x >= fft2_cols);
cufftDoubleComplex t = fft2_rhs[index];
cufftDoubleComplex n = Numer1[index2];
copyArray[index2] = t;
t.x += n.x;
t.y += n.y;
// NOTE: what happens if there is a division by zero?
double divider = Denom[index2];
if (divider != 0.0){
t.x /= divider;
t.y /= divider;
fft2_rhs[index] = t;
}
//}
}
void check_cufft(cufftResult status){
char const * const errId = "parallel:gpu:mexGPUExample:CufftError";
if (status != CUFFT_SUCCESS){
mexErrMsgIdAndTxt(errId, "cufft error code %d\n", status);
}
} |
reduction_d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define NUM_ELEMENT 100000
#define BLOCK_SIZE 32
#define GRID_SIZE ((NUM_ELEMENT + BLOCK_SIZE - 1) / BLOCK_SIZE)
#define WARP_SIZE 32
#define SHARED_SIZE (BLOCK_SIZE * 4)
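// dynamic shared memory size in bytes for the kernel launch: one 4-byte int per thread in the block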
__global__ void reduction_max(int *Arr, int *Max){
extern __shared__ int sharedmem[];
int tid = threadIdx.x;
int id = blockIdx.x * blockDim.x + threadIdx.x;
sharedmem[tid] = Arr[id];
__syncthreads();
for (int i = 1; i < blockDim.x; i *= 2){
        // standard interleaved reduction: only threads whose index is a multiple of 2*i
        // combine values, which also keeps tid + i inside the block
        if (tid % (2*i) == 0){
if (sharedmem[tid] < sharedmem[tid + i])
sharedmem[tid] = sharedmem[tid + i];
}
__syncthreads();
}
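    // thread 0 publishes this block's maximum; every block writes to Max[0], so with
    // GRID_SIZE > 1 the surviving value is whichever block wrote last (a grid-wide
    // maximum would need a second reduction pass or atomicMax)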
if (tid == 0)
Max[0] = sharedmem[0];
}
int main(){
int* arr;
int* d_arr, *d_max;
int max = 0;
hipEvent_t start, end;
float etime;
dim3 block(BLOCK_SIZE);
dim3 grid(GRID_SIZE);
hipEventCreate(&start);
hipEventCreate(&end);
srand(time(NULL));
// random number creation
arr = (int*)malloc(sizeof(int) * NUM_ELEMENT);
for (int i = 0; i < NUM_ELEMENT; i++)
arr[i] = rand() % (NUM_ELEMENT * 10);
// tmp print
//for (int i = 0; i < NUM_ELEMENT; i++)
// printf("%d\n", arr[i]);
// cuda var initialization
hipMalloc((void**)&d_arr, sizeof(int)*NUM_ELEMENT);
hipMalloc((void**)&d_max, sizeof(int));
hipMemcpy(d_arr, arr, sizeof(int)*NUM_ELEMENT, hipMemcpyHostToDevice);
// kernel call & exec time check
hipEventRecord(start, 0);
hipLaunchKernelGGL(( reduction_max), dim3(grid), dim3(block), SHARED_SIZE, 0, d_arr, d_max);
hipEventRecord(end, 0);
hipEventSynchronize(end);
hipEventElapsedTime(&etime, start, end);
hipMemcpy(&max, d_max, sizeof(int), hipMemcpyDeviceToHost);
printf("MAX NUM : %d\n", max);
printf("EXEC TIME : %f ms\n", etime);
hipEventDestroy(start);
hipEventDestroy(end);
hipFree(d_arr);
hipFree(d_max);
return 0;
} | reduction_d.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define NUM_ELEMENT 100000
#define BLOCK_SIZE 32
#define GRID_SIZE ((NUM_ELEMENT + BLOCK_SIZE - 1) / BLOCK_SIZE)
#define WARP_SIZE 32
#define SHARED_SIZE (BLOCK_SIZE * 4)
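// dynamic shared memory size in bytes for the kernel launch: one 4-byte int per thread in the block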
__global__ void reduction_max(int *Arr, int *Max){
extern __shared__ int sharedmem[];
int tid = threadIdx.x;
int id = blockIdx.x * blockDim.x + threadIdx.x;
sharedmem[tid] = Arr[id];
__syncthreads();
for (int i = 1; i < blockDim.x; i *= 2){
        // standard interleaved reduction: only threads whose index is a multiple of 2*i
        // combine values, which also keeps tid + i inside the block
        if (tid % (2*i) == 0){
if (sharedmem[tid] < sharedmem[tid + i])
sharedmem[tid] = sharedmem[tid + i];
}
__syncthreads();
}
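    // thread 0 publishes this block's maximum; every block writes to Max[0], so with
    // GRID_SIZE > 1 the surviving value is whichever block wrote last (a grid-wide
    // maximum would need a second reduction pass or atomicMax)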
if (tid == 0)
Max[0] = sharedmem[0];
}
int main(){
int* arr;
int* d_arr, *d_max;
int max = 0;
cudaEvent_t start, end;
float etime;
dim3 block(BLOCK_SIZE);
dim3 grid(GRID_SIZE);
cudaEventCreate(&start);
cudaEventCreate(&end);
srand(time(NULL));
// random number creation
arr = (int*)malloc(sizeof(int) * NUM_ELEMENT);
for (int i = 0; i < NUM_ELEMENT; i++)
arr[i] = rand() % (NUM_ELEMENT * 10);
// tmp print
//for (int i = 0; i < NUM_ELEMENT; i++)
// printf("%d\n", arr[i]);
// cuda var initialization
cudaMalloc((void**)&d_arr, sizeof(int)*NUM_ELEMENT);
cudaMalloc((void**)&d_max, sizeof(int));
cudaMemcpy(d_arr, arr, sizeof(int)*NUM_ELEMENT, cudaMemcpyHostToDevice);
// kernel call & exec time check
cudaEventRecord(start, 0);
reduction_max<<<grid, block, SHARED_SIZE>>>(d_arr, d_max);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&etime, start, end);
cudaMemcpy(&max, d_max, sizeof(int), cudaMemcpyDeviceToHost);
printf("MAX NUM : %d\n", max);
printf("EXEC TIME : %f ms\n", etime);
cudaEventDestroy(start);
cudaEventDestroy(end);
cudaFree(d_arr);
cudaFree(d_max);
return 0;
} |
b0d49d11457ab9b4c916184e68a7d4e703d23dda.hip | // !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <cstdio>
#include <cstring>
#include <string>
#include <algorithm>
#include <iostream>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
using namespace std;
typedef double ld;
typedef long long LL;
const int max_shared_size = 128;
namespace output {
const int OutputBufferSize = 1e6+5;
char buffer[OutputBufferSize];
char *s = buffer;
inline void flush() {
fwrite(buffer, 1, s-buffer, stdout);
s = buffer;
fflush(stdout);
}
inline void print(const char ch) {
// putchar(ch); return;
if (s-buffer>OutputBufferSize-2) flush();
*s++ = ch;
}
inline void print(char *str) {
while (*str!=0) print(char(*str++));
}
inline void print(int x) {
// printf("%d", x); return;
char buf[25] = {0}, *p = buf;
if (x<0) print('-'), x=-x;
if (x == 0) print('0');
while (x) *(++p) = x%10, x/=10;
while (p != buf) print(char(*(p--)+'0'));
}
inline void print(ld x) {
// printf("%.2f", x);
static char buf[100];
sprintf(buf, "%.2f", x);
print(buf);
}
}
struct ios {
static const int IN_LEN=1<<18|1;
char buf[IN_LEN],*s,*t;
inline char read(){
return (s==t)&&(t=(s=buf)+fread(buf,1,IN_LEN,stdin)),s==t?-1:*s++;
}
inline bool isEOF() {
return (s==t)&&(t=(s=buf)+fread(buf,1,IN_LEN,stdin)),s==t;
}
inline ios & operator >> (int &x){
static char c11,boo;
for(c11=read(),boo=0;!isdigit(c11);c11=read()){
if(c11==-1)return *this;
boo|=c11=='-';
}
for(x=0;isdigit(c11);c11=read())x=x*10+(c11^'0');
boo&&(x=-x);
return *this;
}
inline ios & operator >> (LL &x){
static char c11,boo;
for(c11=read(),boo=0;!isdigit(c11);c11=read()){
if(c11==-1)return *this;
boo|=c11=='-';
}
for(x=0;isdigit(c11);c11=read())x=x*10+(c11^'0');
boo&&(x=-x);
return *this;
}
inline ios &operator >> (char *s) {
int len = 0;
char ch;
for (ch=read(); ch=='\n' || ch == ' '; ch=read());
if (ch == -1) {
s[len] = 0;
return *this;
}
for (; ch!='\n' && ch != ' ' && ch != -1;ch=read())
s[len++] = ch;
s[len] = 0;
return *this;
}
inline ios &operator>>(ld &x)
{
char ch;
bool neg = false, dec = false;
double now = 0.1;
for (ch=read(); !isdigit(ch) && (ch!='.' && ch!='-') && ch!=-1; ch=read());
if (ch == '-') neg = true;
else if (ch == '.') { x = 0; dec = true; }
else if (ch != -1) x = ch-'0';
else return *this;
if (!dec) {
for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) {
x = x * 10 + ch-'0';
}
}
if (ch == '.')
for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) {
x += now * (ch - '0'); now *= 0.1;
}
if (neg) x = -x;
return *this;
}
inline ios &operator>>(long double &x)
{
char ch;
bool neg = false, dec = false;
double now = 0.1;
for (ch=read(); !isdigit(ch) && (ch!='.' && ch!='-') && ch!=-1; ch=read());
if (ch == '-') neg = true;
else if (ch == '.') { x = 0; dec = true; }
else if (ch != -1) x = ch-'0';
else return *this;
if (!dec) {
for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) {
x = x * 10 + ch-'0';
}
}
if (ch == '.')
for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) {
x += now * (ch - '0'); now *= 0.1;
}
if (neg) x = -x;
return *this;
}
} io;
inline void handleCudaError(hipError_t err, string name = "fuck") {
if (err != hipSuccess) {
cerr << name << endl;
cerr << hipGetErrorString(err) << endl;
exit(0);
}
}
ld *d_a, *d_b, *d_c, *h_a, *h_b, *h_c;
int an, am, bn, bm;
int n, m;
void copyMatrix(ld *&src, ld *&dst, int n, int m) {
int size = sizeof(ld) * n * m;
handleCudaError(hipMalloc(&dst, size), "hipMalloc in copyMatrix");
handleCudaError(hipMemcpy(dst, src, size, hipMemcpyHostToDevice), "memcpy in copyMatrix");
}
template<typename T>
__global__ void matrixMult(T *d_a, T *d_b, T *d_c, int an, int bm, int am) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
int i = index / bm, j = index % bm;
if (i >= an || j >= bm) return;
ld sum = 0;
if (i < an && j < bm) {
for (int k=0; k<am; ++k)
sum += d_a[i * am + k] * d_b[k * bm + j];
}
if (i * bm + j < an * bm)
d_c[i * bm + j] = sum;
}
__global__ void matrixMult2(ld *d_a, ld *d_b, ld *d_c, int an, int bn, int am) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
int i = index / bn, j = index % bn;
if (i >= an || j >= bn) return;
ld sum = 0;
for (int k=0; k<am; ++k)
sum += d_a[i*am + k] * d_b[j*am+k];
d_c[i * bn + j] = sum;
}
__global__ void matrixMult3(ld *d_a, ld *d_b, ld *d_c, int an, int bn, int am) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
int i = index / bn, j = index % bn;
if (i >= an || j >= bn) return;
// int shareda = min(am, max_shared_size);
// __shared__ ld c_a[max_shared_size];
int shareda = 0;
int baseb = j * am, basea = i * am;
// for (int k=0; k<shareda; ++k) {
// c_a[k] = d_a[basea+k];
// }
// __syncthreads();
ld sum = 0;
// for (int k=0; k<shareda; ++k) {
// sum += c_a[basea + k] * d_b[baseb+k];
    // }  // end of the disabled shared-memory loop; left commented out so the function still compiles
for (int k=shareda; k<am; ++k)
sum += d_a[basea + k] * d_b[baseb + k];
d_c[i * bn + j] = sum;
}
void outputMatrix(ld *a, int n, int m) {
for (int i=0; i<n; ++i) {
int base = i * m;
output::print(a[base]);
for (int j=1; j<m; ++j) {
output::print(',');
output::print(a[base + j]);
}
output::print('\n');
}
}
void hostMatrixInput(ld **a, int &n, int &m, bool transpose = false) {
io >> n >> m;
*a = (ld*)malloc(sizeof(ld) * n * m);
if (!transpose)
for (int i=0; i<n; ++i) {
int st = i*m, ed = st+m;
for (int j=st; j<ed; ++j) {
io >> (*a)[j];
}
}
else {
for (int i=0; i<n; ++i) {
for (int j=0; j<m; ++j) {
io >> (*a)[j * n + i];
}
}
swap(n, m);
}
}
int main()
{
// #ifndef Weaverzhu
freopen("input.txt", "r", stdin);
freopen("output.txt", "w", stdout);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
cerr << prop.name << endl;
hostMatrixInput(&h_a, an, am);
hostMatrixInput(&h_b, bn, bm, true);
n = an;
m = bn;
int block_size = prop.maxThreadsPerBlock, grids = (n * m + block_size - 1) / block_size;
copyMatrix(h_a, d_a, an, am);
copyMatrix(h_b, d_b, bn, bm);
handleCudaError(hipMalloc(&d_c, sizeof(ld) * n * m), "allocate for h_c");
// cerr << an << ' ' << am << ' ' << bn << ' ' << bm << ' ' << endl;
hipLaunchKernelGGL(( matrixMult2), dim3(grids), dim3(block_size), 0, 0, d_a, d_b, d_c, an, bn, am);
h_c = (ld*)malloc(sizeof(ld) * n * m);
int size = sizeof(ld) * n * m;
handleCudaError(hipMemcpy(h_c, d_c, size, hipMemcpyDeviceToHost), "memcpy back");
outputMatrix(h_c, n, m);
output::flush();
return 0;
}
| b0d49d11457ab9b4c916184e68a7d4e703d23dda.cu | #include <cmath>
#include <cstdio>
#include <cstring>
#include <string>
#include <algorithm>
#include <iostream>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
using namespace std;
typedef double ld;
typedef long long LL;
const int max_shared_size = 128;
namespace output {
const int OutputBufferSize = 1e6+5;
char buffer[OutputBufferSize];
char *s = buffer;
inline void flush() {
fwrite(buffer, 1, s-buffer, stdout);
s = buffer;
fflush(stdout);
}
inline void print(const char ch) {
// putchar(ch); return;
if (s-buffer>OutputBufferSize-2) flush();
*s++ = ch;
}
inline void print(char *str) {
while (*str!=0) print(char(*str++));
}
inline void print(int x) {
// printf("%d", x); return;
char buf[25] = {0}, *p = buf;
if (x<0) print('-'), x=-x;
if (x == 0) print('0');
while (x) *(++p) = x%10, x/=10;
while (p != buf) print(char(*(p--)+'0'));
}
inline void print(ld x) {
// printf("%.2f", x);
static char buf[100];
sprintf(buf, "%.2f", x);
print(buf);
}
}
struct ios {
static const int IN_LEN=1<<18|1;
char buf[IN_LEN],*s,*t;
inline char read(){
return (s==t)&&(t=(s=buf)+fread(buf,1,IN_LEN,stdin)),s==t?-1:*s++;
}
inline bool isEOF() {
return (s==t)&&(t=(s=buf)+fread(buf,1,IN_LEN,stdin)),s==t;
}
inline ios & operator >> (int &x){
static char c11,boo;
for(c11=read(),boo=0;!isdigit(c11);c11=read()){
if(c11==-1)return *this;
boo|=c11=='-';
}
for(x=0;isdigit(c11);c11=read())x=x*10+(c11^'0');
boo&&(x=-x);
return *this;
}
inline ios & operator >> (LL &x){
static char c11,boo;
for(c11=read(),boo=0;!isdigit(c11);c11=read()){
if(c11==-1)return *this;
boo|=c11=='-';
}
for(x=0;isdigit(c11);c11=read())x=x*10+(c11^'0');
boo&&(x=-x);
return *this;
}
inline ios &operator >> (char *s) {
int len = 0;
char ch;
for (ch=read(); ch=='\n' || ch == ' '; ch=read());
if (ch == -1) {
s[len] = 0;
return *this;
}
for (; ch!='\n' && ch != ' ' && ch != -1;ch=read())
s[len++] = ch;
s[len] = 0;
return *this;
}
inline ios &operator>>(ld &x)
{
char ch;
bool neg = false, dec = false;
double now = 0.1;
for (ch=read(); !isdigit(ch) && (ch!='.' && ch!='-') && ch!=-1; ch=read());
if (ch == '-') neg = true;
else if (ch == '.') { x = 0; dec = true; }
else if (ch != -1) x = ch-'0';
else return *this;
if (!dec) {
for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) {
x = x * 10 + ch-'0';
}
}
if (ch == '.')
for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) {
x += now * (ch - '0'); now *= 0.1;
}
if (neg) x = -x;
return *this;
}
inline ios &operator>>(long double &x)
{
char ch;
bool neg = false, dec = false;
double now = 0.1;
for (ch=read(); !isdigit(ch) && (ch!='.' && ch!='-') && ch!=-1; ch=read());
if (ch == '-') neg = true;
else if (ch == '.') { x = 0; dec = true; }
else if (ch != -1) x = ch-'0';
else return *this;
if (!dec) {
for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) {
x = x * 10 + ch-'0';
}
}
if (ch == '.')
for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) {
x += now * (ch - '0'); now *= 0.1;
}
if (neg) x = -x;
return *this;
}
} io;
inline void handleCudaError(cudaError_t err, string name = "fuck") {
if (err != cudaSuccess) {
cerr << name << endl;
cerr << cudaGetErrorString(err) << endl;
exit(0);
}
}
ld *d_a, *d_b, *d_c, *h_a, *h_b, *h_c;
int an, am, bn, bm;
int n, m;
void copyMatrix(ld *&src, ld *&dst, int n, int m) {
int size = sizeof(ld) * n * m;
handleCudaError(cudaMalloc(&dst, size), "cudaMalloc in copyMatrix");
handleCudaError(cudaMemcpy(dst, src, size, cudaMemcpyHostToDevice), "memcpy in copyMatrix");
}
template<typename T>
__global__ void matrixMult(T *d_a, T *d_b, T *d_c, int an, int bm, int am) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
int i = index / bm, j = index % bm;
if (i >= an || j >= bm) return;
ld sum = 0;
if (i < an && j < bm) {
for (int k=0; k<am; ++k)
sum += d_a[i * am + k] * d_b[k * bm + j];
}
if (i * bm + j < an * bm)
d_c[i * bm + j] = sum;
}
__global__ void matrixMult2(ld *d_a, ld *d_b, ld *d_c, int an, int bn, int am) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
int i = index / bn, j = index % bn;
if (i >= an || j >= bn) return;
ld sum = 0;
for (int k=0; k<am; ++k)
sum += d_a[i*am + k] * d_b[j*am+k];
d_c[i * bn + j] = sum;
}
__global__ void matrixMult3(ld *d_a, ld *d_b, ld *d_c, int an, int bn, int am) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
int i = index / bn, j = index % bn;
if (i >= an || j >= bn) return;
// int shareda = min(am, max_shared_size);
// __shared__ ld c_a[max_shared_size];
int shareda = 0;
int baseb = j * am, basea = i * am;
// for (int k=0; k<shareda; ++k) {
// c_a[k] = d_a[basea+k];
// }
// __syncthreads();
ld sum = 0;
// for (int k=0; k<shareda; ++k) {
// sum += c_a[basea + k] * d_b[baseb+k];
    // }  // end of the disabled shared-memory loop; left commented out so the function still compiles
for (int k=shareda; k<am; ++k)
sum += d_a[basea + k] * d_b[baseb + k];
d_c[i * bn + j] = sum;
}
void outputMatrix(ld *a, int n, int m) {
for (int i=0; i<n; ++i) {
int base = i * m;
output::print(a[base]);
for (int j=1; j<m; ++j) {
output::print(',');
output::print(a[base + j]);
}
output::print('\n');
}
}
void hostMatrixInput(ld **a, int &n, int &m, bool transpose = false) {
io >> n >> m;
*a = (ld*)malloc(sizeof(ld) * n * m);
if (!transpose)
for (int i=0; i<n; ++i) {
int st = i*m, ed = st+m;
for (int j=st; j<ed; ++j) {
io >> (*a)[j];
}
}
else {
for (int i=0; i<n; ++i) {
for (int j=0; j<m; ++j) {
io >> (*a)[j * n + i];
}
}
swap(n, m);
}
}
int main()
{
// #ifndef Weaverzhu
freopen("input.txt", "r", stdin);
freopen("output.txt", "w", stdout);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
cerr << prop.name << endl;
hostMatrixInput(&h_a, an, am);
hostMatrixInput(&h_b, bn, bm, true);
n = an;
m = bn;
int block_size = prop.maxThreadsPerBlock, grids = (n * m + block_size - 1) / block_size;
copyMatrix(h_a, d_a, an, am);
copyMatrix(h_b, d_b, bn, bm);
handleCudaError(cudaMalloc(&d_c, sizeof(ld) * n * m), "allocate for h_c");
// cerr << an << ' ' << am << ' ' << bn << ' ' << bm << ' ' << endl;
matrixMult2<<<grids, block_size>>>(d_a, d_b, d_c, an, bn, am);
h_c = (ld*)malloc(sizeof(ld) * n * m);
int size = sizeof(ld) * n * m;
handleCudaError(cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost), "memcpy back");
outputMatrix(h_c, n, m);
output::flush();
return 0;
}
|
68698685d93e20b673d24c120fb65935df748e08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zclaswp.cu mixed zc -> ds, Wed Sep 17 15:08:23 2014
*/
#include "common_magma.h"
#define NB 64
// TODO check precision, as in dlag2s?
__global__ void
dslaswp_kernel(int n, double *A, int lda, float *SA, int m, const magma_int_t *ipiv)
{
int ind = blockIdx.x*NB + threadIdx.x;
int newind;
float res;
if (ind < m) {
SA += ind;
ipiv += ind;
newind = ipiv[0];
for(int i=0; i < n; i++) {
res = MAGMA_S_MAKE( (float)(A[newind+i*lda]),
(float)(A[newind+i*lda]) );
SA[i*lda] = res;
}
}
}
__global__ void
dslaswp_inv_kernel(int n, double *A, int lda, float *SA, int m, const magma_int_t *ipiv)
{
int ind = blockIdx.x*NB + threadIdx.x;
int newind;
double res;
if (ind < m) {
A += ind;
ipiv += ind;
newind = ipiv[0];
for(int i=0; i < n; i++) {
res = MAGMA_D_MAKE( (double)(SA[newind+i*lda]),
(double)(SA[newind+i*lda]) );
A[i*lda] = res;
}
}
}
/**
Purpose
-------
Row i of A is cast to single precision in row ipiv[i] of SA (incx > 0), or
row i of SA is cast to double precision in row ipiv[i] of A (incx < 0),
for 0 <= i < M.
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix A.
@param[in,out]
A DOUBLE PRECISION array on the GPU, dimension (LDA,N)
On entry, the M-by-N matrix to which the row interchanges will be applied.
TODO update docs
@param[in]
lda INTEGER.
LDA specifies the leading dimension of A.
@param[in,out]
SA REAL array on the GPU, dimension (LDA,N)
On exit, the single precision, permuted matrix.
TODO update docs
@param[in]
m The number of rows to be interchanged.
@param[in]
ipiv INTEGER array on the GPU, dimension (M)
The vector of pivot indices. Row i of A is cast to single
precision in row ipiv[i] of SA, for 0 <= i < m.
@param[in]
incx INTEGER
If INCX is negative, the pivots are applied in reverse order,
otherwise in straight-forward order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dslaswp_q(
magma_int_t n, double *A, magma_int_t lda,
float *SA, magma_int_t m,
const magma_int_t *ipiv, magma_int_t incx,
magma_queue_t queue )
{
int blocks = (m - 1)/NB + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(NB, 1, 1);
if (incx >= 0)
hipLaunchKernelGGL(( dslaswp_kernel), dim3(grid), dim3(threads), 0, queue , n, A, lda, SA, m, ipiv);
else
hipLaunchKernelGGL(( dslaswp_inv_kernel), dim3(grid), dim3(threads), 0, queue , n, A, lda, SA, m, ipiv);
}
/**
@see magmablas_dslaswp_q
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dslaswp(
magma_int_t n, double *A, magma_int_t lda,
float *SA, magma_int_t m,
const magma_int_t *ipiv, magma_int_t incx )
{
magmablas_dslaswp_q( n, A, lda, SA, m, ipiv, incx, magma_stream );
}
| 68698685d93e20b673d24c120fb65935df748e08.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zclaswp.cu mixed zc -> ds, Wed Sep 17 15:08:23 2014
*/
#include "common_magma.h"
#define NB 64
// TODO check precision, as in dlag2s?
__global__ void
dslaswp_kernel(int n, double *A, int lda, float *SA, int m, const magma_int_t *ipiv)
{
int ind = blockIdx.x*NB + threadIdx.x;
int newind;
float res;
if (ind < m) {
SA += ind;
ipiv += ind;
newind = ipiv[0];
for(int i=0; i < n; i++) {
res = MAGMA_S_MAKE( (float)(A[newind+i*lda]),
(float)(A[newind+i*lda]) );
SA[i*lda] = res;
}
}
}
__global__ void
dslaswp_inv_kernel(int n, double *A, int lda, float *SA, int m, const magma_int_t *ipiv)
{
int ind = blockIdx.x*NB + threadIdx.x;
int newind;
double res;
if (ind < m) {
A += ind;
ipiv += ind;
newind = ipiv[0];
for(int i=0; i < n; i++) {
res = MAGMA_D_MAKE( (double)(SA[newind+i*lda]),
(double)(SA[newind+i*lda]) );
A[i*lda] = res;
}
}
}
/**
Purpose
-------
Row i of A is cast to single precision in row ipiv[i] of SA (incx > 0), or
row i of SA is cast to double precision in row ipiv[i] of A (incx < 0),
for 0 <= i < M.
@param[in]
n INTEGER.
On entry, N specifies the number of columns of the matrix A.
@param[in,out]
A DOUBLE PRECISION array on the GPU, dimension (LDA,N)
On entry, the M-by-N matrix to which the row interchanges will be applied.
TODO update docs
@param[in]
lda INTEGER.
LDA specifies the leading dimension of A.
@param[in,out]
SA REAL array on the GPU, dimension (LDA,N)
On exit, the single precision, permuted matrix.
TODO update docs
@param[in]
m The number of rows to be interchanged.
@param[in]
ipiv INTEGER array on the GPU, dimension (M)
The vector of pivot indices. Row i of A is cast to single
precision in row ipiv[i] of SA, for 0 <= i < m.
@param[in]
incx INTEGER
If INCX is negative, the pivots are applied in reverse order,
otherwise in straight-forward order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dslaswp_q(
magma_int_t n, double *A, magma_int_t lda,
float *SA, magma_int_t m,
const magma_int_t *ipiv, magma_int_t incx,
magma_queue_t queue )
{
int blocks = (m - 1)/NB + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(NB, 1, 1);
if (incx >= 0)
dslaswp_kernel<<< grid, threads, 0, queue >>>(n, A, lda, SA, m, ipiv);
else
dslaswp_inv_kernel<<< grid, threads, 0, queue >>>(n, A, lda, SA, m, ipiv);
}
/**
@see magmablas_dslaswp_q
@ingroup magma_daux2
********************************************************************/
extern "C" void
magmablas_dslaswp(
magma_int_t n, double *A, magma_int_t lda,
float *SA, magma_int_t m,
const magma_int_t *ipiv, magma_int_t incx )
{
magmablas_dslaswp_q( n, A, lda, SA, m, ipiv, incx, magma_stream );
}
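/*
    Illustrative sketch of a call, assuming dA is an m-by-n double precision matrix
    on the GPU with leading dimension ldda, dSA a single precision buffer of the
    same shape, and d_ipiv a device array of m 0-based row indices; the forward
    (double to single) permutation would then be:

        magmablas_dslaswp( n, dA, ldda, dSA, m, d_ipiv, 1 );
*/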
|
4246d94bc76f55638f47fb0e2dfa5b34ccc0b620.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <test_utils.h>
#include <umap/runner.cuh>
#include <cuml/datasets/make_blobs.hpp>
#include <cuml/manifold/umap.hpp>
#include <cuml/manifold/umapparams.h>
#include <cuml/metrics/metrics.hpp>
#include <cuml/neighbors/knn.hpp>
#include <datasets/digits.h>
#include <raft/cudart_utils.h>
#include <test_utils.h>
#include <datasets/digits.h>
#include <raft/linalg/reduce_rows_by_key.cuh>
#include <selection/knn.cuh>
#include <raft/cuda_utils.cuh>
#include <raft/cudart_utils.h>
#include <raft/distance/distance.hpp>
#include <raft/handle.hpp>
#include <selection/knn.cuh>
#include <umap/runner.cuh>
#include <gtest/gtest.h>
#include <cstddef>
#include <iostream>
#include <type_traits>
#include <vector>
using namespace ML;
using namespace ML::Metrics;
using namespace std;
using namespace MLCommon;
using namespace MLCommon::Datasets::Digits;
template <typename T>
__global__ void has_nan_kernel(T* data, size_t len, bool* answer)
{
static_assert(std::is_floating_point<T>());
std::size_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if ((tid < len) && isnan(data[tid])) { *answer = true; }
}
template <typename T>
bool has_nan(T* data, size_t len, hipStream_t stream)
{
dim3 blk(256);
dim3 grid(raft::ceildiv(len, (size_t)blk.x));
bool h_answer = false;
rmm::device_scalar<bool> d_answer(stream);
raft::update_device(d_answer.data(), &h_answer, 1, stream);
hipLaunchKernelGGL(( has_nan_kernel), dim3(grid), dim3(blk), 0, stream, data, len, d_answer.data());
h_answer = d_answer.value(stream);
return h_answer;
}
template <typename T>
__global__ void are_equal_kernel(T* embedding1, T* embedding2, size_t len, double* diff)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= len) return;
if (embedding1[tid] != embedding2[tid]) {
atomicAdd(diff, abs(embedding1[tid] - embedding2[tid]));
}
}
template <typename T>
bool are_equal(T* embedding1, T* embedding2, size_t len, hipStream_t stream)
{
double h_answer = 0.;
rmm::device_scalar<double> d_answer(stream);
raft::update_device(d_answer.data(), &h_answer, 1, stream);
hipLaunchKernelGGL(( are_equal_kernel), dim3(raft::ceildiv(len, (size_t)32)), dim3(32), 0, stream,
embedding1, embedding2, len, d_answer.data());
h_answer = d_answer.value(stream);
double tolerance = 1.0;
if (h_answer > tolerance) {
std::cout << "Not equal, difference : " << h_answer << std::endl;
return false;
}
return true;
}
class UMAPParametrizableTest : public ::testing::Test {
protected:
struct TestParams {
bool fit_transform;
bool supervised;
bool knn_params;
bool refine;
int n_samples;
int n_features;
int n_clusters;
double min_trustworthiness;
};
void get_embedding(raft::handle_t& handle,
float* X,
float* y,
float* embedding_ptr,
TestParams& test_params,
UMAPParams& umap_params)
{
hipStream_t stream = handle.get_stream();
int& n_samples = test_params.n_samples;
int& n_features = test_params.n_features;
rmm::device_uvector<int64_t>* knn_indices_b{};
rmm::device_uvector<float>* knn_dists_b{};
int64_t* knn_indices{};
float* knn_dists{};
if (test_params.knn_params) {
knn_indices_b = new rmm::device_uvector<int64_t>(n_samples * umap_params.n_neighbors, stream);
knn_dists_b = new rmm::device_uvector<float>(n_samples * umap_params.n_neighbors, stream);
knn_indices = knn_indices_b->data();
knn_dists = knn_dists_b->data();
std::vector<float*> ptrs(1);
std::vector<int> sizes(1);
ptrs[0] = X;
sizes[0] = n_samples;
raft::spatial::knn::brute_force_knn(handle,
ptrs,
sizes,
n_features,
X,
n_samples,
knn_indices,
knn_dists,
umap_params.n_neighbors);
handle.sync_stream(stream);
}
float* model_embedding = nullptr;
rmm::device_uvector<float>* model_embedding_b{};
if (test_params.fit_transform) {
model_embedding = embedding_ptr;
} else {
model_embedding_b =
new rmm::device_uvector<float>(n_samples * umap_params.n_components, stream);
model_embedding = model_embedding_b->data();
}
RAFT_CUDA_TRY(hipMemsetAsync(
model_embedding, 0, n_samples * umap_params.n_components * sizeof(float), stream));
handle.sync_stream(stream);
auto graph = raft::sparse::COO<float, int>(stream);
if (test_params.supervised) {
ML::UMAP::fit(handle,
X,
y,
n_samples,
n_features,
knn_indices,
knn_dists,
&umap_params,
model_embedding,
&graph);
} else {
ML::UMAP::fit(handle,
X,
nullptr,
n_samples,
n_features,
knn_indices,
knn_dists,
&umap_params,
model_embedding,
&graph);
}
if (test_params.refine) {
std::cout << "using refine";
if (test_params.supervised) {
auto cgraph_coo =
ML::UMAP::get_graph(handle, X, y, n_samples, n_features, nullptr, nullptr, &umap_params);
ML::UMAP::refine(
handle, X, n_samples, n_features, cgraph_coo.get(), &umap_params, model_embedding);
} else {
auto cgraph_coo = ML::UMAP::get_graph(
handle, X, nullptr, n_samples, n_features, nullptr, nullptr, &umap_params);
ML::UMAP::refine(
handle, X, n_samples, n_features, cgraph_coo.get(), &umap_params, model_embedding);
}
}
handle.sync_stream(stream);
if (!test_params.fit_transform) {
RAFT_CUDA_TRY(hipMemsetAsync(
embedding_ptr, 0, n_samples * umap_params.n_components * sizeof(float), stream));
handle.sync_stream(stream);
ML::UMAP::transform(handle,
X,
n_samples,
umap_params.n_components,
knn_indices,
knn_dists,
X,
n_samples,
model_embedding,
n_samples,
&umap_params,
embedding_ptr);
handle.sync_stream(stream);
delete model_embedding_b;
}
if (test_params.knn_params) {
delete knn_indices_b;
delete knn_dists_b;
}
}
void assertions(raft::handle_t& handle,
float* X,
float* embedding_ptr,
TestParams& test_params,
UMAPParams& umap_params)
{
hipStream_t stream = handle.get_stream();
int& n_samples = test_params.n_samples;
int& n_features = test_params.n_features;
ASSERT_TRUE(!has_nan(embedding_ptr, n_samples * umap_params.n_components, stream));
double trustworthiness =
trustworthiness_score<float, raft::distance::DistanceType::L2SqrtUnexpanded>(
handle,
X,
embedding_ptr,
n_samples,
n_features,
umap_params.n_components,
umap_params.n_neighbors);
std::cout << "min. expected trustworthiness: " << test_params.min_trustworthiness << std::endl;
std::cout << "trustworthiness: " << trustworthiness << std::endl;
ASSERT_TRUE(trustworthiness > test_params.min_trustworthiness);
}
void test(TestParams& test_params, UMAPParams& umap_params)
{
std::cout << "\numap_params : [" << std::boolalpha << umap_params.n_neighbors << "-"
<< umap_params.n_components << "-" << umap_params.n_epochs << "-"
              << umap_params.random_state << "]" << std::endl;
std::cout << "test_params : [" << std::boolalpha << test_params.fit_transform << "-"
<< test_params.supervised << "-" << test_params.refine << "-"
<< test_params.knn_params << "-" << test_params.n_samples << "-"
<< test_params.n_features << "-" << test_params.n_clusters << "-"
<< test_params.min_trustworthiness << "]" << std::endl;
raft::handle_t handle;
hipStream_t stream = handle.get_stream();
int& n_samples = test_params.n_samples;
int& n_features = test_params.n_features;
UMAP::find_ab(handle, &umap_params);
rmm::device_uvector<float> X_d(n_samples * n_features, stream);
rmm::device_uvector<int> y_d(n_samples, stream);
ML::Datasets::make_blobs(handle,
X_d.data(),
y_d.data(),
n_samples,
n_features,
test_params.n_clusters,
true,
nullptr,
nullptr,
1.f,
true,
-10.f,
10.f,
1234ULL);
handle.sync_stream(stream);
raft::linalg::convert_array((float*)y_d.data(), y_d.data(), n_samples, stream);
handle.sync_stream(stream);
rmm::device_uvector<float> embeddings1(n_samples * umap_params.n_components, stream);
float* e1 = embeddings1.data();
#if CUDART_VERSION >= 11020
// Always use random init w/ CUDA 11.2. For some reason the
// spectral solver doesn't always converge w/ this CUDA version.
umap_params.init = 0;
umap_params.random_state = 43;
umap_params.n_epochs = 500;
#endif
get_embedding(handle, X_d.data(), (float*)y_d.data(), e1, test_params, umap_params);
assertions(handle, X_d.data(), e1, test_params, umap_params);
// v21.08: Reproducibility looks to be busted for CTK 11.4. Need to figure out
// why this is happening and re-enable this.
#if CUDART_VERSION == 11040
return;
#else
// Disable reproducibility tests after transformation
if (!test_params.fit_transform) { return; }
#endif
rmm::device_uvector<float> embeddings2(n_samples * umap_params.n_components, stream);
float* e2 = embeddings2.data();
get_embedding(handle, X_d.data(), (float*)y_d.data(), e2, test_params, umap_params);
#if CUDART_VERSION >= 11020
auto equal = are_equal(e1, e2, n_samples * umap_params.n_components, stream);
if (!equal) {
raft::print_device_vector("e1", e1, 25, std::cout);
raft::print_device_vector("e2", e2, 25, std::cout);
}
ASSERT_TRUE(equal);
#else
ASSERT_TRUE(
raft::devArrMatch(e1, e2, n_samples * umap_params.n_components, raft::Compare<float>{}));
#endif
}
void SetUp() override
{
std::vector<TestParams> test_params_vec = {{false, false, false, true, 2000, 50, 20, 0.45},
{true, false, false, false, 2000, 50, 20, 0.45},
{false, true, false, true, 2000, 50, 20, 0.45},
{false, false, true, false, 2000, 50, 20, 0.45},
{true, true, false, true, 2000, 50, 20, 0.45},
{true, false, true, false, 2000, 50, 20, 0.45},
{false, true, true, true, 2000, 50, 20, 0.45},
{true, true, true, false, 2000, 50, 20, 0.45}};
std::vector<UMAPParams> umap_params_vec(4);
umap_params_vec[0].n_components = 2;
umap_params_vec[1].n_components = 10;
umap_params_vec[2].n_components = 21;
umap_params_vec[2].random_state = 43;
umap_params_vec[2].init = 0;
umap_params_vec[2].n_epochs = 500;
umap_params_vec[3].n_components = 25;
umap_params_vec[3].random_state = 43;
umap_params_vec[3].init = 0;
umap_params_vec[3].n_epochs = 500;
for (auto& umap_params : umap_params_vec) {
for (auto& test_params : test_params_vec) {
test(test_params, umap_params);
}
}
}
void TearDown() override {}
};
typedef UMAPParametrizableTest UMAPParametrizableTest;
TEST_F(UMAPParametrizableTest, Result) {}
| 4246d94bc76f55638f47fb0e2dfa5b34ccc0b620.cu | /*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <test_utils.h>
#include <umap/runner.cuh>
#include <cuml/datasets/make_blobs.hpp>
#include <cuml/manifold/umap.hpp>
#include <cuml/manifold/umapparams.h>
#include <cuml/metrics/metrics.hpp>
#include <cuml/neighbors/knn.hpp>
#include <datasets/digits.h>
#include <raft/cudart_utils.h>
#include <test_utils.h>
#include <datasets/digits.h>
#include <raft/linalg/reduce_rows_by_key.cuh>
#include <selection/knn.cuh>
#include <raft/cuda_utils.cuh>
#include <raft/cudart_utils.h>
#include <raft/distance/distance.hpp>
#include <raft/handle.hpp>
#include <selection/knn.cuh>
#include <umap/runner.cuh>
#include <gtest/gtest.h>
#include <cstddef>
#include <iostream>
#include <type_traits>
#include <vector>
using namespace ML;
using namespace ML::Metrics;
using namespace std;
using namespace MLCommon;
using namespace MLCommon::Datasets::Digits;
template <typename T>
__global__ void has_nan_kernel(T* data, size_t len, bool* answer)
{
static_assert(std::is_floating_point<T>());
std::size_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if ((tid < len) && isnan(data[tid])) { *answer = true; }
}
template <typename T>
bool has_nan(T* data, size_t len, cudaStream_t stream)
{
dim3 blk(256);
dim3 grid(raft::ceildiv(len, (size_t)blk.x));
bool h_answer = false;
rmm::device_scalar<bool> d_answer(stream);
raft::update_device(d_answer.data(), &h_answer, 1, stream);
has_nan_kernel<<<grid, blk, 0, stream>>>(data, len, d_answer.data());
h_answer = d_answer.value(stream);
return h_answer;
}
template <typename T>
__global__ void are_equal_kernel(T* embedding1, T* embedding2, size_t len, double* diff)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= len) return;
if (embedding1[tid] != embedding2[tid]) {
atomicAdd(diff, abs(embedding1[tid] - embedding2[tid]));
}
}
template <typename T>
bool are_equal(T* embedding1, T* embedding2, size_t len, cudaStream_t stream)
{
double h_answer = 0.;
rmm::device_scalar<double> d_answer(stream);
raft::update_device(d_answer.data(), &h_answer, 1, stream);
are_equal_kernel<<<raft::ceildiv(len, (size_t)32), 32, 0, stream>>>(
embedding1, embedding2, len, d_answer.data());
h_answer = d_answer.value(stream);
double tolerance = 1.0;
if (h_answer > tolerance) {
std::cout << "Not equal, difference : " << h_answer << std::endl;
return false;
}
return true;
}
class UMAPParametrizableTest : public ::testing::Test {
protected:
struct TestParams {
bool fit_transform;
bool supervised;
bool knn_params;
bool refine;
int n_samples;
int n_features;
int n_clusters;
double min_trustworthiness;
};
void get_embedding(raft::handle_t& handle,
float* X,
float* y,
float* embedding_ptr,
TestParams& test_params,
UMAPParams& umap_params)
{
cudaStream_t stream = handle.get_stream();
int& n_samples = test_params.n_samples;
int& n_features = test_params.n_features;
rmm::device_uvector<int64_t>* knn_indices_b{};
rmm::device_uvector<float>* knn_dists_b{};
int64_t* knn_indices{};
float* knn_dists{};
if (test_params.knn_params) {
knn_indices_b = new rmm::device_uvector<int64_t>(n_samples * umap_params.n_neighbors, stream);
knn_dists_b = new rmm::device_uvector<float>(n_samples * umap_params.n_neighbors, stream);
knn_indices = knn_indices_b->data();
knn_dists = knn_dists_b->data();
std::vector<float*> ptrs(1);
std::vector<int> sizes(1);
ptrs[0] = X;
sizes[0] = n_samples;
raft::spatial::knn::brute_force_knn(handle,
ptrs,
sizes,
n_features,
X,
n_samples,
knn_indices,
knn_dists,
umap_params.n_neighbors);
handle.sync_stream(stream);
}
float* model_embedding = nullptr;
rmm::device_uvector<float>* model_embedding_b{};
if (test_params.fit_transform) {
model_embedding = embedding_ptr;
} else {
model_embedding_b =
new rmm::device_uvector<float>(n_samples * umap_params.n_components, stream);
model_embedding = model_embedding_b->data();
}
RAFT_CUDA_TRY(cudaMemsetAsync(
model_embedding, 0, n_samples * umap_params.n_components * sizeof(float), stream));
handle.sync_stream(stream);
auto graph = raft::sparse::COO<float, int>(stream);
if (test_params.supervised) {
ML::UMAP::fit(handle,
X,
y,
n_samples,
n_features,
knn_indices,
knn_dists,
&umap_params,
model_embedding,
&graph);
} else {
ML::UMAP::fit(handle,
X,
nullptr,
n_samples,
n_features,
knn_indices,
knn_dists,
&umap_params,
model_embedding,
&graph);
}
if (test_params.refine) {
std::cout << "using refine";
if (test_params.supervised) {
auto cgraph_coo =
ML::UMAP::get_graph(handle, X, y, n_samples, n_features, nullptr, nullptr, &umap_params);
ML::UMAP::refine(
handle, X, n_samples, n_features, cgraph_coo.get(), &umap_params, model_embedding);
} else {
auto cgraph_coo = ML::UMAP::get_graph(
handle, X, nullptr, n_samples, n_features, nullptr, nullptr, &umap_params);
ML::UMAP::refine(
handle, X, n_samples, n_features, cgraph_coo.get(), &umap_params, model_embedding);
}
}
handle.sync_stream(stream);
if (!test_params.fit_transform) {
RAFT_CUDA_TRY(cudaMemsetAsync(
embedding_ptr, 0, n_samples * umap_params.n_components * sizeof(float), stream));
handle.sync_stream(stream);
ML::UMAP::transform(handle,
X,
n_samples,
umap_params.n_components,
knn_indices,
knn_dists,
X,
n_samples,
model_embedding,
n_samples,
&umap_params,
embedding_ptr);
handle.sync_stream(stream);
delete model_embedding_b;
}
if (test_params.knn_params) {
delete knn_indices_b;
delete knn_dists_b;
}
}
void assertions(raft::handle_t& handle,
float* X,
float* embedding_ptr,
TestParams& test_params,
UMAPParams& umap_params)
{
cudaStream_t stream = handle.get_stream();
int& n_samples = test_params.n_samples;
int& n_features = test_params.n_features;
ASSERT_TRUE(!has_nan(embedding_ptr, n_samples * umap_params.n_components, stream));
double trustworthiness =
trustworthiness_score<float, raft::distance::DistanceType::L2SqrtUnexpanded>(
handle,
X,
embedding_ptr,
n_samples,
n_features,
umap_params.n_components,
umap_params.n_neighbors);
std::cout << "min. expected trustworthiness: " << test_params.min_trustworthiness << std::endl;
std::cout << "trustworthiness: " << trustworthiness << std::endl;
ASSERT_TRUE(trustworthiness > test_params.min_trustworthiness);
}
void test(TestParams& test_params, UMAPParams& umap_params)
{
std::cout << "\numap_params : [" << std::boolalpha << umap_params.n_neighbors << "-"
<< umap_params.n_components << "-" << umap_params.n_epochs << "-"
              << umap_params.random_state << "]" << std::endl;
std::cout << "test_params : [" << std::boolalpha << test_params.fit_transform << "-"
<< test_params.supervised << "-" << test_params.refine << "-"
<< test_params.knn_params << "-" << test_params.n_samples << "-"
<< test_params.n_features << "-" << test_params.n_clusters << "-"
<< test_params.min_trustworthiness << "]" << std::endl;
raft::handle_t handle;
cudaStream_t stream = handle.get_stream();
int& n_samples = test_params.n_samples;
int& n_features = test_params.n_features;
UMAP::find_ab(handle, &umap_params);
rmm::device_uvector<float> X_d(n_samples * n_features, stream);
rmm::device_uvector<int> y_d(n_samples, stream);
ML::Datasets::make_blobs(handle,
X_d.data(),
y_d.data(),
n_samples,
n_features,
test_params.n_clusters,
true,
nullptr,
nullptr,
1.f,
true,
-10.f,
10.f,
1234ULL);
handle.sync_stream(stream);
raft::linalg::convert_array((float*)y_d.data(), y_d.data(), n_samples, stream);
handle.sync_stream(stream);
rmm::device_uvector<float> embeddings1(n_samples * umap_params.n_components, stream);
float* e1 = embeddings1.data();
#if CUDART_VERSION >= 11020
// Always use random init w/ CUDA 11.2. For some reason the
// spectral solver doesn't always converge w/ this CUDA version.
umap_params.init = 0;
umap_params.random_state = 43;
umap_params.n_epochs = 500;
#endif
get_embedding(handle, X_d.data(), (float*)y_d.data(), e1, test_params, umap_params);
assertions(handle, X_d.data(), e1, test_params, umap_params);
// v21.08: Reproducibility looks to be busted for CTK 11.4. Need to figure out
// why this is happening and re-enable this.
#if CUDART_VERSION == 11040
return;
#else
// Disable reproducibility tests after transformation
if (!test_params.fit_transform) { return; }
#endif
rmm::device_uvector<float> embeddings2(n_samples * umap_params.n_components, stream);
float* e2 = embeddings2.data();
get_embedding(handle, X_d.data(), (float*)y_d.data(), e2, test_params, umap_params);
#if CUDART_VERSION >= 11020
auto equal = are_equal(e1, e2, n_samples * umap_params.n_components, stream);
if (!equal) {
raft::print_device_vector("e1", e1, 25, std::cout);
raft::print_device_vector("e2", e2, 25, std::cout);
}
ASSERT_TRUE(equal);
#else
ASSERT_TRUE(
raft::devArrMatch(e1, e2, n_samples * umap_params.n_components, raft::Compare<float>{}));
#endif
}
void SetUp() override
{
std::vector<TestParams> test_params_vec = {{false, false, false, true, 2000, 50, 20, 0.45},
{true, false, false, false, 2000, 50, 20, 0.45},
{false, true, false, true, 2000, 50, 20, 0.45},
{false, false, true, false, 2000, 50, 20, 0.45},
{true, true, false, true, 2000, 50, 20, 0.45},
{true, false, true, false, 2000, 50, 20, 0.45},
{false, true, true, true, 2000, 50, 20, 0.45},
{true, true, true, false, 2000, 50, 20, 0.45}};
std::vector<UMAPParams> umap_params_vec(4);
umap_params_vec[0].n_components = 2;
umap_params_vec[1].n_components = 10;
umap_params_vec[2].n_components = 21;
umap_params_vec[2].random_state = 43;
umap_params_vec[2].init = 0;
umap_params_vec[2].n_epochs = 500;
umap_params_vec[3].n_components = 25;
umap_params_vec[3].random_state = 43;
umap_params_vec[3].init = 0;
umap_params_vec[3].n_epochs = 500;
for (auto& umap_params : umap_params_vec) {
for (auto& test_params : test_params_vec) {
test(test_params, umap_params);
}
}
}
void TearDown() override {}
};
typedef UMAPParametrizableTest UMAPParametrizableTest;
TEST_F(UMAPParametrizableTest, Result) {}
|
004eeda42bb2cbd1954bd4b4dd9367fc4f174f1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// copied and pasted from pytorch to test if this passes the build.
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/upsample_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
inline __device__ int idx(
const int n,
const int num_channels,
const int c,
const int height,
const int width,
const int y,
const int x) {
return ((n * num_channels + c) * height + y) * width + x;
}
// input is X, output is Y
__global__ void UpsampleBilinearKernel(
const int num_batch,
const int num_channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const float height_scale,
const float width_scale,
const float* __restrict__ X,
float* __restrict__ Y) {
const int size = output_height * output_width;
CUDA_1D_KERNEL_LOOP(index, size) {
int indexTemp = index;
const int out_x = indexTemp % output_width;
indexTemp /= output_width;
const int out_y = indexTemp % output_height;
indexTemp /= output_height;
indexTemp /= num_channels;
const float rheight =
output_height > 1 ? (input_height - 1.f) / (output_height - 1.f) : 0.f;
const float rwidth =
output_width > 1 ? (input_width - 1.f) / (output_width - 1.f) : 0.f;
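    // Note: rheight/rwidth implement the "align corners" mapping: output row out_y
    // samples the input at out_y * (input_height - 1) / (output_height - 1) (and
    // likewise for columns), so the corner pixels of input and output coincide.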
// Compute Y axis lambdas
const float h1r = rheight * out_y;
const int h1 = (int)h1r;
const int h1p = (h1 < input_height - 1) ? 1 : 0;
const float h1lambda = h1r - h1;
const float h0lambda = 1.f - h1lambda;
// Compute X axis lambdas
const float w1r = rwidth * out_x;
const int w1 = (int)w1r;
const int w1p = (w1 < input_width - 1) ? 1 : 0;
const float w1lambda = w1r - w1;
const float w0lambda = 1.f - w1lambda;
for (int n = 0; n < num_batch; n++){
for (int c = 0; c < num_channels; c++) {
float X0 = X[idx(n, num_channels, c, input_height, input_width, h1, w1)];
float X1 = X[idx(n, num_channels, c, input_height, input_width, h1, w1 + w1p)];
float X2 = X[idx(n, num_channels, c, input_height, input_width, h1 + h1p, w1)];
float X3 = X[idx(n, num_channels, c, input_height, input_width, h1 + h1p, w1 + w1p)];
Y[idx(n, num_channels, c, output_height, output_width, out_y, out_x)] =
h0lambda * (w0lambda * X0 + w1lambda * X1) +
h1lambda * (w0lambda * X2 + w1lambda * X3);
}
}
}
}
// input is dY, output is dX
__global__ void UpsampleBilinearGradientKernel(
const int input_size,
const int num_channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const float height_scale,
const float width_scale,
const float* dY,
float* dX) {
CUDA_1D_KERNEL_LOOP(index, input_size) {
int indexTemp = index;
const int in_x = indexTemp % input_width;
indexTemp /= input_width;
const int in_y = indexTemp % input_height;
indexTemp /= input_height;
const int c = indexTemp % num_channels;
indexTemp /= num_channels;
const int n = indexTemp;
const int out_y = fminf(in_y / height_scale, output_height - 1);
const int out_x = fminf(in_x / width_scale, output_width - 1);
const float rheight =
output_height > 1 ? (output_height - 1.f) / (input_height - 1.f) : 0.f;
const float rwidth =
output_width > 1 ? (output_width - 1.f) / (input_width - 1.f) : 0.f;
// Compute Y axis lambdas
const float h1r = rheight * in_y;
const int h1 = (int)h1r;
const int h1p = (h1 < output_height - 1) ? 1 : 0;
const float h1lambda = h1r - h1;
const float h0lambda = 1.f - h1lambda;
// Compute X axis lambdas
const float w1r = rwidth * in_x;
const int w1 = (int)w1r;
const int w1p = (w1 < output_width - 1) ? 1 : 0;
const float w1lambda = w1r - w1;
const float w0lambda = 1.f - w1lambda;
#if __CUDA_ARCH__ >= 350
const float dYi = __ldg(&dY[index]);
#else
const float dYi = dY[index];
#endif
atomicAdd(
&dX[idx(n, num_channels, c, output_height, output_width, h1, w1)],
h0lambda * w0lambda * dYi);
atomicAdd(
&dX[idx(n, num_channels, c, output_height, output_width, h1, w1 + w1p)],
h0lambda * w1lambda * dYi);
atomicAdd(
&dX[idx(n, num_channels, c, output_height, output_width, h1 + h1p, w1)],
h1lambda * w0lambda * dYi);
atomicAdd(
&dX[idx(
n,
num_channels,
c,
output_height,
output_width,
h1 + h1p,
w1 + w1p)],
h1lambda * w1lambda * dYi);
}
}
} // namespace
template <>
bool UpsampleBilinearOp<float, CUDAContext>::RunOnDevice() {
const auto& X = Input(0);
const auto inputDims = X.sizes();
CAFFE_ENFORCE_EQ(4, inputDims.size());
const int batch_size = X.dim32(0), num_channels = X.dim32(1),
input_height = X.dim32(2), input_width = X.dim32(3);
if (InputSize() == 2) {
const auto& scales = Input(1);
CAFFE_ENFORCE_EQ(scales.dim(), 1);
CAFFE_ENFORCE_EQ(scales.numel(), 2);
float scales_data[2];
context_.CopyToCPU<float>(2, scales.data<float>(), scales_data);
height_scale_ = scales_data[0];
width_scale_ = scales_data[1];
}
int output_width = input_width * width_scale_;
int output_height = input_height * height_scale_;
auto* Y = Output(
0,
{batch_size, num_channels, output_height, output_width},
at::dtype<float>());
const auto size = output_height * output_width;
hipLaunchKernelGGL(( UpsampleBilinearKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
batch_size,
num_channels,
input_height,
input_width,
output_height,
output_width,
height_scale_,
width_scale_,
X.data<float>(),
Y->template mutable_data<float>());
return true;
}
template <>
bool UpsampleBilinearGradientOp<float, CUDAContext>::RunOnDevice() {
const auto& dY = Input(0);
const auto& X = Input(1);
const auto inputDims = dY.sizes();
CAFFE_ENFORCE_EQ(4, inputDims.size());
const int batch_size = dY.dim32(0);
const int num_channels = dY.dim32(1);
const int input_height = dY.dim32(2);
const int input_width = dY.dim32(3);
const int output_height = X.dim32(2);
const int output_width = X.dim32(3);
if (InputSize() == 3) {
const auto& scales = Input(2);
CAFFE_ENFORCE_EQ(scales.dim(), 1);
CAFFE_ENFORCE_EQ(scales.numel(), 2);
float scales_data[2];
context_.CopyToCPU<float>(2, scales.data<float>(), scales_data);
height_scale_ = scales_data[0];
width_scale_ = scales_data[1];
}
auto* dX = Output(
0,
{batch_size, num_channels, output_height, output_width},
at::dtype<float>());
math::Set<float, CUDAContext>(
dX->numel(), 0.0f, dX->mutable_data<float>(), &context_);
const auto size = dY.numel();
hipLaunchKernelGGL(( UpsampleBilinearGradientKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
size,
num_channels,
input_height,
input_width,
output_height,
output_width,
height_scale_,
width_scale_,
dY.data<float>(),
dX->template mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(
UpsampleBilinear,
UpsampleBilinearOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
UpsampleBilinearGradient,
UpsampleBilinearGradientOp<float, CUDAContext>);
} // namespace caffe2
| 004eeda42bb2cbd1954bd4b4dd9367fc4f174f1c.cu | // copied and pasted from pytorch to test if this passes the build.
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/upsample_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
inline __device__ int idx(
const int n,
const int num_channels,
const int c,
const int height,
const int width,
const int y,
const int x) {
return ((n * num_channels + c) * height + y) * width + x;
}
// input is X, output is Y
__global__ void UpsampleBilinearKernel(
const int num_batch,
const int num_channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const float height_scale,
const float width_scale,
const float* __restrict__ X,
float* __restrict__ Y) {
const int size = output_height * output_width;
CUDA_1D_KERNEL_LOOP(index, size) {
int indexTemp = index;
const int out_x = indexTemp % output_width;
indexTemp /= output_width;
const int out_y = indexTemp % output_height;
indexTemp /= output_height;
indexTemp /= num_channels;
const float rheight =
output_height > 1 ? (input_height - 1.f) / (output_height - 1.f) : 0.f;
const float rwidth =
output_width > 1 ? (input_width - 1.f) / (output_width - 1.f) : 0.f;
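    // Note: rheight/rwidth implement the "align corners" mapping: output row out_y
    // samples the input at out_y * (input_height - 1) / (output_height - 1) (and
    // likewise for columns), so the corner pixels of input and output coincide.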
// Compute Y axis lambdas
const float h1r = rheight * out_y;
const int h1 = (int)h1r;
const int h1p = (h1 < input_height - 1) ? 1 : 0;
const float h1lambda = h1r - h1;
const float h0lambda = 1.f - h1lambda;
// Compute X axis lambdas
const float w1r = rwidth * out_x;
const int w1 = (int)w1r;
const int w1p = (w1 < input_width - 1) ? 1 : 0;
const float w1lambda = w1r - w1;
const float w0lambda = 1.f - w1lambda;
for (int n = 0; n < num_batch; n++){
for (int c = 0; c < num_channels; c++) {
float X0 = X[idx(n, num_channels, c, input_height, input_width, h1, w1)];
float X1 = X[idx(n, num_channels, c, input_height, input_width, h1, w1 + w1p)];
float X2 = X[idx(n, num_channels, c, input_height, input_width, h1 + h1p, w1)];
float X3 = X[idx(n, num_channels, c, input_height, input_width, h1 + h1p, w1 + w1p)];
Y[idx(n, num_channels, c, output_height, output_width, out_y, out_x)] =
h0lambda * (w0lambda * X0 + w1lambda * X1) +
h1lambda * (w0lambda * X2 + w1lambda * X3);
}
}
}
}
// input is dY, output is dX
__global__ void UpsampleBilinearGradientKernel(
const int input_size,
const int num_channels,
const int input_height,
const int input_width,
const int output_height,
const int output_width,
const float height_scale,
const float width_scale,
const float* dY,
float* dX) {
CUDA_1D_KERNEL_LOOP(index, input_size) {
int indexTemp = index;
const int in_x = indexTemp % input_width;
indexTemp /= input_width;
const int in_y = indexTemp % input_height;
indexTemp /= input_height;
const int c = indexTemp % num_channels;
indexTemp /= num_channels;
const int n = indexTemp;
const int out_y = fminf(in_y / height_scale, output_height - 1);
const int out_x = fminf(in_x / width_scale, output_width - 1);
const float rheight =
output_height > 1 ? (output_height - 1.f) / (input_height - 1.f) : 0.f;
const float rwidth =
output_width > 1 ? (output_width - 1.f) / (input_width - 1.f) : 0.f;
// Compute Y axis lambdas
const float h1r = rheight * in_y;
const int h1 = (int)h1r;
const int h1p = (h1 < output_height - 1) ? 1 : 0;
const float h1lambda = h1r - h1;
const float h0lambda = 1.f - h1lambda;
// Compute X axis lambdas
const float w1r = rwidth * in_x;
const int w1 = (int)w1r;
const int w1p = (w1 < output_width - 1) ? 1 : 0;
const float w1lambda = w1r - w1;
const float w0lambda = 1.f - w1lambda;
#if __CUDA_ARCH__ >= 350
const float dYi = __ldg(&dY[index]);
#else
const float dYi = dY[index];
#endif
atomicAdd(
&dX[idx(n, num_channels, c, output_height, output_width, h1, w1)],
h0lambda * w0lambda * dYi);
atomicAdd(
&dX[idx(n, num_channels, c, output_height, output_width, h1, w1 + w1p)],
h0lambda * w1lambda * dYi);
atomicAdd(
&dX[idx(n, num_channels, c, output_height, output_width, h1 + h1p, w1)],
h1lambda * w0lambda * dYi);
atomicAdd(
&dX[idx(
n,
num_channels,
c,
output_height,
output_width,
h1 + h1p,
w1 + w1p)],
h1lambda * w1lambda * dYi);
}
}
} // namespace
template <>
bool UpsampleBilinearOp<float, CUDAContext>::RunOnDevice() {
const auto& X = Input(0);
const auto inputDims = X.sizes();
CAFFE_ENFORCE_EQ(4, inputDims.size());
const int batch_size = X.dim32(0), num_channels = X.dim32(1),
input_height = X.dim32(2), input_width = X.dim32(3);
if (InputSize() == 2) {
const auto& scales = Input(1);
CAFFE_ENFORCE_EQ(scales.dim(), 1);
CAFFE_ENFORCE_EQ(scales.numel(), 2);
float scales_data[2];
context_.CopyToCPU<float>(2, scales.data<float>(), scales_data);
height_scale_ = scales_data[0];
width_scale_ = scales_data[1];
}
int output_width = input_width * width_scale_;
int output_height = input_height * height_scale_;
auto* Y = Output(
0,
{batch_size, num_channels, output_height, output_width},
at::dtype<float>());
const auto size = output_height * output_width;
UpsampleBilinearKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
batch_size,
num_channels,
input_height,
input_width,
output_height,
output_width,
height_scale_,
width_scale_,
X.data<float>(),
Y->template mutable_data<float>());
return true;
}
template <>
bool UpsampleBilinearGradientOp<float, CUDAContext>::RunOnDevice() {
const auto& dY = Input(0);
const auto& X = Input(1);
const auto inputDims = dY.sizes();
CAFFE_ENFORCE_EQ(4, inputDims.size());
const int batch_size = dY.dim32(0);
const int num_channels = dY.dim32(1);
const int input_height = dY.dim32(2);
const int input_width = dY.dim32(3);
const int output_height = X.dim32(2);
const int output_width = X.dim32(3);
if (InputSize() == 3) {
const auto& scales = Input(2);
CAFFE_ENFORCE_EQ(scales.dim(), 1);
CAFFE_ENFORCE_EQ(scales.numel(), 2);
float scales_data[2];
context_.CopyToCPU<float>(2, scales.data<float>(), scales_data);
height_scale_ = scales_data[0];
width_scale_ = scales_data[1];
}
auto* dX = Output(
0,
{batch_size, num_channels, output_height, output_width},
at::dtype<float>());
math::Set<float, CUDAContext>(
dX->numel(), 0.0f, dX->mutable_data<float>(), &context_);
const auto size = dY.numel();
UpsampleBilinearGradientKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
size,
num_channels,
input_height,
input_width,
output_height,
output_width,
height_scale_,
width_scale_,
dY.data<float>(),
dX->template mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(
UpsampleBilinear,
UpsampleBilinearOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
UpsampleBilinearGradient,
UpsampleBilinearGradientOp<float, CUDAContext>);
} // namespace caffe2
|
40e4ef0ae2afa2e93fc2d18bfa77653adb86784c.hip | // !!! This is a file automatically generated by hipify!!!
/*
======================================================================
=======================================================================
*/
#include <stdlib.h>
#include <stdio.h>
#include <GL/glut.h>
#include "solver.h"
#include "io.h"
#include "cuda_solver.h"
/* Device Simulation State */
static GPUSTATE gpu;
/* global variables */
static int N;
static float dt, diff, visc;
static float force, source;
static int dvel;
static fluid *u, *v, *u_prev, *v_prev;
static fluid *dens, *dens_prev;
static int win_id;
static int win_x, win_y;
static int mouse_down[3];
static int omx, omy, mx, my;
/*
----------------------------------------------------------------------
free/clear/allocate simulation data
----------------------------------------------------------------------
*/
static void free_data(void)
{
if (u)
hipHostFree(u);
if (v)
hipHostFree(v);
if (u_prev)
hipHostFree(u_prev);
if (v_prev)
hipHostFree(v_prev);
if (dens)
hipHostFree(dens);
if (dens_prev)
hipHostFree(dens_prev);
}
static void free_cuda_data(void)
{
checkCuda(hipFree(gpu.u));
checkCuda(hipFree(gpu.u_prev));
checkCuda(hipFree(gpu.v));
checkCuda(hipFree(gpu.v_prev));
checkCuda(hipFree(gpu.dens));
checkCuda(hipFree(gpu.dens_prev));
}
static void clear_data(void)
{
int i, size = (N + 2) * (N + 2);
for (i = 0; i < size; i++)
{
u[i] = v[i] = u_prev[i] = v_prev[i] = dens[i] = dens_prev[i] = 0.0f;
}
}
static int allocate_data(void)
{
int size = (N + 2) * (N + 2) * sizeof(fluid);
checkCuda(hipHostMalloc((void**)&u, size));
checkCuda(hipHostMalloc((void**)&v, size));
checkCuda(hipHostMalloc((void**)&u_prev, size));
checkCuda(hipHostMalloc((void**)&v_prev, size));
checkCuda(hipHostMalloc((void**)&dens, size));
checkCuda(hipHostMalloc((void**)&dens_prev, size));
if (!u || !v || !u_prev || !v_prev || !dens || !dens_prev)
{
fprintf(stderr, "cannot allocate data\n");
return (0);
}
return (1);
}
static int cuda_allocate_data(void)
{
int size = (N + 2) * (N + 2) * sizeof(fluid);
gpu.u = NULL;
checkCuda(hipMalloc((void **) &gpu.u, size));
gpu.v = NULL;
checkCuda(hipMalloc((void **) &gpu.v, size));
gpu.u_prev = NULL;
checkCuda(hipMalloc((void **) &gpu.u_prev, size));
gpu.v_prev = NULL;
checkCuda(hipMalloc((void **) &gpu.v_prev, size));
gpu.dens = NULL;
checkCuda(hipMalloc((void **) &gpu.dens, size));
gpu.dens_prev = NULL;
checkCuda(hipMalloc((void **) &gpu.dens_prev, size));
if (!gpu.u || !gpu.v || !gpu.u_prev ||
!gpu.v_prev || !gpu.dens || !gpu.dens_prev ) {
return 0;
}
return 1;
}
/*
----------------------------------------------------------------------
OpenGL specific drawing routines
----------------------------------------------------------------------
*/
static void pre_display(void)
{
glViewport(0, 0, win_x, win_y);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0.0, 1.0, 0.0, 1.0);
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
}
static void post_display(void)
{
glutSwapBuffers();
}
static void draw_velocity(void)
{
int i, j;
float x, y, h;
h = 1.0f / N;
glColor3f(1.0f, 1.0f, 1.0f);
glLineWidth(1.0f);
glBegin(GL_LINES);
for (i = 1; i <= N; i++)
{
x = (i - 0.5f) * h;
for (j = 1; j <= N; j++)
{
y = (j - 0.5f) * h;
glVertex2f(x, y);
glVertex2f(x + u[IX(i, j)], y + v[IX(i, j)]);
}
}
glEnd();
}
static void draw_density(void)
{
int i, j;
float x, y, h, d00, d01, d10, d11;
h = 1.0f / N;
glBegin(GL_QUADS);
for (i = 0; i <= N; i++)
{
x = (i - 0.5f) * h;
for (j = 0; j <= N; j++)
{
y = (j - 0.5f) * h;
d00 = dens[IX(i, j)];
d01 = dens[IX(i, j + 1)];
d10 = dens[IX(i + 1, j)];
d11 = dens[IX(i + 1, j + 1)];
glColor3f(d00, d00, d00);
glVertex2f(x, y);
glColor3f(d10, d10, d10);
glVertex2f(x + h, y);
glColor3f(d11, d11, d11);
glVertex2f(x + h, y + h);
glColor3f(d01, d01, d01);
glVertex2f(x, y + h);
}
}
glEnd();
}
/*
----------------------------------------------------------------------
relates mouse movements to forces sources
----------------------------------------------------------------------
*/
static void get_from_UI(fluid *d, fluid *u, fluid *v)
{
int i, j, size = (N + 2) * (N + 2);
int range = (N/128) + 1;
for (i = 0; i < size; i++)
{
u[i] = v[i] = d[i] = 0.0f;
}
if (!mouse_down[0] && !mouse_down[2])
return;
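    // map the mouse position (window pixels, origin at the top-left) to grid
    // indices in [1, N]; y is flipped because the fluid grid's origin is at the bottom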
i = (int)((mx / (float)win_x) * N + 1);
j = (int)(((win_y - my) / (float)win_y) * N + 1);
if (i < 1 || i > N || j < 1 || j > N)
return;
if (mouse_down[0])
{
u[IX(i, j)] = force * (mx - omx);
v[IX(i, j)] = force * (omy - my);
}
if (mouse_down[2])
{
for (int ii = -range; ii <= range; ii++)
{
for (int jj = -range; jj <= range; jj++)
{
if (i + ii < 1 || i + ii > N || j + jj < 1 || j + jj > N)
continue;
d[IX(i + ii, j + jj)] = source / (3 + range);
}
}
}
omx = mx;
omy = my;
return;
}
/*
----------------------------------------------------------------------
GLUT callback routines
----------------------------------------------------------------------
*/
static void key_func(unsigned char key, int x, int y)
{
switch (key)
{
case 'c':
case 'C':
clear_data();
break;
case 'q':
case 'Q':
free_data();
free_cuda_data();
exit(0);
break;
case 'v':
case 'V':
dvel = !dvel;
break;
case 's':
case 'S':
save_to_disk("state.fluid", N, u, v, u_prev, v_prev, dens, dens_prev);
break;
case 'r':
case 'R':
read_from_disk("state.fluid", N, u, v, u_prev, v_prev, dens, dens_prev);
break;
}
}
static void mouse_func(int button, int state, int x, int y)
{
omx = mx = x;
    omy = my = y;
mouse_down[button] = state == GLUT_DOWN;
}
static void motion_func(int x, int y)
{
mx = x;
my = y;
}
static void reshape_func(int width, int height)
{
glutSetWindow(win_id);
// glutReshapeWindow(width, height);
win_x = width;
win_y = height;
}
static void idle_func(void)
{
get_from_UI(dens_prev, u_prev, v_prev);
step_cuda(N, u, v, u_prev, v_prev, dens, dens_prev, visc, dt, diff, gpu);
glutSetWindow(win_id);
glutPostRedisplay();
}
static void display_func(void)
{
pre_display();
if (dvel)
draw_velocity();
else
draw_density();
post_display();
}
/*
----------------------------------------------------------------------
open_glut_window --- open a glut compatible window and set callbacks
----------------------------------------------------------------------
*/
static void open_glut_window(void)
{
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowPosition((glutGet(GLUT_SCREEN_WIDTH)-win_x)/2,
(glutGet(GLUT_SCREEN_HEIGHT)-win_y)/2);
glutInitWindowSize(win_x, win_y);
win_id = glutCreateWindow("Fluid Simulator");
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glutSwapBuffers();
glClear(GL_COLOR_BUFFER_BIT);
glutSwapBuffers();
pre_display();
glutKeyboardFunc(key_func);
glutMouseFunc(mouse_func);
glutMotionFunc(motion_func);
glutReshapeFunc(reshape_func);
glutIdleFunc(idle_func);
glutDisplayFunc(display_func);
}
/*
----------------------------------------------------------------------
main --- main routine
----------------------------------------------------------------------
*/
int main(int argc, char **argv)
{
glutInit(&argc, argv);
if (argc != 1 && argc != 7)
{
fprintf(stderr, "usage : %s N dt diff visc force source\n", argv[0]);
fprintf(stderr, "where:\n");
fprintf(stderr, "\t N : grid resolution\n");
fprintf(stderr, "\t dt : time step\n");
fprintf(stderr, "\t diff : diffusion rate of the density\n");
fprintf(stderr, "\t visc : viscosity of the fluid\n");
fprintf(stderr, "\t force : scales the mouse movement that generate a force\n");
fprintf(stderr, "\t source : amount of density that will be deposited\n");
exit(1);
}
if (argc == 1)
{
N = 128;
dt = 0.1f;
diff = 0.0f;
visc = 0.0f;
force = 5.0f;
source = 100.0f;
fprintf(stderr, "Using defaults : N=%d dt=%g diff=%g visc=%g force=%g source=%g\n",
N, dt, diff, visc, force, source);
}
else
{
N = atoi(argv[1]);
dt = atof(argv[2]);
diff = atof(argv[3]);
visc = atof(argv[4]);
force = atof(argv[5]);
source = atof(argv[6]);
}
printf("\n\nHow to use this demo:\n\n");
printf("\t Add densities with the right mouse button\n");
printf("\t Add velocities with the left mouse button and dragging the mouse\n");
printf("\t Toggle density/velocity display with the 'v' key\n");
printf("\t Clear the simulation by pressing the 'c' key\n");
printf("\t Quit by pressing the 'q' key\n");
printf("\t Save state of the simulation by pressing the 's' key\n");
printf("\t Read state of the simulation by pressing the 'r' key\n");
dvel = 0;
if (!allocate_data())
exit(1);
if (!cuda_allocate_data())
exit(1);
clear_data();
win_x = 1024;
win_y = 1024;
open_glut_window();
glutMainLoop();
exit(0);
}
| 40e4ef0ae2afa2e93fc2d18bfa77653adb86784c.cu | /*
======================================================================
=======================================================================
*/
#include <stdlib.h>
#include <stdio.h>
#include <GL/glut.h>
#include "solver.h"
#include "io.h"
#include "cuda_solver.h"
/* Device Simulation State */
static GPUSTATE gpu;
/* global variables */
static int N;
static float dt, diff, visc;
static float force, source;
static int dvel;
static fluid *u, *v, *u_prev, *v_prev;
static fluid *dens, *dens_prev;
static int win_id;
static int win_x, win_y;
static int mouse_down[3];
static int omx, omy, mx, my;
/*
----------------------------------------------------------------------
free/clear/allocate simulation data
----------------------------------------------------------------------
*/
static void free_data(void)
{
if (u)
cudaFreeHost(u);
if (v)
cudaFreeHost(v);
if (u_prev)
cudaFreeHost(u_prev);
if (v_prev)
cudaFreeHost(v_prev);
if (dens)
cudaFreeHost(dens);
if (dens_prev)
cudaFreeHost(dens_prev);
}
static void free_cuda_data(void)
{
checkCuda(cudaFree(gpu.u));
checkCuda(cudaFree(gpu.u_prev));
checkCuda(cudaFree(gpu.v));
checkCuda(cudaFree(gpu.v_prev));
checkCuda(cudaFree(gpu.dens));
checkCuda(cudaFree(gpu.dens_prev));
}
static void clear_data(void)
{
int i, size = (N + 2) * (N + 2);
for (i = 0; i < size; i++)
{
u[i] = v[i] = u_prev[i] = v_prev[i] = dens[i] = dens_prev[i] = 0.0f;
}
}
static int allocate_data(void)
{
int size = (N + 2) * (N + 2) * sizeof(fluid);
checkCuda(cudaMallocHost((void**)&u, size));
checkCuda(cudaMallocHost((void**)&v, size));
checkCuda(cudaMallocHost((void**)&u_prev, size));
checkCuda(cudaMallocHost((void**)&v_prev, size));
checkCuda(cudaMallocHost((void**)&dens, size));
checkCuda(cudaMallocHost((void**)&dens_prev, size));
if (!u || !v || !u_prev || !v_prev || !dens || !dens_prev)
{
fprintf(stderr, "cannot allocate data\n");
return (0);
}
return (1);
}
static int cuda_allocate_data(void)
{
int size = (N + 2) * (N + 2) * sizeof(fluid);
gpu.u = NULL;
checkCuda(cudaMalloc((void **) &gpu.u, size));
gpu.v = NULL;
checkCuda(cudaMalloc((void **) &gpu.v, size));
gpu.u_prev = NULL;
checkCuda(cudaMalloc((void **) &gpu.u_prev, size));
gpu.v_prev = NULL;
checkCuda(cudaMalloc((void **) &gpu.v_prev, size));
gpu.dens = NULL;
checkCuda(cudaMalloc((void **) &gpu.dens, size));
gpu.dens_prev = NULL;
checkCuda(cudaMalloc((void **) &gpu.dens_prev, size));
if (!gpu.u || !gpu.v || !gpu.u_prev ||
!gpu.v_prev || !gpu.dens || !gpu.dens_prev ) {
return 0;
}
return 1;
}
/*
----------------------------------------------------------------------
OpenGL specific drawing routines
----------------------------------------------------------------------
*/
static void pre_display(void)
{
glViewport(0, 0, win_x, win_y);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0.0, 1.0, 0.0, 1.0);
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
}
static void post_display(void)
{
glutSwapBuffers();
}
static void draw_velocity(void)
{
int i, j;
float x, y, h;
h = 1.0f / N;
glColor3f(1.0f, 1.0f, 1.0f);
glLineWidth(1.0f);
glBegin(GL_LINES);
for (i = 1; i <= N; i++)
{
x = (i - 0.5f) * h;
for (j = 1; j <= N; j++)
{
y = (j - 0.5f) * h;
glVertex2f(x, y);
glVertex2f(x + u[IX(i, j)], y + v[IX(i, j)]);
}
}
glEnd();
}
static void draw_density(void)
{
int i, j;
float x, y, h, d00, d01, d10, d11;
h = 1.0f / N;
glBegin(GL_QUADS);
for (i = 0; i <= N; i++)
{
x = (i - 0.5f) * h;
for (j = 0; j <= N; j++)
{
y = (j - 0.5f) * h;
d00 = dens[IX(i, j)];
d01 = dens[IX(i, j + 1)];
d10 = dens[IX(i + 1, j)];
d11 = dens[IX(i + 1, j + 1)];
glColor3f(d00, d00, d00);
glVertex2f(x, y);
glColor3f(d10, d10, d10);
glVertex2f(x + h, y);
glColor3f(d11, d11, d11);
glVertex2f(x + h, y + h);
glColor3f(d01, d01, d01);
glVertex2f(x, y + h);
}
}
glEnd();
}
/*
----------------------------------------------------------------------
relates mouse movements to forces sources
----------------------------------------------------------------------
*/
static void get_from_UI(fluid *d, fluid *u, fluid *v)
{
int i, j, size = (N + 2) * (N + 2);
int range = (N/128) + 1;
for (i = 0; i < size; i++)
{
u[i] = v[i] = d[i] = 0.0f;
}
if (!mouse_down[0] && !mouse_down[2])
return;
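    // map the mouse position (window pixels, origin at the top-left) to grid
    // indices in [1, N]; y is flipped because the fluid grid's origin is at the bottom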
i = (int)((mx / (float)win_x) * N + 1);
j = (int)(((win_y - my) / (float)win_y) * N + 1);
if (i < 1 || i > N || j < 1 || j > N)
return;
if (mouse_down[0])
{
u[IX(i, j)] = force * (mx - omx);
v[IX(i, j)] = force * (omy - my);
}
if (mouse_down[2])
{
for (int ii = -range; ii <= range; ii++)
{
for (int jj = -range; jj <= range; jj++)
{
if (i + ii < 1 || i + ii > N || j + jj < 1 || j + jj > N)
continue;
d[IX(i + ii, j + jj)] = source / (3 + range);
}
}
}
omx = mx;
omy = my;
return;
}
/*
----------------------------------------------------------------------
GLUT callback routines
----------------------------------------------------------------------
*/
static void key_func(unsigned char key, int x, int y)
{
switch (key)
{
case 'c':
case 'C':
clear_data();
break;
case 'q':
case 'Q':
free_data();
free_cuda_data();
exit(0);
break;
case 'v':
case 'V':
dvel = !dvel;
break;
case 's':
case 'S':
save_to_disk("state.fluid", N, u, v, u_prev, v_prev, dens, dens_prev);
break;
case 'r':
case 'R':
read_from_disk("state.fluid", N, u, v, u_prev, v_prev, dens, dens_prev);
break;
}
}
static void mouse_func(int button, int state, int x, int y)
{
omx = mx = x;
    omy = my = y;
mouse_down[button] = state == GLUT_DOWN;
}
static void motion_func(int x, int y)
{
mx = x;
my = y;
}
static void reshape_func(int width, int height)
{
glutSetWindow(win_id);
// glutReshapeWindow(width, height);
win_x = width;
win_y = height;
}
static void idle_func(void)
{
get_from_UI(dens_prev, u_prev, v_prev);
step_cuda(N, u, v, u_prev, v_prev, dens, dens_prev, visc, dt, diff, gpu);
glutSetWindow(win_id);
glutPostRedisplay();
}
static void display_func(void)
{
pre_display();
if (dvel)
draw_velocity();
else
draw_density();
post_display();
}
/*
----------------------------------------------------------------------
open_glut_window --- open a glut compatible window and set callbacks
----------------------------------------------------------------------
*/
static void open_glut_window(void)
{
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowPosition((glutGet(GLUT_SCREEN_WIDTH)-win_x)/2,
(glutGet(GLUT_SCREEN_HEIGHT)-win_y)/2);
glutInitWindowSize(win_x, win_y);
win_id = glutCreateWindow("Fluid Simulator");
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glutSwapBuffers();
glClear(GL_COLOR_BUFFER_BIT);
glutSwapBuffers();
pre_display();
glutKeyboardFunc(key_func);
glutMouseFunc(mouse_func);
glutMotionFunc(motion_func);
glutReshapeFunc(reshape_func);
glutIdleFunc(idle_func);
glutDisplayFunc(display_func);
}
/*
----------------------------------------------------------------------
main --- main routine
----------------------------------------------------------------------
*/
int main(int argc, char **argv)
{
glutInit(&argc, argv);
if (argc != 1 && argc != 7)
{
fprintf(stderr, "usage : %s N dt diff visc force source\n", argv[0]);
fprintf(stderr, "where:\n");
fprintf(stderr, "\t N : grid resolution\n");
fprintf(stderr, "\t dt : time step\n");
fprintf(stderr, "\t diff : diffusion rate of the density\n");
fprintf(stderr, "\t visc : viscosity of the fluid\n");
fprintf(stderr, "\t force : scales the mouse movement that generate a force\n");
fprintf(stderr, "\t source : amount of density that will be deposited\n");
exit(1);
}
if (argc == 1)
{
N = 128;
dt = 0.1f;
diff = 0.0f;
visc = 0.0f;
force = 5.0f;
source = 100.0f;
fprintf(stderr, "Using defaults : N=%d dt=%g diff=%g visc=%g force=%g source=%g\n",
N, dt, diff, visc, force, source);
}
else
{
N = atoi(argv[1]);
dt = atof(argv[2]);
diff = atof(argv[3]);
visc = atof(argv[4]);
force = atof(argv[5]);
source = atof(argv[6]);
}
printf("\n\nHow to use this demo:\n\n");
printf("\t Add densities with the right mouse button\n");
printf("\t Add velocities with the left mouse button and dragging the mouse\n");
printf("\t Toggle density/velocity display with the 'v' key\n");
printf("\t Clear the simulation by pressing the 'c' key\n");
printf("\t Quit by pressing the 'q' key\n");
printf("\t Save state of the simulation by pressing the 's' key\n");
printf("\t Read state of the simulation by pressing the 'r' key\n");
dvel = 0;
if (!allocate_data())
exit(1);
if (!cuda_allocate_data())
exit(1);
clear_data();
win_x = 1024;
win_y = 1024;
open_glut_window();
glutMainLoop();
exit(0);
}
|
f7e40bf3b4f176a134fd07d90aeb4aeb5d3f5037.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// needs to be compiled with option -arch=sm_20 to work
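// (hardware atomicAdd on float requires compute capability 2.0 or newer, which is
//  why the -arch=sm_20 flag mentioned above is needed)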
#include <stdio.h>
#define CUDA_CHECK(cmd) {hipError_t error = cmd; if(error!=hipSuccess){printf("<%s>:%i ",__FILE__,__LINE__); printf("[CUDA] Error: %s\n", hipGetErrorString(error));}}
#define CUDA_CHECK_KERNEL {hipError_t error = hipGetLastError(); if(error!=hipSuccess){printf("<%s>:%i ",__FILE__,__LINE__); printf("[CUDA] Error: %s\n", hipGetErrorString(error));}}
#define N 1000000
#define DELTA 0.001f
__global__ void init_c(float *c){
int idx=blockIdx.x*blockDim.x+threadIdx.x;
if (idx==0) *c=0.0f;
}
__global__ void scalarp(float *a, float *b, float *c){
// get global thread id
int tid = blockIdx.x*blockDim.x+threadIdx.x;
int idx = tid;
float temp;
if (idx<N) {
// multiply own elements
temp=a[tid]*b[tid];
// add to result
//atomicAdd(c,temp);
atomicAdd(c,a[tid]*b[tid]);
}
}
int main (int argc, char **argv){
float a_host[N], b_host[N], c_host, d_host=0.0f;
float *a_device, *b_device, *c_device;
int i;
int blocksize=256;
dim3 dimBlock(blocksize);
dim3 dimGrid(ceil(N/(float)blocksize));
for (i=0;i<N;i++) a_host[i]=1.0f*i;
for (i=0;i<N;i++) b_host[i]=1.0f*i;
// alloc device memory
CUDA_CHECK(hipMalloc((void**)&a_device,N*sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&b_device,N*sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&c_device,sizeof(float)));
// transfer data
CUDA_CHECK(hipMemcpy(a_device,a_host,N*sizeof(float),hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(b_device,b_host,N*sizeof(float),hipMemcpyHostToDevice));
// set c to 0
hipLaunchKernelGGL(( init_c), dim3(1),dim3(1), 0, 0, c_device);
CUDA_CHECK_KERNEL;
// invoke scalarp kernel
hipLaunchKernelGGL(( scalarp), dim3(dimGrid),dim3(dimBlock), 0, 0, a_device,b_device,c_device);
CUDA_CHECK_KERNEL;
// transfer data
CUDA_CHECK(hipMemcpy(&c_host,c_device,sizeof(float),hipMemcpyDeviceToHost));
for (i=0;i<N;i++) d_host+=a_host[i]*b_host[i];
if ((abs(d_host - c_host) > DELTA*c_host)) printf("Solution invalid. GPU has %g, CPU has %g\n",c_host,d_host);
// free device
CUDA_CHECK(hipFree(a_device));
CUDA_CHECK(hipFree(b_device));
CUDA_CHECK(hipFree(c_device));
return 0;
}
| f7e40bf3b4f176a134fd07d90aeb4aeb5d3f5037.cu | // needs to be compiled with option -arch=sm_20 to work
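// (hardware atomicAdd on float requires compute capability 2.0 or newer, which is
//  why the -arch=sm_20 flag mentioned above is needed)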
#include <stdio.h>
#define CUDA_CHECK(cmd) {cudaError_t error = cmd; if(error!=cudaSuccess){printf("<%s>:%i ",__FILE__,__LINE__); printf("[CUDA] Error: %s\n", cudaGetErrorString(error));}}
#define CUDA_CHECK_KERNEL {cudaError_t error = cudaGetLastError(); if(error!=cudaSuccess){printf("<%s>:%i ",__FILE__,__LINE__); printf("[CUDA] Error: %s\n", cudaGetErrorString(error));}}
#define N 1000000
#define DELTA 0.001f
__global__ void init_c(float *c){
int idx=blockIdx.x*blockDim.x+threadIdx.x;
if (idx==0) *c=0.0f;
}
__global__ void scalarp(float *a, float *b, float *c){
// get global thread id
int tid = blockIdx.x*blockDim.x+threadIdx.x;
int idx = tid;
float temp;
if (idx<N) {
// multiply own elements
temp=a[tid]*b[tid];
// add to result
//atomicAdd(c,temp);
atomicAdd(c,a[tid]*b[tid]);
}
}
int main (int argc, char **argv){
float a_host[N], b_host[N], c_host, d_host=0.0f;
float *a_device, *b_device, *c_device;
int i;
int blocksize=256;
dim3 dimBlock(blocksize);
dim3 dimGrid(ceil(N/(float)blocksize));
for (i=0;i<N;i++) a_host[i]=1.0f*i;
for (i=0;i<N;i++) b_host[i]=1.0f*i;
// alloc device memory
CUDA_CHECK(cudaMalloc((void**)&a_device,N*sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&b_device,N*sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&c_device,sizeof(float)));
// transfer data
CUDA_CHECK(cudaMemcpy(a_device,a_host,N*sizeof(float),cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(b_device,b_host,N*sizeof(float),cudaMemcpyHostToDevice));
// set c to 0
init_c<<<1,1>>>(c_device);
CUDA_CHECK_KERNEL;
// invoke scalarp kernel
scalarp<<<dimGrid,dimBlock>>>(a_device,b_device,c_device);
CUDA_CHECK_KERNEL;
// transfer data
CUDA_CHECK(cudaMemcpy(&c_host,c_device,sizeof(float),cudaMemcpyDeviceToHost));
for (i=0;i<N;i++) d_host+=a_host[i]*b_host[i];
if ((abs(d_host - c_host) > DELTA*c_host)) printf("Solution invalid. GPU has %g, CPU has %g\n",c_host,d_host);
// free device
CUDA_CHECK(cudaFree(a_device));
CUDA_CHECK(cudaFree(b_device));
CUDA_CHECK(cudaFree(c_device));
return 0;
}
|
a33289ded1ec948cb7f4035182570d561978ce70.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@precisions normal z -> s d c
@author Azzam Haidar
@author Tingxing Dong
*/
#include "common_magma.h"
///////////////////////////////////////////////////////////////////////////////////////
__global__ void kernel_set_pointer(magmaDoubleComplex **output_array,
magmaDoubleComplex *input,
magma_int_t lda,
magma_int_t row, magma_int_t column,
magma_int_t batch_offset)
{
output_array[blockIdx.x] = input + blockIdx.x * batch_offset + row + column * lda;
//printf("==> kernel_set_pointer input_array %p output_array %p \n",input+ blockIdx.x * batch_offset,output_array[blockIdx.x]);
}
extern "C"
void zset_pointer(magmaDoubleComplex **output_array,
magmaDoubleComplex *input,
magma_int_t lda,
magma_int_t row, magma_int_t column,
magma_int_t batch_offset,
magma_int_t batchCount,
magma_queue_t queue)
{
/*
convert consecutive stored variable to array stored
for example the size of A is N*batchCount; N is the size of A(batch_offset)
change into dA_array[0] dA_array[1],... dA_array[batchCount-1], where the size of each dA_array[i] is N
*/
hipLaunchKernelGGL(( kernel_set_pointer), dim3(batchCount), dim3(1), 0, queue, output_array, input, lda, row, column, batch_offset);
}
__global__ void zdisplace_pointers_kernel(magmaDoubleComplex **output_array,
magmaDoubleComplex **input_array, magma_int_t lda,
magma_int_t row, magma_int_t column)
{
magmaDoubleComplex *inpt = input_array[blockIdx.x];
output_array[blockIdx.x] = &inpt[row + column * lda];
}
extern "C"
void magma_zdisplace_pointers(magmaDoubleComplex **output_array,
magmaDoubleComplex **input_array, magma_int_t lda,
magma_int_t row, magma_int_t column,
magma_int_t batchCount, magma_queue_t queue)
{
/*
compute the offset for all the matrices and save the displacement of the new pointer on output_array.
input_array contains the pointers to the initial position.
output_array[i] = input_array[i] + row + lda * column;
*/
hipLaunchKernelGGL(( zdisplace_pointers_kernel), dim3(batchCount), dim3(1), 0, queue, output_array, input_array, lda, row, column);
}
| a33289ded1ec948cb7f4035182570d561978ce70.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@precisions normal z -> s d c
@author Azzam Haidar
@author Tingxing Dong
*/
#include "common_magma.h"
///////////////////////////////////////////////////////////////////////////////////////
__global__ void kernel_set_pointer(magmaDoubleComplex **output_array,
magmaDoubleComplex *input,
magma_int_t lda,
magma_int_t row, magma_int_t column,
magma_int_t batch_offset)
{
output_array[blockIdx.x] = input + blockIdx.x * batch_offset + row + column * lda;
//printf("==> kernel_set_pointer input_array %p output_array %p \n",input+ blockIdx.x * batch_offset,output_array[blockIdx.x]);
}
extern "C"
void zset_pointer(magmaDoubleComplex **output_array,
magmaDoubleComplex *input,
magma_int_t lda,
magma_int_t row, magma_int_t column,
magma_int_t batch_offset,
magma_int_t batchCount,
magma_queue_t queue)
{
/*
convert consecutive stored variable to array stored
for example the size of A is N*batchCount; N is the size of A(batch_offset)
change into dA_array[0] dA_array[1],... dA_array[batchCount-1], where the size of each dA_array[i] is N
*/
kernel_set_pointer<<<batchCount, 1, 0, queue>>>(output_array, input, lda, row, column, batch_offset);
}
__global__ void zdisplace_pointers_kernel(magmaDoubleComplex **output_array,
magmaDoubleComplex **input_array, magma_int_t lda,
magma_int_t row, magma_int_t column)
{
magmaDoubleComplex *inpt = input_array[blockIdx.x];
output_array[blockIdx.x] = &inpt[row + column * lda];
}
extern "C"
void magma_zdisplace_pointers(magmaDoubleComplex **output_array,
magmaDoubleComplex **input_array, magma_int_t lda,
magma_int_t row, magma_int_t column,
magma_int_t batchCount, magma_queue_t queue)
{
/*
compute the offset for all the matrices and save the displacement of the new pointer on output_array.
input_array contains the pointers to the initial position.
output_array[i] = input_array[i] + row + lda * column;
*/
zdisplace_pointers_kernel<<<batchCount, 1, 0, queue>>>(output_array, input_array, lda, row, column);
}
|
d8d7adb8123259ac2a2e648862a17c9828c820b5.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <fstream>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <ctime>
#include <opencv2/opencv.hpp>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
printf("Error: %s,%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, hipGetErrorString(error));\
exit(1);\
}\
}
struct RGB
{
uchar R;
uchar G;
uchar B;
__device__ void Set(float r, float g, float b)
{
R = (uchar)(r * 255);
G = (uchar)(g * 255);
B = (uchar)(b * 255);
}
__device__ void Set(uchar r, uchar g, uchar b)
{
R = r;
G = g;
B = b;
}
};
__device__ RGB ConvertHSVtoRGB(float H, float S, float V)
{
float C = V * S;
float HH = H / 60;
float X = C * (1.0f - fabs(fmod(HH, 2.0f) - 1.0f));
RGB rgb_temp;
if (HH >= 0 && HH <= 1)
{
rgb_temp.Set(C,X,0);
}
else if (HH >= 1 && HH <= 2)
{
rgb_temp.Set(X,C,0);
}
else if (HH >= 2 && HH <= 3)
{
rgb_temp.Set(0,C,X);
}
else if (HH >= 3 && HH <= 4)
{
rgb_temp.Set(0,X,C);
}
else if (HH >= 4 && HH <= 5)
{
rgb_temp.Set(X,0,C);
}
else if (HH >= 5 && HH < 6)
{
rgb_temp.Set(C,0,X);
}
uchar m = (uchar)((V - C) * 255);
rgb_temp.Set((uchar)(rgb_temp.R + m), (uchar)(rgb_temp.G + m), (uchar)(rgb_temp.B + m));
return rgb_temp;
}
__device__ float function_table(float in)//-407
{
//5, -407.0, 0.1,//8752
// 3345.0, 70.0,//5000
// 7344.0, 135.0,//1000
// 8345.0, 150.0,//0
// 8752.0, 250.0,
float input[5] = {-407.0f,3345.0f, 7344.0f,8345.0f,8752.0f};
float output[5] = {0.1f,70.0f,135.0f,150.0f,250.0f};
float out;
if (in <= input[0])
{
out = output[0];
}
else if (in > input[0] && in < input[4])
{
for (int ii = 0; ii < 4; ii++)
{
if (in > input[ii] && in <= input[ii+1])
{
out = output[ii] + (in - input[ii]) / (input[ii + 1] - input[ii]) * (output[ii + 1] - output[ii]);
break;
}
}
}
else
{
out = output[4];
}
return out;
}
__global__ void ElevationToRGB(short *elev, RGB* rgbValues)
{
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
if (threadId >= 10800 * 4)
return;
short mb = elev[threadId];
if (mb > 8752)
mb = 8752;
else if (mb < -407)
mb = -407;
mb = 8752 - 407 - mb;
float H = function_table(mb);
RGB rgb = ConvertHSVtoRGB(H,1,1);
rgbValues[threadId] = rgb;
}
int main(int argc, char **argv)
{
int totalColumns = 10800 * 4;
int columns = 10800;
int rows[4] = {4800, 6000, 6000, 4800};
int totalRows = rows[0] + rows[1] + rows[2] + rows[3];
const char* fileNames[4][4] = { "all10//a10g", "all10//b10g","all10//c10g","all10//d10g",
"all10//e10g", "all10//f10g","all10//g10g","all10//h10g",
"all10//i10g", "all10//j10g","all10//k10g","all10//l10g",
"all10//m10g", "all10//n10g","all10//o10g","all10//p10g"};
short *memblock = new short[totalColumns * totalRows];
int rowCount = 0;
for (int ii = 0; ii < 4; ii++)
{
std::ifstream file1(fileNames[ii][0], std::ios::in | std::ios::binary);
std::ifstream file2(fileNames[ii][1], std::ios::in | std::ios::binary);
std::ifstream file3(fileNames[ii][2], std::ios::in | std::ios::binary);
std::ifstream file4(fileNames[ii][3], std::ios::in | std::ios::binary);
file1.seekg(0, std::ios::beg);
file2.seekg(0, std::ios::beg);
file3.seekg(0, std::ios::beg);
file4.seekg(0, std::ios::beg);
if (file1.is_open() && file2.is_open() && file3.is_open() && file4.is_open()/* && frame.is_open()*/)
{
for (int jj = 0; jj < rows[ii]; jj++)
{
file1.read((char*)&memblock[totalColumns * rowCount + 0], columns * sizeof(short));
file2.read((char*)&memblock[totalColumns * rowCount + columns], columns * sizeof(short));
file3.read((char*)&memblock[totalColumns * rowCount + columns * 2], columns * sizeof(short));
file4.read((char*)&memblock[totalColumns * rowCount + columns * 3], columns * sizeof(short));
rowCount++;
}
file1.close();
file2.close();
file3.close();
file4.close();
}
else
{
std::cout << "batch: " << ii << " one of the files unable to open" << std::endl;
std::cin.get();
return 0;
}
}
std::cout << "Finished reading elevations" << std::endl;
cv::Size globeSize(totalColumns, totalRows);
cv::Mat globeImage(globeSize, CV_8UC3);
std::cout << "Created globeImage" << std::endl;
short *d_memblock;
CHECK(hipMalloc((short **)&d_memblock, totalColumns * sizeof(short)));
RGB *d_rgbValues;
CHECK(hipMalloc((RGB **)&d_rgbValues, totalColumns * sizeof(RGB)));
//RGB *h_rgbValues = (RGB*)malloc(sizeof(RGB) * totalColumns);
RGB *h_rgbValues = new RGB[totalColumns];
clock_t startTimeCuda = clock();
rowCount = 0;
while (rowCount < totalRows)
{
CHECK(hipMemcpy(d_memblock, &memblock[rowCount * totalColumns], sizeof(short) * totalColumns, hipMemcpyHostToDevice));
int blockSize = 1024;
int numBlocks = (totalColumns + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( ElevationToRGB) , dim3(numBlocks), dim3(blockSize), 0, 0, d_memblock,d_rgbValues);
hipError_t err = hipGetLastError();
char any;
if (err != hipSuccess)
{
printf("Error: %s\n", hipGetErrorString(err));
std::cin >> any;
}
CHECK(hipDeviceSynchronize());
CHECK(hipMemcpy(h_rgbValues, d_rgbValues, sizeof(RGB) * totalColumns, hipMemcpyDeviceToHost));
uchar *ptr = globeImage.ptr(rowCount);
for (int col = 0; col < globeImage.cols; col++)
{
uchar * uc_pixel = ptr;
uc_pixel[0] = h_rgbValues[col].B;
uc_pixel[1] = h_rgbValues[col].G;
uc_pixel[2] = h_rgbValues[col].R;
ptr += 3;
}
rowCount++;
}
std::cout << "Finished computing rgb values in " << double( clock() - startTimeCuda ) / (double)CLOCKS_PER_SEC<< " seconds." << std::endl;
std::cout << "Started globe.png write" << std::endl;
clock_t startTime = clock();
cv::imwrite( "globe.png", globeImage );
std::cout << "Finished globe.png in " << double( clock() - startTime ) / (double)CLOCKS_PER_SEC<< " seconds." << std::endl;
delete [] memblock;
hipFree(d_memblock);
hipFree(d_rgbValues);
delete [] h_rgbValues;
hipDeviceReset();
}
| d8d7adb8123259ac2a2e648862a17c9828c820b5.cu | #include <algorithm>
#include <fstream>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <ctime>
#include <opencv2/opencv.hpp>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
printf("Error: %s,%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error));\
exit(1);\
}\
}
struct RGB
{
uchar R;
uchar G;
uchar B;
__device__ void Set(float r, float g, float b)
{
R = (uchar)(r * 255);
G = (uchar)(g * 255);
B = (uchar)(b * 255);
}
__device__ void Set(uchar r, uchar g, uchar b)
{
R = r;
G = g;
B = b;
}
};
__device__ RGB ConvertHSVtoRGB(float H, float S, float V)
{
float C = V * S;
float HH = H / 60;
float X = C * (1.0f - fabs(fmod(HH, 2.0f) - 1.0f));
RGB rgb_temp;
if (HH >= 0 && HH <= 1)
{
rgb_temp.Set(C,X,0);
}
else if (HH >= 1 && HH <= 2)
{
rgb_temp.Set(X,C,0);
}
else if (HH >= 2 && HH <= 3)
{
rgb_temp.Set(0,C,X);
}
else if (HH >= 3 && HH <= 4)
{
rgb_temp.Set(0,X,C);
}
else if (HH >= 4 && HH <= 5)
{
rgb_temp.Set(X,0,C);
}
else if (HH >= 5 && HH < 6)
{
rgb_temp.Set(C,0,X);
}
uchar m = (uchar)((V - C) * 255);
rgb_temp.Set((uchar)(rgb_temp.R + m), (uchar)(rgb_temp.G + m), (uchar)(rgb_temp.B + m));
return rgb_temp;
}
__device__ float function_table(float in)//-407
{
//5, -407.0, 0.1,//8752
// 3345.0, 70.0,//5000
// 7344.0, 135.0,//1000
// 8345.0, 150.0,//0
// 8752.0, 250.0,
float input[5] = {-407.0f,3345.0f, 7344.0f,8345.0f,8752.0f};
float output[5] = {0.1f,70.0f,135.0f,150.0f,250.0f};
float out;
if (in <= input[0])
{
out = output[0];
}
else if (in > input[0] && in < input[4])
{
for (int ii = 0; ii < 4; ii++)
{
if (in > input[ii] && in <= input[ii+1])
{
out = output[ii] + (in - input[ii]) / (input[ii + 1] - input[ii]) * (output[ii + 1] - output[ii]);
break;
}
}
}
else
{
out = output[4];
}
return out;
}
__global__ void ElevationToRGB(short *elev, RGB* rgbValues)
{
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
if (threadId >= 10800 * 4)
return;
short mb = elev[threadId];
if (mb > 8752)
mb = 8752;
else if (mb < -407)
mb = -407;
mb = 8752 - 407 - mb;
float H = function_table(mb);
RGB rgb = ConvertHSVtoRGB(H,1,1);
rgbValues[threadId] = rgb;
}
int main(int argc, char **argv)
{
int totalColumns = 10800 * 4;
int columns = 10800;
int rows[4] = {4800, 6000, 6000, 4800};
int totalRows = rows[0] + rows[1] + rows[2] + rows[3];
const char* fileNames[4][4] = { "all10//a10g", "all10//b10g","all10//c10g","all10//d10g",
"all10//e10g", "all10//f10g","all10//g10g","all10//h10g",
"all10//i10g", "all10//j10g","all10//k10g","all10//l10g",
"all10//m10g", "all10//n10g","all10//o10g","all10//p10g"};
short *memblock = new short[totalColumns * totalRows];
int rowCount = 0;
for (int ii = 0; ii < 4; ii++)
{
std::ifstream file1(fileNames[ii][0], std::ios::in | std::ios::binary);
std::ifstream file2(fileNames[ii][1], std::ios::in | std::ios::binary);
std::ifstream file3(fileNames[ii][2], std::ios::in | std::ios::binary);
std::ifstream file4(fileNames[ii][3], std::ios::in | std::ios::binary);
file1.seekg(0, std::ios::beg);
file2.seekg(0, std::ios::beg);
file3.seekg(0, std::ios::beg);
file4.seekg(0, std::ios::beg);
if (file1.is_open() && file2.is_open() && file3.is_open() && file4.is_open()/* && frame.is_open()*/)
{
for (int jj = 0; jj < rows[ii]; jj++)
{
file1.read((char*)&memblock[totalColumns * rowCount + 0], columns * sizeof(short));
file2.read((char*)&memblock[totalColumns * rowCount + columns], columns * sizeof(short));
file3.read((char*)&memblock[totalColumns * rowCount + columns * 2], columns * sizeof(short));
file4.read((char*)&memblock[totalColumns * rowCount + columns * 3], columns * sizeof(short));
rowCount++;
}
file1.close();
file2.close();
file3.close();
file4.close();
}
else
{
std::cout << "batch: " << ii << " one of the files unable to open" << std::endl;
std::cin.get();
return 0;
}
}
std::cout << "Finished reading elevations" << std::endl;
cv::Size globeSize(totalColumns, totalRows);
cv::Mat globeImage(globeSize, CV_8UC3);
std::cout << "Created globeImage" << std::endl;
short *d_memblock;
CHECK(cudaMalloc((short **)&d_memblock, totalColumns * sizeof(short)));
RGB *d_rgbValues;
CHECK(cudaMalloc((RGB **)&d_rgbValues, totalColumns * sizeof(RGB)));
//RGB *h_rgbValues = (RGB*)malloc(sizeof(RGB) * totalColumns);
RGB *h_rgbValues = new RGB[totalColumns];
clock_t startTimeCuda = clock();
rowCount = 0;
while (rowCount < totalRows)
{
CHECK(cudaMemcpy(d_memblock, &memblock[rowCount * totalColumns], sizeof(short) * totalColumns, cudaMemcpyHostToDevice));
int blockSize = 1024;
int numBlocks = (totalColumns + blockSize - 1) / blockSize;
ElevationToRGB <<<numBlocks, blockSize>>>(d_memblock,d_rgbValues);
cudaError_t err = cudaGetLastError();
char any;
if (err != cudaSuccess)
{
printf("Error: %s\n", cudaGetErrorString(err));
std::cin >> any;
}
CHECK(cudaDeviceSynchronize());
CHECK(cudaMemcpy(h_rgbValues, d_rgbValues, sizeof(RGB) * totalColumns, cudaMemcpyDeviceToHost));
uchar *ptr = globeImage.ptr(rowCount);
for (int col = 0; col < globeImage.cols; col++)
{
uchar * uc_pixel = ptr;
uc_pixel[0] = h_rgbValues[col].B;
uc_pixel[1] = h_rgbValues[col].G;
uc_pixel[2] = h_rgbValues[col].R;
ptr += 3;
}
rowCount++;
}
std::cout << "Finished computing rgb values in " << double( clock() - startTimeCuda ) / (double)CLOCKS_PER_SEC<< " seconds." << std::endl;
std::cout << "Started globe.png write" << std::endl;
clock_t startTime = clock();
cv::imwrite( "globe.png", globeImage );
std::cout << "Finished globe.png in " << double( clock() - startTime ) / (double)CLOCKS_PER_SEC<< " seconds." << std::endl;
delete [] memblock;
cudaFree(d_memblock);
cudaFree(d_rgbValues);
delete [] h_rgbValues;
cudaDeviceReset();
}
|