hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
cdeb18b85a070e610ded57f5b53929d7ab0bbca4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
//NI DAQ
#include "../include/ContAcq-IntClk.h"
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
// Variables
bool noprompt = false;
unsigned int my_timer;
texture<float,1,hipReadModeElementType> texmem1;
texture<float,1,hipReadModeElementType> texmem2;
texture<float,1,hipReadModeElementType> texmem3;
texture<float,1,hipReadModeElementType> texmem4;
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
// Functions
void CleanupResources(void);
void RandomInit_int(unsigned*, int);
void RandomInit_fp(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(float *A, float *B, int N)
{
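// Stress loop: mixes texture fetches, constant-memory reads, shared-memory accesses,
// and floating-point arithmetic, then writes the results back to A and B.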
int tid = blockIdx.x*blockDim.x + threadIdx.x;
float Value1=0;
float Value2=0;
__device__ __shared__ float I1[THREADS_PER_BLOCK];
__device__ __shared__ float I2[THREADS_PER_BLOCK];
I1[tid%THREADS_PER_BLOCK] = A[tid];
I2[tid%THREADS_PER_BLOCK] = B[tid];
__syncthreads();
float sum = 0.0;
if(tid < N){
for(unsigned i=0; i<ITERATIONS; ++i){
sum = tex1Dfetch(texmem1,tid)+B[tid];
for(unsigned j=1; j<=2; ++j){
sum+=tex1Dfetch(texmem1,tid*j);
Value1 *=ConstArray1[(tid+5)%THREADS_PER_BLOCK];
Value2 += A[tid]* I1[(i+j)%THREADS_PER_BLOCK];
sum*=B[tid]+Value2;
Value1 /= I2[(i+j+5)%THREADS_PER_BLOCK]+A[tid];
Value2 *= I2[(i+j)%THREADS_PER_BLOCK]+tex1Dfetch(texmem2,tid*j);
sum/=log2(ConstArray1[(tid+10)%THREADS_PER_BLOCK]);
}
A[tid*2] = sum+Value1;
B[tid] = A[tid*2]+A[tid]+Value2;
}
}
}
__global__ void PowerKernalEmpty(unsigned* C, int N)
{
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<ITERATIONS*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
/*
__asm volatile (
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:"
);
*/
}
C[id]=id;
__syncthreads();
}
// Host code
float *h_A1, *h_A2, *h_A3;
float *d_A1, *d_A2, *d_A3;
int main()
{
printf("Power Microbenchmarks\n");
float array1[THREADS_PER_BLOCK];
srand((unsigned)time(0));
for(int i=0; i<THREADS_PER_BLOCK;i++){
array1[i] = rand() / (float)RAND_MAX;
}
float array2[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array2[i] = rand() / (float)RAND_MAX;
}
hipMemcpyToSymbol("ConstArray1", array1, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol("ConstArray2", array2, sizeof(float) * THREADS_PER_BLOCK );
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2;
// Allocate input vectors h_A and h_B in host memory
size_t size1 = N * sizeof(float);
h_A1 = (float*)malloc(size1);
if (h_A1 == 0) CleanupResources();
h_A2 = (float*)malloc(size1);
if (h_A2 == 0) CleanupResources();
float *host_texture1 = (float*) malloc(size1);
for (int i=0; i< N; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
float *device_texture3;
float *device_texture4;
hipMalloc((void**) &device_texture1, size1);
hipMalloc((void**) &device_texture2, size1);
hipMalloc((void**) &device_texture3, size1);
hipMalloc((void**) &device_texture4, size1);
hipMemcpy(device_texture1, host_texture1, size1, hipMemcpyHostToDevice);
hipMemcpy(device_texture2, host_texture1, size1, hipMemcpyHostToDevice);
hipMemcpy(device_texture3, host_texture1, size1, hipMemcpyHostToDevice);
hipMemcpy(device_texture4, host_texture1, size1, hipMemcpyHostToDevice);
hipBindTexture(0, texmem1, device_texture1, size1);
hipBindTexture(0, texmem2, device_texture2, size1);
hipBindTexture(0, texmem3, device_texture3, size1);
hipBindTexture(0, texmem4, device_texture4, size1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
// Initialize input vectors
RandomInit_fp(h_A1, N);
RandomInit_fp(h_A2, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A1, size1) );
checkCudaErrors( hipMalloc((void**)&d_A2, size1) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A1, h_A1, size1, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_A2, h_A2, size1, hipMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
CUDA_SAFE_CALL( hipDeviceSynchronize() );
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
hipLaunchKernelGGL(( PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A1, d_A2, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A1)
hipFree(d_A1);
if (d_A2)
hipFree(d_A2);
if (d_A3)
hipFree(d_A3);
// Free host memory
if (h_A1)
free(h_A1);
if (h_A2)
free(h_A2);
if (h_A3)
free(h_A3);
}
// Allocates an array with random float entries.
void RandomInit_int(float* data, int n)
{
srand((unsigned)time(0));
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
}
void RandomInit_fp(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
}
|
cdeb18b85a070e610ded57f5b53929d7ab0bbca4.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
//NI DAQ
#include "../include/ContAcq-IntClk.h"
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
// Variables
bool noprompt = false;
unsigned int my_timer;
texture<float,1,cudaReadModeElementType> texmem1;
texture<float,1,cudaReadModeElementType> texmem2;
texture<float,1,cudaReadModeElementType> texmem3;
texture<float,1,cudaReadModeElementType> texmem4;
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
// Functions
void CleanupResources(void);
void RandomInit_int(unsigned*, int);
void RandomInit_fp(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(float *A, float *B, int N)
{
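// Stress loop: mixes texture fetches, constant-memory reads, shared-memory accesses,
// and floating-point arithmetic, then writes the results back to A and B.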
int tid = blockIdx.x*blockDim.x + threadIdx.x;
float Value1=0;
float Value2=0;
__device__ __shared__ float I1[THREADS_PER_BLOCK];
__device__ __shared__ float I2[THREADS_PER_BLOCK];
I1[tid%THREADS_PER_BLOCK] = A[tid];
I2[tid%THREADS_PER_BLOCK] = B[tid];
__syncthreads();
float sum = 0.0;
if(tid < N){
for(unsigned i=0; i<ITERATIONS; ++i){
sum = tex1Dfetch(texmem1,tid)+B[tid];
for(unsigned j=1; j<=2; ++j){
sum+=tex1Dfetch(texmem1,tid*j);
Value1 *=ConstArray1[(tid+5)%THREADS_PER_BLOCK];
Value2 += A[tid]* I1[(i+j)%THREADS_PER_BLOCK];
sum*=B[tid]+Value2;
Value1 /= I2[(i+j+5)%THREADS_PER_BLOCK]+A[tid];
Value2 *= I2[(i+j)%THREADS_PER_BLOCK]+tex1Dfetch(texmem2,tid*j);
sum/=log2(ConstArray1[(tid+10)%THREADS_PER_BLOCK]);
}
A[tid*2] = sum+Value1;
B[tid] = A[tid*2]+A[tid]+Value2;
}
}
}
__global__ void PowerKernalEmpty(unsigned* C, int N)
{
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<ITERATIONS*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
/*
__asm volatile (
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:"
);
*/
}
C[id]=id;
__syncthreads();
}
// Host code
float *h_A1, *h_A2, *h_A3;
float *d_A1, *d_A2, *d_A3;
int main()
{
printf("Power Microbenchmarks\n");
float array1[THREADS_PER_BLOCK];
srand((unsigned)time(0));
for(int i=0; i<THREADS_PER_BLOCK;i++){
array1[i] = rand() / (float)RAND_MAX;
}
float array2[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array2[i] = rand() / (float)RAND_MAX;
}
cudaMemcpyToSymbol("ConstArray1", array1, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol("ConstArray2", array2, sizeof(float) * THREADS_PER_BLOCK );
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2;
// Allocate input vectors h_A and h_B in host memory
size_t size1 = N * sizeof(float);
h_A1 = (float*)malloc(size1);
if (h_A1 == 0) CleanupResources();
h_A2 = (float*)malloc(size1);
if (h_A2 == 0) CleanupResources();
float *host_texture1 = (float*) malloc(size1);
for (int i=0; i< N; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
float *device_texture3;
float *device_texture4;
cudaMalloc((void**) &device_texture1, size1);
cudaMalloc((void**) &device_texture2, size1);
cudaMalloc((void**) &device_texture3, size1);
cudaMalloc((void**) &device_texture4, size1);
cudaMemcpy(device_texture1, host_texture1, size1, cudaMemcpyHostToDevice);
cudaMemcpy(device_texture2, host_texture1, size1, cudaMemcpyHostToDevice);
cudaMemcpy(device_texture3, host_texture1, size1, cudaMemcpyHostToDevice);
cudaMemcpy(device_texture4, host_texture1, size1, cudaMemcpyHostToDevice);
cudaBindTexture(0, texmem1, device_texture1, size1);
cudaBindTexture(0, texmem2, device_texture2, size1);
cudaBindTexture(0, texmem3, device_texture3, size1);
cudaBindTexture(0, texmem4, device_texture4, size1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
// Initialize input vectors
RandomInit_fp(h_A1, N);
RandomInit_fp(h_A2, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A1, size1) );
checkCudaErrors( cudaMalloc((void**)&d_A2, size1) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A1, h_A1, size1, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_A2, h_A2, size1, cudaMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
CUDA_SAFE_CALL( cudaThreadSynchronize() );
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal1<<<dimGrid,dimBlock>>>(d_A1, d_A2, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A1)
cudaFree(d_A1);
if (d_A2)
cudaFree(d_A2);
if (d_A3)
cudaFree(d_A3);
// Free host memory
if (h_A1)
free(h_A1);
if (h_A2)
free(h_A2);
if (h_A3)
free(h_A3);
}
// Allocates an array with random float entries.
void RandomInit_int(float* data, int n)
{
srand((unsigned)time(0));
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
}
void RandomInit_fp(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
}
|
8e5cf37bd7432e134b80e9f87abbfff8760b8497.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifndef __lcl_main
#define __lcl_main
#define NX 512//256
#define BATCH 1
#include "constants.cu"
#include "matches.cu"
#include "FilterMatches.cu"
#include "fftWork.cu"
#include <stdio.h>
#include <hipfft.h>
#include <hipfftXt.h>
#include <cufftw.h>
#include <cmath>
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
#include "constants.cu"
#include "matches.cu"
#include "FilterMatches.cu"
#include <iostream>
MicData generateData()
{
//create array of batches(microphones)
int numberOfBatches = -1;
std::cout<<"Number of microphones(must be at least 3): ";
std::cin>>numberOfBatches;
hipfftDoubleComplex** micDataArray = (hipfftDoubleComplex**)malloc(sizeof(hipfftDoubleComplex*)*numberOfBatches);
//length of each mic's array should be the same as int* waveLengths
//create array to store waves
int numberOfWaves = -1;
std::cout<<"Number of waves: ";
std::cin>>numberOfWaves;
//int** wavesArray = (int**)malloc(sizeof(int*) * numberOfWaves);
int* waveLengths = (int*)malloc(sizeof(int) * numberOfWaves);
for(int batchNum=0; batchNum<numberOfBatches; batchNum++){
//initialize h_data
hipfftDoubleComplex* h_data = (hipfftDoubleComplex*)malloc(sizeof(hipfftDoubleComplex)*NX);
for (unsigned int i = 0; i < NX; i++){
h_data[i].x = 0;
h_data[i].y = 0.0;
}
srand (time(NULL));
/* generate secret number between 1 and 10: */
bool addNoise = 0;
for (unsigned int i = 0; i < numberOfWaves; i++)
{
double freq = 0;
std::cout<< "Wave frequency (whole numbers only): ";
std::cin>>freq;
std::cout<<"add noise? (0 = no, 1 = yes): ";
std::cin>>addNoise;
for (unsigned int i = 0; i < NX; i++){
h_data[i].x += sin( 2 * M_PI * freq * (double)i / NX);
double noise = 0;
if (addNoise)
{
noise = (double)rand() / RAND_MAX;
if (rand() % 2){
noise *= -1;
}
}
h_data[i].x += noise;
}
}
micDataArray[batchNum]=h_data;
}
MicData micData = {micDataArray, waveLengths, numberOfBatches};
return micData;
}
int main() {
MicData h_micData = generateData();
FftBatch* fftBatches = (FftBatch*)malloc(sizeof(FftBatch) * h_micData.numberOfBatches);
for (unsigned int i = 0; i < h_micData.numberOfBatches; i++)
{
printf("3.%i\r\n", i);
getFftBatch(&fftBatches[i], h_micData.micData[i]);//TODO: this must take a size parameter if batches can have different sizes
}
printf("4\r\n");
//find all matches across FFT batches
WaveMatches matches = findAllMatches(fftBatches, h_micData.numberOfBatches);
printf("5\r\n");
//allocate memory for wave pair containers
WavePairContainer* wavePairContainers;
wavePairContainers = (WavePairContainer*)malloc(sizeof(WavePairContainer) * matches.matches.size());
printf("6\r\n");
//Filter matches into wavePairContainers
filterMatches(fftBatches,
h_micData.numberOfBatches,
&matches,
wavePairContainers,
matches.matches.size());
//h_micData.numberOfBatches
GpuWaveMatches* d_gpuWaveMatches;
WaveMatchesToGpu(matches, d_gpuWaveMatches);
printf("Has not died yet\r\n");fflush(NULL);
findWavePairs(fftBatches,
h_micData.numberOfBatches,
d_gpuWaveMatches,
wavePairContainers);
for (unsigned int i = 0; i < matches.matches.size(); i++)
{
for (unsigned int j = 0; j < wavePairContainers[i].wavePairCount; j++)
{
std::cout << "Mic: "<< wavePairContainers[i].firstFFT
<< ", and Mic: "<< wavePairContainers[i].secondFFT
<<" -- frequency: " << 2 * wavePairContainers[i].wavePairArray[j].waveIdx1
<< "-" << 2 * wavePairContainers[i].wavePairArray[j].waveIdx2 << std::endl;
}
}
for(int i=0; i<h_micData.numberOfBatches; i++){
//free(MicData[i]->h_data);//allocated within generate data
}
free (fftBatches);
return 0;
}
#endif
|
8e5cf37bd7432e134b80e9f87abbfff8760b8497.cu
|
#ifndef __lcl_main
#define __lcl_main
#define NX 512//256
#define BATCH 1
#include "constants.cu"
#include "matches.cu"
#include "FilterMatches.cu"
#include "fftWork.cu"
#include <stdio.h>
#include <cufft.h>
#include <cufftXt.h>
#include <cufftw.h>
#include <cmath>
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
#include "constants.cu"
#include "matches.cu"
#include "FilterMatches.cu"
#include <iostream>
MicData generateData()
{
//create array of batches(microphones)
int numberOfBatches = -1;
std::cout<<"Number of microphones(must be at least 3): ";
std::cin>>numberOfBatches;
cufftDoubleComplex** micDataArray = (cufftDoubleComplex**)malloc(sizeof(cufftDoubleComplex*)*numberOfBatches);
//length of each mic's array should be the same as int* waveLengths
//create array to store waves
int numberOfWaves = -1;
std::cout<<"Number of waves: ";
std::cin>>numberOfWaves;
//int** wavesArray = (int**)malloc(sizeof(int*) * numberOfWaves);
int* waveLengths = (int*)malloc(sizeof(int) * numberOfWaves);
for(int batchNum=0; batchNum<numberOfBatches; batchNum++){
//initialize h_data
cufftDoubleComplex* h_data = (cufftDoubleComplex*)malloc(sizeof(cufftDoubleComplex)*NX);
for (unsigned int i = 0; i < NX; i++){
h_data[i].x = 0;
h_data[i].y = 0.0;
}
srand (time(NULL));
/* generate secret number between 1 and 10: */
bool addNoise = 0;
for (unsigned int i = 0; i < numberOfWaves; i++)
{
double freq = 0;
std::cout<< "Wave frequency (whole numbers only): ";
std::cin>>freq;
std::cout<<"add noise? (0 = no, 1 = yes): ";
std::cin>>addNoise;
for (unsigned int i = 0; i < NX; i++){
h_data[i].x += sin( 2 * M_PI * freq * (double)i / NX);
double noise = 0;
if (addNoise)
{
noise = (double)rand() / RAND_MAX;
if (rand() % 2){
noise *= -1;
}
}
h_data[i].x += noise;
}
}
micDataArray[batchNum]=h_data;
}
MicData micData = {micDataArray, waveLengths, numberOfBatches};
return micData;
}
int main() {
MicData h_micData = generateData();
FftBatch* fftBatches = (FftBatch*)malloc(sizeof(FftBatch) * h_micData.numberOfBatches);
for (unsigned int i = 0; i < h_micData.numberOfBatches; i++)
{
printf("3.%i\r\n", i);
getFftBatch(&fftBatches[i], h_micData.micData[i]);//TODO: this must take a size parameter if batches can have different sizes
}
printf("4\r\n");
//find all matches across FFT batches
WaveMatches matches = findAllMatches(fftBatches, h_micData.numberOfBatches);
printf("5\r\n");
//allocate memory for wave pair containers
WavePairContainer* wavePairContainers;
wavePairContainers = (WavePairContainer*)malloc(sizeof(WavePairContainer) * matches.matches.size());
printf("6\r\n");
//Filter matches into wavePairContainers
filterMatches(fftBatches,
h_micData.numberOfBatches,
&matches,
wavePairContainers,
matches.matches.size());
//h_micData.numberOfBatches
GpuWaveMatches* d_gpuWaveMatches;
WaveMatchesToGpu(matches, d_gpuWaveMatches);
printf("Has not died yet\r\n");fflush(NULL);
findWavePairs(fftBatches,
h_micData.numberOfBatches,
d_gpuWaveMatches,
wavePairContainers);
for (unsigned int i = 0; i < matches.matches.size(); i++)
{
for (unsigned int j = 0; j < wavePairContainers[i].wavePairCount; j++)
{
std::cout << "Mic: "<< wavePairContainers[i].firstFFT
<< ", and Mic: "<< wavePairContainers[i].secondFFT
<<" -- frequency: " << 2 * wavePairContainers[i].wavePairArray[j].waveIdx1
<< "-" << 2 * wavePairContainers[i].wavePairArray[j].waveIdx2 << std::endl;
}
}
for(int i=0; i<h_micData.numberOfBatches; i++){
//free(MicData[i]->h_data);//allocated within generate data
}
free (fftBatches);
return 0;
}
#endif
|
10eeaadf65eaac640837a1f9a7ee9e1d26890d4d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "_drop.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(float));
float *xmask = NULL;
hipMalloc(&xmask, XSIZE*YSIZE*sizeof(float));
float dropout = 1;
float scale = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
_drop), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,xmask,dropout,scale);
hipDeviceSynchronize();
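// Warm-up launches before the timed region.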
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
_drop), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,xmask,dropout,scale);
}
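// Timed region: 1000 back-to-back launches (no device synchronization before the end timestamp).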
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
_drop), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,xmask,dropout,scale);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
10eeaadf65eaac640837a1f9a7ee9e1d26890d4d.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "_drop.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(float));
float *xmask = NULL;
cudaMalloc(&xmask, XSIZE*YSIZE*sizeof(float));
float dropout = 1;
float scale = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
_drop<<<gridBlock,threadBlock>>>(n,x,xmask,dropout,scale);
cudaDeviceSynchronize();
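// Warm-up launches before the timed region.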
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
_drop<<<gridBlock,threadBlock>>>(n,x,xmask,dropout,scale);
}
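// Timed region: 1000 back-to-back launches (no device synchronization before the end timestamp).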
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
_drop<<<gridBlock,threadBlock>>>(n,x,xmask,dropout,scale);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
1758e22a2849ad3b2c49944b5ecd5d6709347301.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void multiply(int *result, int *A, int *B)
{
/* OLD logic
We have a 3 by 3 grid and each block has 3 threads.
So rows = block x id, cols = block y id
So Indices will be C[block X id][block Y id] = A[block X id][threads 0, 1, 2] * B[threads 0, 1, 2][block y id]
*/
//__shared__ int result[_size*_size] ;
/*result[blockIdx.x*blockDim.x +blockIdx.y] += A[blockIdx.x*blockDim.x + threadIdx.x]*B[blockDim.x*threadIdx.x+blockIdx.y];
printf("C[%d] = A[%d]*B[%d] = %d*%d\n",blockIdx.x*blockDim.x +blockIdx.y, blockIdx.x*blockDim.x + threadIdx.x, blockDim.x*threadIdx.x+blockIdx.y,
A[blockIdx.x*blockDim.x + threadIdx.x],B[blockDim.x*threadIdx.x+blockIdx.y]);
Res[blockIdx.x*blockDim.x +blockIdx.y]= result[blockIdx.x*blockDim.x +blockIdx.y];*/
/* NEW logic
I have 3 blocks and 3 threads. Each thread calculates entry for each position compared to the old one having each thread multiplying one value.
So indices will be result[block x id][thread id] = A[block x id][i]* B[i][thread x id]
*/
for(int i=0; i<_size;i++)
{
result[blockIdx.x*blockDim.x +threadIdx.x] += A[blockIdx.x*blockDim.x+i]*B[blockDim.x*i+threadIdx.x];
}
}
|
1758e22a2849ad3b2c49944b5ecd5d6709347301.cu
|
#include "includes.h"
__global__ void multiply(int *result, int *A, int *B)
{
/* OLD logic
We have a 3 by 3 grid and each block has 3 threads.
So rows = block x id, cols = block y id
So Indices will be C[block X id][block Y id] = A[block X id][threads 0, 1, 2] * B[threads 0, 1, 2][block y id]
*/
//__shared__ int result[_size*_size] ;
/*result[blockIdx.x*blockDim.x +blockIdx.y] += A[blockIdx.x*blockDim.x + threadIdx.x]*B[blockDim.x*threadIdx.x+blockIdx.y];
printf("C[%d] = A[%d]*B[%d] = %d*%d\n",blockIdx.x*blockDim.x +blockIdx.y, blockIdx.x*blockDim.x + threadIdx.x, blockDim.x*threadIdx.x+blockIdx.y,
A[blockIdx.x*blockDim.x + threadIdx.x],B[blockDim.x*threadIdx.x+blockIdx.y]);
Res[blockIdx.x*blockDim.x +blockIdx.y]= result[blockIdx.x*blockDim.x +blockIdx.y];*/
/* NEW logic
I have 3 blocks and 3 threads. Each thread calculates entry for each position compared to the old one having each thread multiplying one value.
So indices will be result[block x id][thread id] = A[block x id][i]* B[i][thread x id]
*/
for(int i=0; i<_size;i++)
{
result[blockIdx.x*blockDim.x +threadIdx.x] += A[blockIdx.x*blockDim.x+i]*B[blockDim.x*i+threadIdx.x];
}
}
|
bc646de0310f3ba9265de016cda0351999d27e0c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
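// Backward pass for a 2D point-to-point distance: for each point j in set 1 and its
// matched point idx1[j] in set 2, the gradient of the squared distance (chained with
// grad_dist1) is scattered into grad_xyz1 and grad_xyz2; atomics handle shared matches.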
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz1[(i*n+j)*2+0];
float y1=xyz1[(i*n+j)*2+1];
int j2=idx1[i*n+j];
float x2=xyz2[(i*m+j2)*2+0];
float y2=xyz2[(i*m+j2)*2+1];
float g=grad_dist1[i*n+j]*2;
atomicAdd(&(grad_xyz1[(i*n+j)*2+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*2+1]),g*(y1-y2));
atomicAdd(&(grad_xyz2[(i*m+j2)*2+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*2+1]),-(g*(y1-y2)));
}
}
}
|
bc646de0310f3ba9265de016cda0351999d27e0c.cu
|
#include "includes.h"
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
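// Backward pass for a 2D point-to-point distance: for each point j in set 1 and its
// matched point idx1[j] in set 2, the gradient of the squared distance (chained with
// grad_dist1) is scattered into grad_xyz1 and grad_xyz2; atomics handle shared matches.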
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz1[(i*n+j)*2+0];
float y1=xyz1[(i*n+j)*2+1];
int j2=idx1[i*n+j];
float x2=xyz2[(i*m+j2)*2+0];
float y2=xyz2[(i*m+j2)*2+1];
float g=grad_dist1[i*n+j]*2;
atomicAdd(&(grad_xyz1[(i*n+j)*2+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*2+1]),g*(y1-y2));
atomicAdd(&(grad_xyz2[(i*m+j2)*2+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*2+1]),-(g*(y1-y2)));
}
}
}
|
f07034ced8d78de6fb677f455e2c4fb96e1d7d64.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
static int z_print_props()
{
int count = 0;
int dev = 0;
hipDeviceProp_t prop;
hipError_t err = hipGetDeviceCount(&count);
if(err != hipSuccess) {
fprintf(stderr, "cudacudaGetDeviceCount: %s\n", hipGetErrorString(err));
return 1;
}
for(dev = 0; dev < count; dev ++) {
err = hipGetDeviceProperties(&prop, dev);
if(err != hipSuccess) {
fprintf(stderr, "cudacudaGetDeviceCount: %s\n", hipGetErrorString(err));
return 2;
}
printf("DEVICE: %d\n===================\n", dev);
printf("\tname: %s\n", prop.name);
printf("\ttotalGlobalMem: %u\n", prop.totalGlobalMem);
printf("\tsharedMemPerBlock: %u\n", prop.sharedMemPerBlock);
printf("\tregsPerBlock: %d\n", prop.regsPerBlock);
printf("\twarpSize: %d\n", prop.warpSize);
printf("\tmemPitch: %u\n", prop.memPitch);
printf("\tmaxThreadsPerBlock: %d\n", prop.maxThreadsPerBlock);
printf("\tmaxThreadsDim[3]: %d, %d, %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("\tmaxGridSize[3]: %d, %d, %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("\tclockRate: %d\n", prop.clockRate);
printf("\ttotalConstMem: %u\n", prop.totalConstMem);
printf("\tmajor: %d\n", prop.major);
printf("\tminor: %d\n", prop.minor);
printf("\ttextureAlignment: %u\n", prop.textureAlignment);
printf("\ttexturePitchAlignment: %u\n", prop.texturePitchAlignment);
printf("\tdeviceOverlap: %d\n", prop.deviceOverlap);
printf("\tmultiProcessorCount: %d\n", prop.multiProcessorCount);
printf("\tkernelExecTimeoutEnabled: %d\n", prop.kernelExecTimeoutEnabled);
printf("\tintegrated: %d\n", prop.integrated);
printf("\tcanMapHostMemory: %d\n", prop.canMapHostMemory);
printf("\tcomputeMode: %d\n", prop.computeMode);
printf("\tmaxTexture1D: %d\n", prop.maxTexture1D);
printf("\tmaxTexture1DMipmap: %d\n", prop.maxTexture1DMipmap);
printf("\tmaxTexture1DLinear: %d\n", prop.maxTexture1DLinear);
printf("\tmaxTexture2D[2]: %d, %d\n", prop.maxTexture2D[0], prop.maxTexture2D[1]);
printf("\tmaxTexture2DMipmap[2]: %d, %d\n", prop.maxTexture2DMipmap[0], prop.maxTexture2DMipmap[1]);
printf("\tmaxTexture2DLinear[3]: %d, %d, %d\n", prop.maxTexture2DLinear[0], prop.maxTexture2DLinear[1], prop.maxTexture2DLinear[2]);
printf("\tmaxTexture2DGather[2]: %d, %d\n", prop.maxTexture2DGather[0], prop.maxTexture2DGather[1]);
printf("\tmaxTexture3D[3]: %d, %d, %d\n", prop.maxTexture3D[0], prop.maxTexture3D[1], prop.maxTexture3D[2]);
printf("\tmaxTexture3DAlt[3]: %d, %d, %d\n", prop.maxTexture3DAlt[0], prop.maxTexture3DAlt[1], prop.maxTexture3DAlt[2]);
printf("\tmaxTextureCubemap: %d\n", prop.maxTextureCubemap);
// int printf("\tmaxTexture1DLayered[2]; /**< Maximum 1D layered texture dimensions */
// int printf("\tmaxTexture2DLayered[3]; /**< Maximum 2D layered texture dimensions */
// int printf("\tmaxTextureCubemapLayered[2];/**< Maximum Cubemap layered texture dimensions */
// int printf("\tmaxSurface1D; /**< Maximum 1D surface size */
// int printf("\tmaxSurface2D[2]; /**< Maximum 2D surface dimensions */
// int printf("\tmaxSurface3D[3]; /**< Maximum 3D surface dimensions */
// int printf("\tmaxSurface1DLayered[2]; /**< Maximum 1D layered surface dimensions */
// int printf("\tmaxSurface2DLayered[3]; /**< Maximum 2D layered surface dimensions */
// int printf("\tmaxSurfaceCubemap; /**< Maximum Cubemap surface dimensions */
// int printf("\tmaxSurfaceCubemapLayered[2];/**< Maximum Cubemap layered surface dimensions */
// size_t printf("\tsurfaceAlignment; /**< Alignment requirements for surfaces */
// int printf("\tconcurrentKernels; /**< Device can possibly execute multiple kernels concurrently */
// int printf("\tECCEnabled; /**< Device has ECC support enabled */
// int printf("\tpciBusID; /**< PCI bus ID of the device */
// int printf("\tpciDeviceID; /**< PCI device ID of the device */
// int printf("\tpciDomainID; /**< PCI domain ID of the device */
// int printf("\ttccDriver; /**< 1 if device is a Tesla device using TCC driver, 0 otherwise */
// int printf("\tasyncEngineCount; /**< Number of asynchronous engines */
// int printf("\tunifiedAddressing; /**< Device shares a unified address space with the host */
// int printf("\tmemoryClockRate; /**< Peak memory clock frequency in kilohertz */
// int printf("\tmemoryBusWidth; /**< Global memory bus width in bits */
// int printf("\tl2CacheSize; /**< Size of L2 cache in bytes */
// int printf("\tmaxThreadsPerMultiProcessor;/**< Maximum resident threads per multiprocessor */
// int printf("\tstreamPrioritiesSupported; /**< Device supports stream priorities */
// int printf("\tglobalL1CacheSupported; /**< Device supports caching globals in L1 */
// int printf("\tlocalL1CacheSupported; /**< Device supports caching locals in L1 */
// size_t printf("\tsharedMemPerMultiprocessor; /**< Shared memory available per multiprocessor in bytes */
// int printf("\tregsPerMultiprocessor; /**< 32-bit registers available per multiprocessor */
// int printf("\tmanagedMemory; /**< Device supports allocating managed memory on this system */
// int printf("\tisMultiGpuBoard; /**< Device is on a multi-GPU board */
// int printf("\tmultiGpuBoardGroupID; /**< Unique identifier for a group of devices on the same multi-GPU board */
}
return 0;
}
struct BLOCK_THREAD_IDX {
uint32_t block_idx;
uint32_t thread_idx;
};
static void init_elements(struct BLOCK_THREAD_IDX* arr, int numElements)
{
while(numElements --)
{
arr->block_idx = 0;
arr->thread_idx = 0;
arr ++;
}
}
static int dev_alloc(void** buff, size_t num_bytes)
{
hipError_t err = hipSuccess;
err = hipMalloc((void**)buff, num_bytes);
if(hipSuccess != err) {
fprintf(stderr, "hipMalloc: %s\n", hipGetErrorString(err));
return 1;
}
return 0;
}
static int dev_free(void* buff)
{
hipError_t err = hipSuccess;
err = hipFree(buff);
if(hipSuccess != err)
{
fprintf(stderr, "hipFree: %s\n", hipGetErrorString(err));
return 2;
}
return 0;
}
static int copy_output_to_host(void* dest, void* src, int numBytes)
{
hipError_t err = hipMemcpy(dest, src, numBytes, hipMemcpyDeviceToHost);
if(hipSuccess != err)
{
fprintf(stderr, "copy output - hipMemcpy: %s\n", hipGetErrorString(err));
return 4;
}
return 0;
}
__global__ void collect(struct BLOCK_THREAD_IDX* el)
{
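// Each thread records its own blockIdx.x and threadIdx.x into its slot of the output array.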
int i = blockDim.x * blockIdx.x + threadIdx.x;
el[i].block_idx = blockIdx.x;
el[i].thread_idx = threadIdx.x;
}
static void print_block_thread(struct BLOCK_THREAD_IDX* arr, int numElements)
{
while(numElements --) {
printf("\tblockIdx.x = %u, threadIdx.x = %u\n", arr->block_idx, arr->thread_idx);
arr ++;
}
}
static int z_print_block_thread()
{
int res = 0;
int num_blocks = 16;
int num_threads = 4;
int numElements = num_blocks * num_threads;
struct BLOCK_THREAD_IDX arr[numElements];
struct BLOCK_THREAD_IDX* dev_arr = 0;
if((res = dev_alloc((void**)&dev_arr, sizeof(arr))))
return res;
init_elements(arr, numElements);
hipLaunchKernelGGL(( collect), dim3(num_blocks), dim3(num_threads), 0, 0, dev_arr);
if((res = copy_output_to_host(arr, dev_arr, sizeof(arr))) || (res = dev_free(dev_arr)))
return res;
print_block_thread(arr, numElements);
return res;
}
int main(void)
{
int res = 0;
(res = z_print_props()) || (res = z_print_block_thread());
return res;
}
|
f07034ced8d78de6fb677f455e2c4fb96e1d7d64.cu
|
#include <stdio.h>
#include <stdint.h>
#include <cuda_runtime.h>
#include <cuda_device_runtime_api.h>
static int z_print_props()
{
int count = 0;
int dev = 0;
cudaDeviceProp prop;
cudaError_t err = cudaGetDeviceCount(&count);
if(err != cudaSuccess) {
fprintf(stderr, "cudacudaGetDeviceCount: %s\n", cudaGetErrorString(err));
return 1;
}
for(dev = 0; dev < count; dev ++) {
err = cudaGetDeviceProperties(&prop, dev);
if(err != cudaSuccess) {
fprintf(stderr, "cudacudaGetDeviceCount: %s\n", cudaGetErrorString(err));
return 2;
}
printf("DEVICE: %d\n===================\n", dev);
printf("\tname: %s\n", prop.name);
printf("\ttotalGlobalMem: %u\n", prop.totalGlobalMem);
printf("\tsharedMemPerBlock: %u\n", prop.sharedMemPerBlock);
printf("\tregsPerBlock: %d\n", prop.regsPerBlock);
printf("\twarpSize: %d\n", prop.warpSize);
printf("\tmemPitch: %u\n", prop.memPitch);
printf("\tmaxThreadsPerBlock: %d\n", prop.maxThreadsPerBlock);
printf("\tmaxThreadsDim[3]: %d, %d, %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("\tmaxGridSize[3]: %d, %d, %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("\tclockRate: %d\n", prop.clockRate);
printf("\ttotalConstMem: %u\n", prop.totalConstMem);
printf("\tmajor: %d\n", prop.major);
printf("\tminor: %d\n", prop.minor);
printf("\ttextureAlignment: %u\n", prop.textureAlignment);
printf("\ttexturePitchAlignment: %u\n", prop.texturePitchAlignment);
printf("\tdeviceOverlap: %d\n", prop.deviceOverlap);
printf("\tmultiProcessorCount: %d\n", prop.multiProcessorCount);
printf("\tkernelExecTimeoutEnabled: %d\n", prop.kernelExecTimeoutEnabled);
printf("\tintegrated: %d\n", prop.integrated);
printf("\tcanMapHostMemory: %d\n", prop.canMapHostMemory);
printf("\tcomputeMode: %d\n", prop.computeMode);
printf("\tmaxTexture1D: %d\n", prop.maxTexture1D);
printf("\tmaxTexture1DMipmap: %d\n", prop.maxTexture1DMipmap);
printf("\tmaxTexture1DLinear: %d\n", prop.maxTexture1DLinear);
printf("\tmaxTexture2D[2]: %d, %d\n", prop.maxTexture2D[0], prop.maxTexture2D[1]);
printf("\tmaxTexture2DMipmap[2]: %d, %d\n", prop.maxTexture2DMipmap[0], prop.maxTexture2DMipmap[1]);
printf("\tmaxTexture2DLinear[3]: %d, %d, %d\n", prop.maxTexture2DLinear[0], prop.maxTexture2DLinear[1], prop.maxTexture2DLinear[2]);
printf("\tmaxTexture2DGather[2]: %d, %d\n", prop.maxTexture2DGather[0], prop.maxTexture2DGather[1]);
printf("\tmaxTexture3D[3]: %d, %d, %d\n", prop.maxTexture3D[0], prop.maxTexture3D[1], prop.maxTexture3D[2]);
printf("\tmaxTexture3DAlt[3]: %d, %d, %d\n", prop.maxTexture3DAlt[0], prop.maxTexture3DAlt[1], prop.maxTexture3DAlt[2]);
printf("\tmaxTextureCubemap: %d\n", prop.maxTextureCubemap);
// int printf("\tmaxTexture1DLayered[2]; /**< Maximum 1D layered texture dimensions */
// int printf("\tmaxTexture2DLayered[3]; /**< Maximum 2D layered texture dimensions */
// int printf("\tmaxTextureCubemapLayered[2];/**< Maximum Cubemap layered texture dimensions */
// int printf("\tmaxSurface1D; /**< Maximum 1D surface size */
// int printf("\tmaxSurface2D[2]; /**< Maximum 2D surface dimensions */
// int printf("\tmaxSurface3D[3]; /**< Maximum 3D surface dimensions */
// int printf("\tmaxSurface1DLayered[2]; /**< Maximum 1D layered surface dimensions */
// int printf("\tmaxSurface2DLayered[3]; /**< Maximum 2D layered surface dimensions */
// int printf("\tmaxSurfaceCubemap; /**< Maximum Cubemap surface dimensions */
// int printf("\tmaxSurfaceCubemapLayered[2];/**< Maximum Cubemap layered surface dimensions */
// size_t printf("\tsurfaceAlignment; /**< Alignment requirements for surfaces */
// int printf("\tconcurrentKernels; /**< Device can possibly execute multiple kernels concurrently */
// int printf("\tECCEnabled; /**< Device has ECC support enabled */
// int printf("\tpciBusID; /**< PCI bus ID of the device */
// int printf("\tpciDeviceID; /**< PCI device ID of the device */
// int printf("\tpciDomainID; /**< PCI domain ID of the device */
// int printf("\ttccDriver; /**< 1 if device is a Tesla device using TCC driver, 0 otherwise */
// int printf("\tasyncEngineCount; /**< Number of asynchronous engines */
// int printf("\tunifiedAddressing; /**< Device shares a unified address space with the host */
// int printf("\tmemoryClockRate; /**< Peak memory clock frequency in kilohertz */
// int printf("\tmemoryBusWidth; /**< Global memory bus width in bits */
// int printf("\tl2CacheSize; /**< Size of L2 cache in bytes */
// int printf("\tmaxThreadsPerMultiProcessor;/**< Maximum resident threads per multiprocessor */
// int printf("\tstreamPrioritiesSupported; /**< Device supports stream priorities */
// int printf("\tglobalL1CacheSupported; /**< Device supports caching globals in L1 */
// int printf("\tlocalL1CacheSupported; /**< Device supports caching locals in L1 */
// size_t printf("\tsharedMemPerMultiprocessor; /**< Shared memory available per multiprocessor in bytes */
// int printf("\tregsPerMultiprocessor; /**< 32-bit registers available per multiprocessor */
// int printf("\tmanagedMemory; /**< Device supports allocating managed memory on this system */
// int printf("\tisMultiGpuBoard; /**< Device is on a multi-GPU board */
// int printf("\tmultiGpuBoardGroupID; /**< Unique identifier for a group of devices on the same multi-GPU board */
}
return 0;
}
struct BLOCK_THREAD_IDX {
uint32_t block_idx;
uint32_t thread_idx;
};
static void init_elements(struct BLOCK_THREAD_IDX* arr, int numElements)
{
while(numElements --)
{
arr->block_idx = 0;
arr->thread_idx = 0;
arr ++;
}
}
static int dev_alloc(void** buff, size_t num_bytes)
{
cudaError_t err = cudaSuccess;
err = cudaMalloc((void**)buff, num_bytes);
if(cudaSuccess != err) {
fprintf(stderr, "cudaMalloc: %s\n", cudaGetErrorString(err));
return 1;
}
return 0;
}
static int dev_free(void* buff)
{
cudaError_t err = cudaSuccess;
err = cudaFree(buff);
if(cudaSuccess != err)
{
fprintf(stderr, "cudaFree: %s\n", cudaGetErrorString(err));
return 2;
}
return 0;
}
static int copy_output_to_host(void* dest, void* src, int numBytes)
{
cudaError_t err = cudaMemcpy(dest, src, numBytes, cudaMemcpyDeviceToHost);
if(cudaSuccess != err)
{
fprintf(stderr, "copy output - cudaMemcpy: %s\n", cudaGetErrorString(err));
return 4;
}
return 0;
}
__global__ void collect(struct BLOCK_THREAD_IDX* el)
{
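// Each thread records its own blockIdx.x and threadIdx.x into its slot of the output array.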
int i = blockDim.x * blockIdx.x + threadIdx.x;
el[i].block_idx = blockIdx.x;
el[i].thread_idx = threadIdx.x;
}
static void print_block_thread(struct BLOCK_THREAD_IDX* arr, int numElements)
{
while(numElements --) {
printf("\tblockIdx.x = %u, threadIdx.x = %u\n", arr->block_idx, arr->thread_idx);
arr ++;
}
}
static int z_print_block_thread()
{
int res = 0;
int num_blocks = 16;
int num_threads = 4;
int numElements = num_blocks * num_threads;
struct BLOCK_THREAD_IDX arr[numElements];
struct BLOCK_THREAD_IDX* dev_arr = 0;
if((res = dev_alloc((void**)&dev_arr, sizeof(arr))))
return res;
init_elements(arr, numElements);
collect<<<num_blocks, num_threads>>>(dev_arr);
if((res = copy_output_to_host(arr, dev_arr, sizeof(arr))) || (res = dev_free(dev_arr)))
return res;
print_block_thread(arr, numElements);
return res;
}
int main(void)
{
int res = 0;
(res = z_print_props()) || (res = z_print_block_thread());
return res;
}
|
2108c2594bf8689d4f3fbc734d11d8a6d997d4c6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Mandelbrot explorer, based on my old Julia demo plus parts of Nicolas Melot's Lab 1 code.
// CPU only! Your task: Rewrite for CUDA! Test and evaluate performance.
// Compile with:
// gcc interactiveMandelbrot.cpp -shared-libgcc -lstdc++-static -o interactiveMandelbrot -lglut -lGL
// or
// g++ interactiveMandelbrot.cpp -o interactiveMandelbrot -lglut -lGL
// Your CUDA version should compile with something like
// nvcc -lglut -lGL interactiveMandelbrotCUDA.cu -o interactiveMandelbrotCUDA
// Preliminary version 2014-11-30
// Cleaned a bit more 2014-12-01
// Corrected the missing glRasterPos2i 2014-12-03
#ifdef __APPLE__
#include <OpenGL/gl.h>
#include <GLUT/glut.h>
#else
#include <GL/glut.h>
#include <GL/gl.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "milli.h"
// Image data
unsigned char *pixels = NULL;
unsigned char *cuda_peter = NULL;
int gImageWidth, gImageHeight;
// Init image data
void initBitmap(int width, int height)
{
if (pixels) free(pixels);
pixels = (unsigned char *)malloc(width * height * 4);
gImageWidth = width;
gImageHeight = height;
}
#define DIM 1024
// Select precision here! float or double!
#define MYFLOAT double
// User controlled parameters
int maxiter = 212;
MYFLOAT offsetx = -200, offsety = 0, zoom = 0;
MYFLOAT scale = 1.5;
int* cuda_maxiter;
MYFLOAT* cuda_offsetx;
MYFLOAT* cuda_offsety;
MYFLOAT* cuda_zoom;
MYFLOAT* cuda_scale;
// Complex number class
__device__
struct hipComplex
{
MYFLOAT r;
MYFLOAT i;
__device__
hipComplex( MYFLOAT a, MYFLOAT b ) : r(a), i(b) {}
__device__
float magnitude2( void )
{
return r * r + i * i;
}
__device__
hipComplex operator*(const hipComplex& a)
{
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__
hipComplex operator+(const hipComplex& a)
{
return hipComplex(r+a.r, i+a.i);
}
};
__device__
int mandelbrot(int x, int y, int maxiter, MYFLOAT offsetx,
MYFLOAT offsety, MYFLOAT zoom, MYFLOAT scale)
{
MYFLOAT jx = scale * (MYFLOAT)(DIM/2 - x + offsetx/scale)/(DIM/2);
MYFLOAT jy = scale * (MYFLOAT)(DIM/2 - y + offsety/scale)/(DIM/2);
hipComplex c(jx, jy);
hipComplex a(jx, jy);
int i = 0;
for (i=0; i<maxiter; i++)
{
a = a * a + c;
if (a.magnitude2() > 1000) return i;
}
return i;
}
__global__
void fractal_kernel(unsigned char* out, int maxiter, MYFLOAT offsetx,
MYFLOAT offsety, MYFLOAT zoom, MYFLOAT scale) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int offset = x + y*DIM;
// now calculate the value at that position
int fractalValue = mandelbrot(x, y, maxiter, offsetx, offsety, zoom, scale);
// Colorize it
int red = 255 * fractalValue/ maxiter;
if (red > 255) red = 255 - red;
int green = 255 * fractalValue*4/ maxiter;
if (green > 255) green = 255 - green;
int blue = 255 * fractalValue*20/ maxiter;
if (blue > 255) blue = 255 - blue;
out[offset*4 + 0] = red;
out[offset*4 + 1] = green;
out[offset*4 + 2] = blue;
out[offset*4 + 3] = 255;
}
void computeFractal()
{
dim3 grid(DIM/32, DIM/32);
dim3 block(32, 32);
hipLaunchKernelGGL(( fractal_kernel), dim3(grid), dim3(block), 0, 0, cuda_peter, maxiter, offsetx, offsety, zoom, scale);
hipMemcpy(pixels, cuda_peter, DIM*DIM*4, hipMemcpyDeviceToHost);
}
char print_help = 0;
// Yuck, GLUT text is old junk that should be avoided... but it will have to do
static void print_str(void* font, const char* string)
{
int i;
for (i = 0; string[i]; i++)
{
glutBitmapCharacter(font, string[i]);
}
}
void PrintHelp()
{
if (print_help)
{
glPushMatrix();
glLoadIdentity();
glOrtho(-0.5, 639.5, -0.5, 479.5, -1.0, 1.0);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glColor4f(0.f, 0.f, 0.5f, 0.5f);
glRecti(40, 40, 600, 440);
glColor3f(1.f, 1.f, 1.f);
glRasterPos2i(300, 420);
print_str(GLUT_BITMAP_HELVETICA_18, "Help");
glRasterPos2i(60, 390);
print_str(GLUT_BITMAP_HELVETICA_18, "h - Toggle Help");
glRasterPos2i(60, 300);
print_str(GLUT_BITMAP_HELVETICA_18, "Left click + drag - move picture");
glRasterPos2i(60, 270);
print_str(GLUT_BITMAP_HELVETICA_18,
"Right click + drag up/down - unzoom/zoom");
glRasterPos2i(60, 240);
print_str(GLUT_BITMAP_HELVETICA_18, "+ - Increase max. iterations by 32");
glRasterPos2i(60, 210);
print_str(GLUT_BITMAP_HELVETICA_18, "- - Decrease max. iterations by 32");
glRasterPos2i(0, 0);
glDisable(GL_BLEND);
glPopMatrix();
}
}
// Compute fractal and display image
void Draw()
{
int start = GetMicroseconds();
computeFractal();
int end = GetMicroseconds();
printf("Took %i usec\n", end-start);
hipError_t err = hipGetLastError();
if(err != hipSuccess) {
printf("Error: %s\n", hipGetErrorString(err));
}
// Dump the whole picture onto the screen. (Old-style OpenGL but without lots of geometry that doesn't matter so much.)
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
glDrawPixels( gImageWidth, gImageHeight, GL_RGBA, GL_UNSIGNED_BYTE, pixels );
if (print_help) {
PrintHelp();
}
glutSwapBuffers();
}
static void Reshape(int width, int height)
{
hipFree(cuda_peter);
hipMalloc((void**)&cuda_peter, width*height*4);
glViewport(0, 0, width, height);
glLoadIdentity();
glOrtho(-0.5f, width - 0.5f, -0.5f, height - 0.5f, -1.f, 1.f);
initBitmap(width, height);
glutPostRedisplay();
}
int mouse_x, mouse_y, mouse_btn;
// Mouse down
static void mouse_button(int button, int state, int x, int y)
{
if (state == GLUT_DOWN)
{
// Record start position
mouse_x = x;
mouse_y = y;
mouse_btn = button;
}
}
// Drag mouse
static void mouse_motion(int x, int y)
{
if (mouse_btn == 0)
// Ordinary mouse button - move
{
offsetx += (x - mouse_x)*scale;
mouse_x = x;
offsety += (mouse_y - y)*scale;
mouse_y = y;
glutPostRedisplay();
}
else
// Alt mouse button - scale
{
scale *= pow(1.1, y - mouse_y);
mouse_y = y;
glutPostRedisplay();
}
}
void KeyboardProc(unsigned char key, int x, int y)
{
switch (key)
{
case 27: /* Escape key */
case 'q':
case 'Q':
exit(0);
break;
case '+':
maxiter += maxiter < 1024 - 32 ? 32 : 0;
break;
case '-':
maxiter -= maxiter > 0 + 32 ? 32 : 0;
break;
case 'h':
print_help = !print_help;
break;
}
glutPostRedisplay();
}
// Main program, inits
int main( int argc, char** argv)
{
hipMalloc((void**)&cuda_peter, DIM*DIM*4*sizeof(unsigned char));
hipMalloc((void**)&cuda_maxiter, sizeof(int));
hipMalloc((void**)&cuda_offsetx, sizeof(MYFLOAT));
hipMalloc((void**)&cuda_offsety, sizeof(MYFLOAT));
hipMalloc((void**)&cuda_zoom, sizeof(MYFLOAT));
hipMalloc((void**)&cuda_scale, sizeof(MYFLOAT));
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGBA );
glutInitWindowSize( DIM, DIM );
glutCreateWindow("ahs");
glutDisplayFunc(Draw);
glutMouseFunc(mouse_button);
glutMotionFunc(mouse_motion);
glutKeyboardFunc(KeyboardProc);
// glutReshapeFunc(Reshape);
initBitmap(DIM, DIM);
glutMainLoop();
hipFree(cuda_peter);
}
|
2108c2594bf8689d4f3fbc734d11d8a6d997d4c6.cu
|
// Mandelbrot explorer, based on my old Julia demo plus parts of Nicolas Melot's Lab 1 code.
// CPU only! Your task: Rewrite for CUDA! Test and evaluate performance.
// Compile with:
// gcc interactiveMandelbrot.cpp -shared-libgcc -lstdc++-static -o interactiveMandelbrot -lglut -lGL
// or
// g++ interactiveMandelbrot.cpp -o interactiveMandelbrot -lglut -lGL
// Your CUDA version should compile with something like
// nvcc -lglut -lGL interactiveMandelbrotCUDA.cu -o interactiveMandelbrotCUDA
// Preliminary version 2014-11-30
// Cleaned a bit more 2014-12-01
// Corrected the missing glRasterPos2i 2014-12-03
#ifdef __APPLE__
#include <OpenGL/gl.h>
#include <GLUT/glut.h>
#else
#include <GL/glut.h>
#include <GL/gl.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "milli.h"
// Image data
unsigned char *pixels = NULL;
unsigned char *cuda_peter = NULL;
int gImageWidth, gImageHeight;
// Init image data
void initBitmap(int width, int height)
{
if (pixels) free(pixels);
pixels = (unsigned char *)malloc(width * height * 4);
gImageWidth = width;
gImageHeight = height;
}
#define DIM 1024
// Select precision here! float or double!
#define MYFLOAT double
// User controlled parameters
int maxiter = 212;
MYFLOAT offsetx = -200, offsety = 0, zoom = 0;
MYFLOAT scale = 1.5;
int* cuda_maxiter;
MYFLOAT* cuda_offsetx;
MYFLOAT* cuda_offsety;
MYFLOAT* cuda_zoom;
MYFLOAT* cuda_scale;
// Complex number class
__device__
struct cuComplex
{
MYFLOAT r;
MYFLOAT i;
__device__
cuComplex( MYFLOAT a, MYFLOAT b ) : r(a), i(b) {}
__device__
float magnitude2( void )
{
return r * r + i * i;
}
__device__
cuComplex operator*(const cuComplex& a)
{
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__
cuComplex operator+(const cuComplex& a)
{
return cuComplex(r+a.r, i+a.i);
}
};
__device__
int mandelbrot(int x, int y, int maxiter, MYFLOAT offsetx,
MYFLOAT offsety, MYFLOAT zoom, MYFLOAT scale)
{
MYFLOAT jx = scale * (MYFLOAT)(DIM/2 - x + offsetx/scale)/(DIM/2);
MYFLOAT jy = scale * (MYFLOAT)(DIM/2 - y + offsety/scale)/(DIM/2);
cuComplex c(jx, jy);
cuComplex a(jx, jy);
int i = 0;
for (i=0; i<maxiter; i++)
{
a = a * a + c;
if (a.magnitude2() > 1000) return i;
}
return i;
}
__global__
void fractal_kernel(unsigned char* out, int maxiter, MYFLOAT offsetx,
MYFLOAT offsety, MYFLOAT zoom, MYFLOAT scale) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int offset = x + y*DIM;
// now calculate the value at that position
int fractalValue = mandelbrot(x, y, maxiter, offsetx, offsety, zoom, scale);
// Colorize it
int red = 255 * fractalValue/ maxiter;
if (red > 255) red = 255 - red;
int green = 255 * fractalValue*4/ maxiter;
if (green > 255) green = 255 - green;
int blue = 255 * fractalValue*20/ maxiter;
if (blue > 255) blue = 255 - blue;
out[offset*4 + 0] = red;
out[offset*4 + 1] = green;
out[offset*4 + 2] = blue;
out[offset*4 + 3] = 255;
}
void computeFractal()
{
dim3 grid(DIM/32, DIM/32);
dim3 block(32, 32);
fractal_kernel<<<grid, block>>>(cuda_peter, maxiter, offsetx, offsety, zoom, scale);
cudaMemcpy(pixels, cuda_peter, DIM*DIM*4, cudaMemcpyDeviceToHost);
}
char print_help = 0;
// Yuck, GLUT text is old junk that should be avoided... but it will have to do
static void print_str(void* font, const char* string)
{
int i;
for (i = 0; string[i]; i++)
{
glutBitmapCharacter(font, string[i]);
}
}
void PrintHelp()
{
if (print_help)
{
glPushMatrix();
glLoadIdentity();
glOrtho(-0.5, 639.5, -0.5, 479.5, -1.0, 1.0);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glColor4f(0.f, 0.f, 0.5f, 0.5f);
glRecti(40, 40, 600, 440);
glColor3f(1.f, 1.f, 1.f);
glRasterPos2i(300, 420);
print_str(GLUT_BITMAP_HELVETICA_18, "Help");
glRasterPos2i(60, 390);
print_str(GLUT_BITMAP_HELVETICA_18, "h - Toggle Help");
glRasterPos2i(60, 300);
print_str(GLUT_BITMAP_HELVETICA_18, "Left click + drag - move picture");
glRasterPos2i(60, 270);
print_str(GLUT_BITMAP_HELVETICA_18,
"Right click + drag up/down - unzoom/zoom");
glRasterPos2i(60, 240);
print_str(GLUT_BITMAP_HELVETICA_18, "+ - Increase max. iterations by 32");
glRasterPos2i(60, 210);
print_str(GLUT_BITMAP_HELVETICA_18, "- - Decrease max. iterations by 32");
glRasterPos2i(0, 0);
glDisable(GL_BLEND);
glPopMatrix();
}
}
// Compute fractal and display image
void Draw()
{
int start = GetMicroseconds();
computeFractal();
int end = GetMicroseconds();
printf("Took %i usec\n", end-start);
cudaError_t err = cudaGetLastError();
if(err != cudaSuccess) {
printf("Error: %s\n", cudaGetErrorString(err));
}
// Dump the whole picture onto the screen. (Old-style OpenGL but without lots of geometry that doesn't matter so much.)
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
glDrawPixels( gImageWidth, gImageHeight, GL_RGBA, GL_UNSIGNED_BYTE, pixels );
if (print_help) {
PrintHelp();
}
glutSwapBuffers();
}
static void Reshape(int width, int height)
{
cudaFree(cuda_peter);
cudaMalloc((void**)&cuda_peter, width*height*4); // 4 bytes per RGBA pixel
glViewport(0, 0, width, height);
glLoadIdentity();
glOrtho(-0.5f, width - 0.5f, -0.5f, height - 0.5f, -1.f, 1.f);
initBitmap(width, height);
glutPostRedisplay();
}
int mouse_x, mouse_y, mouse_btn;
// Mouse down
static void mouse_button(int button, int state, int x, int y)
{
if (state == GLUT_DOWN)
{
// Record start position
mouse_x = x;
mouse_y = y;
mouse_btn = button;
}
}
// Drag mouse
static void mouse_motion(int x, int y)
{
if (mouse_btn == 0)
// Ordinary mouse button - move
{
offsetx += (x - mouse_x)*scale;
mouse_x = x;
offsety += (mouse_y - y)*scale;
mouse_y = y;
glutPostRedisplay();
}
else
// Alt mouse button - scale
{
scale *= pow(1.1, y - mouse_y);
mouse_y = y;
glutPostRedisplay();
}
}
void KeyboardProc(unsigned char key, int x, int y)
{
switch (key)
{
case 27: /* Escape key */
case 'q':
case 'Q':
exit(0);
break;
case '+':
maxiter += maxiter < 1024 - 32 ? 32 : 0;
break;
case '-':
maxiter -= maxiter > 0 + 32 ? 32 : 0;
break;
case 'h':
print_help = !print_help;
break;
}
glutPostRedisplay();
}
// Main program, inits
int main( int argc, char** argv)
{
cudaMalloc((void**)&cuda_peter, DIM*DIM*4*sizeof(unsigned char));
cudaMalloc((void**)&cuda_maxiter, sizeof(int));
cudaMalloc((void**)&cuda_offsetx, sizeof(MYFLOAT));
cudaMalloc((void**)&cuda_offsety, sizeof(MYFLOAT));
cudaMalloc((void**)&cuda_zoom, sizeof(MYFLOAT));
cudaMalloc((void**)&cuda_scale, sizeof(MYFLOAT));
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGBA );
glutInitWindowSize( DIM, DIM );
glutCreateWindow("ahs");
glutDisplayFunc(Draw);
glutMouseFunc(mouse_button);
glutMotionFunc(mouse_motion);
glutKeyboardFunc(KeyboardProc);
// glutReshapeFunc(Reshape);
initBitmap(DIM, DIM);
glutMainLoop();
cudaFree(cuda_peter);
}
|
c57085cf07149a074b8d1e3e3ac69ee9bff91ec2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
// Problem description: compute threads with catalan numbers
//
// Divide up the work as evenly as possible
/* Write device code / function here pls */
/* CUDA kernel function goes here */
__global__
void catalan(unsigned int catNums, double* data)
{
unsigned int i = (blockIdx.x * blockDim.x + threadIdx.x);
int j, k;
double result = 1.0;
k = i + 1;
// thread 0: i = 0 -> calculate first cat #
// thread 1: i = 1 -> calculate second cat #
// thread 2: i = 2 -> calculate the next one...
// thread 1000: i = 999
// all kernel threads (GPU) will enter and execute this kernel function
// i <- unique to each thread
//printf("thread number: %d\n", i);
if (i < catNums) // only threads 0..catNums-1 have an output slot in data[]
{
for (j = 0; j < k; j++)
{
// compute the binomial coefficient
result *= ((2 * k) - j);
result /= (j + 1);
}
// resulting catalan number
result = result / (k + 1);
// store resulting catalan number into array
data[i] = result;
}
// result = (2*(2*i-1)*c)/(i+1);
}
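// Sanity check (values from the closed form C_k = C(2k,k)/(k+1), not printed
// by the original code): thread i computes C_{i+1}, so the first entries of
// data[] should be data[0]=1, data[1]=2, data[2]=5, data[3]=14, data[4]=42,
// data[5]=132.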
__global__
void printThreads(int N)
{
int i = (blockIdx.x * blockDim.x + threadIdx.x);
if (i < N)
{
printf("Thread number: %d\n", i);
}
}
/* Function to find and display information on installed GPU devices */
void printDeviceInfo()
{
struct hipDeviceProp_t dp;
int gpuCount;
int i;
hipGetDeviceCount(&gpuCount);
printf("%d GPU(s) found.\n", gpuCount);
for (i = 0; i < gpuCount; i++)
{
hipError_t err = hipGetDeviceProperties(&dp, i);
if (err == hipSuccess)
{
printf("GPU #%d [Compute Capability %d.%d] (%lg GB of Global Memory): %s\n", i, dp.major, dp.minor, dp.totalGlobalMem / 1073741824.0, dp.name);
//printf("GPU #%d connected to PCI Bus #%d as Device #%d\n", i, dp.pciBusID, dp.pciDeviceID);
}
}
}
/* Host entry point */
int main(int argc, char** argv)
{
unsigned int i, threadNums, catalanNums, blocks = 0;
double* catData, * dev_catData;
FILE* fp;
printDeviceInfo();
/*if (argc != 2)
{
printf("Usage: catalan catalanNums\n");
return -1;
}*/
catalanNums = 10000;
threadNums = catalanNums;
// allocate memory on the host
catData = (double*)malloc(sizeof(double) * catalanNums);
// allocate memory on the device
hipMalloc(&dev_catData, sizeof(double) * catalanNums);
// copy host memory to device
hipMemcpy(dev_catData, catData, catalanNums * sizeof(double), hipMemcpyHostToDevice);
// total number of 1024 thread blocks
if (threadNums % 1024 != 0)
{
blocks = (threadNums / 1024) + 1;
}
else
{
blocks = threadNums / 1024;
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
// launch the threads onto the GPU
hipLaunchKernelGGL(( catalan), dim3(blocks),dim3(1024), 0, 0, catalanNums, dev_catData);
hipEventRecord(stop);
hipDeviceSynchronize();
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("%f ms\n", milliseconds);
printf("Exited kernel\n");
hipMemcpy(catData, dev_catData, catalanNums * sizeof(double), hipMemcpyDeviceToHost);
if ((fp = fopen("catalan.dat", "a")) == NULL)
{
printf("Failed to open file: catalan.dat\n");
}
else
{
for (i = 0; i < catalanNums; i++)
{
fprintf(fp, "%.0lf\n", catData[i]);
}
fclose(fp); // only close the file if it was actually opened
}
hipFree(dev_catData);
free(catData);
return 0;
}
|
c57085cf07149a074b8d1e3e3ac69ee9bff91ec2.cu
|
#include <iostream>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
// Problem description: compute threads with catalan numbers
//
// Divide up the work as evenly as possible
/* Write device code / function here pls */
/* CUDA kernel function goes here */
__global__
void catalan(unsigned int catNums, double* data)
{
unsigned int i = (blockIdx.x * blockDim.x + threadIdx.x);
int j, k;
double result = 1.0;
k = i + 1;
// thread 0: i = 0 -> calculate first cat #
// thread 1: i = 1 -> calculate second cat #
// thread 2: i = 2 -> calculate the next one...
// thread 1000: i = 999
// all kernel threads (GPU) will enter and execute this kernel function
// i <- unique to each thread
//printf("thread number: %d\n", i);
if (i < catNums) // only threads 0..catNums-1 have an output slot in data[]
{
for (j = 0; j < k; j++)
{
// compute the binomial coefficient
result *= ((2 * k) - j);
result /= (j + 1);
}
// resulting catalan number
result = result / (k + 1);
// store resulting catalan number into array
data[i] = result;
}
// result = (2*(2*i-1)*c)/(i+1);
}
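// Sanity check (values from the closed form C_k = C(2k,k)/(k+1), not printed
// by the original code): thread i computes C_{i+1}, so the first entries of
// data[] should be data[0]=1, data[1]=2, data[2]=5, data[3]=14, data[4]=42,
// data[5]=132.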
__global__
void printThreads(int N)
{
int i = (blockIdx.x * blockDim.x + threadIdx.x);
if (i < N)
{
printf("Thread number: %d\n", i);
}
}
/* Function to find and display information on installed GPU devices */
void printDeviceInfo()
{
struct cudaDeviceProp dp;
int gpuCount;
int i;
cudaGetDeviceCount(&gpuCount);
printf("%d GPU(s) found.\n", gpuCount);
for (i = 0; i < gpuCount; i++)
{
cudaError_t err = cudaGetDeviceProperties(&dp, i);
if (err == cudaSuccess)
{
printf("GPU #%d [Compute Capability %d.%d] (%lg GB of Global Memory): %s\n", i, dp.major, dp.minor, dp.totalGlobalMem / 1073741824.0, dp.name);
//printf("GPU #%d connected to PCI Bus #%d as Device #%d\n", i, dp.pciBusID, dp.pciDeviceID);
}
}
}
/* Host entry point */
int main(int argc, char** argv)
{
unsigned int i, threadNums, catalanNums, blocks = 0;
double* catData, * dev_catData;
FILE* fp;
printDeviceInfo();
/*if (argc != 2)
{
printf("Usage: catalan catalanNums\n");
return -1;
}*/
catalanNums = 10000;
threadNums = catalanNums;
// allocate memory on the host
catData = (double*)malloc(sizeof(double) * catalanNums);
// allocate memory on the device
cudaMalloc(&dev_catData, sizeof(double) * catalanNums);
// copy host memory to device
cudaMemcpy(dev_catData, catData, catalanNums * sizeof(double), cudaMemcpyHostToDevice);
// total number of 1024 thread blocks
if (threadNums % 1024 != 0)
{
blocks = (threadNums / 1024) + 1;
}
else
{
blocks = threadNums / 1024;
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
// launch the threads onto the GPU
catalan<<<blocks,1024>>>(catalanNums, dev_catData);
cudaEventRecord(stop);
cudaDeviceSynchronize();
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("%f ms\n", milliseconds);
printf("Exited kernel\n");
cudaMemcpy(catData, dev_catData, catalanNums * sizeof(double), cudaMemcpyDeviceToHost);
if ((fp = fopen("catalan.dat", "a")) == NULL)
{
printf("Failed to open file: catalan.dat\n");
}
else
{
for (i = 0; i < catalanNums; i++)
{
fprintf(fp, "%.0lf\n", catData[i]);
}
fclose(fp); // only close the file if it was actually opened
}
cudaFree(dev_catData);
free(catData);
return 0;
}
|
3445f591275154de5a1982b494ff8437d2e35b14.hip
|
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include "CommonHeaders.h"
#include "RenderBuffer.h"
#include "FaceBuffer.h"
#include "TransformationMatrixes.h"
#include "Rendering.h"
#include "CameraHandle.h"
#include <random>
#include <sstream>
#include "kernel.hip"
//#include <Game.h>
void DrawFPS(sf::RenderWindow &window, sf::Clock &Clock)
{
sf::Font font;
if (!font.loadFromFile("arial.ttf")){}
sf::Text text;
float fps = 1.f / Clock.getElapsedTime().asSeconds();
Clock.restart();
std::ostringstream ss(std::stringstream::in | std::stringstream::out);
ss << fps;
std::string str(ss.str());
sf::String string(str);
text.setString(string);
text.setFont(font);
text.setColor(sf::Color::Blue);
text.setStyle(sf::Text::Bold | sf::Text::Underlined);
window.draw(text);
}
int main()
{
//TEST POINT CLASS PLEASE !!!!!!!!!!
//Initialize our data
Vector3 v_array[6] = { Vector3(-15, -15, 100), Vector3(15, -15, 100), Vector3(0, 15, 100), Vector3(-15, 15, 300), Vector3(15, 15, 300), Vector3(0, -15, 300) };
//VertexBuffer buffer(v_array,6);
Vector3 location(0, 0, 0);
VertexBuffer buffer(0);
int counter = 0;
buffer.CreateCube(20, Vector3(100, 0, 200), 0);
buffer.CreateCube(20, Vector3(-100, 0, 200), 36);
buffer.CreateCube(20, Vector3(0, 0, 200), 72);
buffer.CreateCube(20, Vector3(0, 100, 200), 108);
buffer.CreateCube(20, Vector3(0, -100, 200), 144);
FaceBuffer triangleBuffer(1000);
triangleBuffer.createCube(20, Vector3(0, 0, 100), 0);
triangleBuffer.createCube(20, Vector3(0, 40, 100), 11);
//for (int x = 0; x < 100; x++)
//{
//triangleBuffer.createCube(20, location, counter);
//location = location + Vector3(40, 0, 0);
//counter += 1;
//}
RenderBuffer renderBuffer(buffer.getSize());
renderBuffer = buffer;
float sensitivity = 1.1;
CameraHandle cameraHandle(500, 500, sensitivity); // THE CAMERA WILL ROTATE INCORRECTLY AT THE FIRST FRAME !
float step = 4;
float rotate = 2;
Matrix m = TransformationMatrixes::rotationMatrix(0, 0, 0);
int Screen_Width = 1000, Screen_Height = 1000;
int View_Angle = 90;
Vector3 up(0, 1, 0);
Vector3 Camera_Pos(0, 0, 0);
Vector3 Camera_Look(0, 0, 0);
int View_Distance = 1500;
//CREATE OUR WINDOW
sf::RenderWindow window(sf::VideoMode(Screen_Width, Screen_Height), "3D Rendering! -- Yuval's Engine!");
//Create shapes
sf::Image image;
sf::Sprite sprite;
sf::Texture background;
sf::RenderTexture texture;
if (!texture.create(1000, 1000))
return -1;
image.create(1000, 1000, sf::Color::Black);
image.setPixel(100, 100, sf::Color::Red);
//if (!image.saveToFile("result.png"))
//return 1;
sf::Uint8* pixels = new sf::Uint8[Screen_Width * Screen_Height * 4];
for (int i = 0; i < Screen_Width * Screen_Height * 4; i++)
{
pixels[i] = rand() % 255;
}
if (!background.loadFromImage(image))
return -1;
sprite.setTexture(background);
//Rendering Method : raysFromCamera OR raysToCamera
std::string renderMethod = "To";
sf::Clock Clock;
sf::Time time;
const int arraySize = 10;
int a[arraySize];
int b[arraySize];
for (int i = 0; i < arraySize; i++){
a[i] = rand();
b[i] = rand();
}
int c[arraySize] = { 0 };
addWithCuda(c, a, b, arraySize);
//Game Loop
while (window.isOpen()){
//CREATE OUR EVENT HANDLE
sf::Event event;
//Event Loop
while (window.pollEvent(event))
{
switch (event.type)
{
case sf::Event::Closed:
window.close();
break;
case sf::Event::KeyPressed:
switch (event.key.code)
{
case sf::Keyboard::W:
renderBuffer.moveForward(step);
triangleBuffer.moveForward(step);
break;
case sf::Keyboard::A:
renderBuffer.moveLeft(step);
triangleBuffer.moveLeft(step);
break;
case sf::Keyboard::S:
renderBuffer.moveBackward(step);
triangleBuffer.moveBackward(step);
break;
case sf::Keyboard::D:
renderBuffer.moveRight(step);
triangleBuffer.moveRight(step);
break;
case sf::Keyboard::Space:
renderBuffer.moveUp(step);
triangleBuffer.moveUp(step);
break;
case sf::Keyboard::LShift:
renderBuffer.moveDown(step);
triangleBuffer.moveDown(step);
break;
}
break;
case sf::Event::MouseMoved:
//cameraHandle.MouseMovedCamera(event, renderBuffer);
cameraHandle.MouseMovedCamera(event, triangleBuffer);
break;
default:
break;
}
}
//Game Logic
if (renderMethod == "From")
{
background.update(Rendering::From(window, Camera_Pos, Camera_Look, buffer, View_Angle, Screen_Height, Screen_Width, View_Distance));
sprite.setTexture(background);
//Draw Logic
window.clear(sf::Color::White);
window.draw(sprite);
window.display();
}
if (renderMethod == "To")
{
window.clear(sf::Color::White);
DrawFPS(window, Clock);
//Rendering::To(window, Camera_Pos, Camera_Look, up, renderBuffer, View_Angle, Screen_Width, Screen_Height, View_Distance).DrawBufferTriangles(window);
Rendering::To(window, Camera_Pos, Camera_Look, up, triangleBuffer, View_Angle, Screen_Width, Screen_Height, View_Distance).DrawBufferTriangles(window);
}
}
}
|
3445f591275154de5a1982b494ff8437d2e35b14.cu
|
#pragma once
#include "CommonHeaders.h"
#include "RenderBuffer.h"
#include "FaceBuffer.h"
#include "TransformationMatrixes.h"
#include "Rendering.h"
#include "CameraHandle.h"
#include <random>
#include <sstream>
#include "kernel.cu"
//#include <Game.h>
void DrawFPS(sf::RenderWindow &window, sf::Clock &Clock)
{
sf::Font font;
if (!font.loadFromFile("arial.ttf")){}
sf::Text text;
float fps = 1.f / Clock.getElapsedTime().asSeconds();
Clock.restart();
std::ostringstream ss(std::stringstream::in | std::stringstream::out);
ss << fps;
std::string str(ss.str());
sf::String string(str);
text.setString(string);
text.setFont(font);
text.setColor(sf::Color::Blue);
text.setStyle(sf::Text::Bold | sf::Text::Underlined);
window.draw(text);
}
int main()
{
//TEST POINT CLASS PLEASE !!!!!!!!!!
//Initialize our data
Vector3 v_array[6] = { Vector3(-15, -15, 100), Vector3(15, -15, 100), Vector3(0, 15, 100), Vector3(-15, 15, 300), Vector3(15, 15, 300), Vector3(0, -15, 300) };
//VertexBuffer buffer(v_array,6);
Vector3 location(0, 0, 0);
VertexBuffer buffer(0);
int counter = 0;
buffer.CreateCube(20, Vector3(100, 0, 200), 0);
buffer.CreateCube(20, Vector3(-100, 0, 200), 36);
buffer.CreateCube(20, Vector3(0, 0, 200), 72);
buffer.CreateCube(20, Vector3(0, 100, 200), 108);
buffer.CreateCube(20, Vector3(0, -100, 200), 144);
FaceBuffer triangleBuffer(1000);
triangleBuffer.createCube(20, Vector3(0, 0, 100), 0);
triangleBuffer.createCube(20, Vector3(0, 40, 100), 11);
//for (int x = 0; x < 100; x++)
//{
//triangleBuffer.createCube(20, location, counter);
//location = location + Vector3(40, 0, 0);
//counter += 1;
//}
RenderBuffer renderBuffer(buffer.getSize());
renderBuffer = buffer;
float sensitivity = 1.1;
CameraHandle cameraHandle(500, 500, sensitivity); // THE CAMERA WILL ROTATE INCORRECTLY AT THE FIRST FRAME !
float step = 4;
float rotate = 2;
Matrix m = TransformationMatrixes::rotationMatrix(0, 0, 0);
int Screen_Width = 1000, Screen_Height = 1000;
int View_Angle = 90;
Vector3 up(0, 1, 0);
Vector3 Camera_Pos(0, 0, 0);
Vector3 Camera_Look(0, 0, 0);
int View_Distance = 1500;
//CREATE OUR WINDOW
sf::RenderWindow window(sf::VideoMode(Screen_Width, Screen_Height), "3D Rendering! -- Yuval's Engine!");
//Create shapes
sf::Image image;
sf::Sprite sprite;
sf::Texture background;
sf::RenderTexture texture;
if (!texture.create(1000, 1000))
return -1;
image.create(1000, 1000, sf::Color::Black);
image.setPixel(100, 100, sf::Color::Red);
//if (!image.saveToFile("result.png"))
//return 1;
sf::Uint8* pixels = new sf::Uint8[Screen_Width * Screen_Height * 4];
for (int i = 0; i < Screen_Width * Screen_Height * 4; i++)
{
pixels[i] = rand() % 255;
}
if (!background.loadFromImage(image))
return -1;
sprite.setTexture(background);
//Rendering Method : raysFromCamera OR raysToCamera
std::string renderMethod = "To";
sf::Clock Clock;
sf::Time time;
const int arraySize = 10;
int a[arraySize];
int b[arraySize];
for (int i = 0; i < arraySize; i++){
a[i] = rand();
b[i] = rand();
}
int c[arraySize] = { 0 };
addWithCuda(c, a, b, arraySize);
//Game Loop
while (window.isOpen()){
//CREATE OUR EVENT HANDLE
sf::Event event;
//Event Loop
while (window.pollEvent(event))
{
switch (event.type)
{
case sf::Event::Closed:
window.close();
break;
case sf::Event::KeyPressed:
switch (event.key.code)
{
case sf::Keyboard::W:
renderBuffer.moveForward(step);
triangleBuffer.moveForward(step);
break;
case sf::Keyboard::A:
renderBuffer.moveLeft(step);
triangleBuffer.moveLeft(step);
break;
case sf::Keyboard::S:
renderBuffer.moveBackward(step);
triangleBuffer.moveBackward(step);
break;
case sf::Keyboard::D:
renderBuffer.moveRight(step);
triangleBuffer.moveRight(step);
break;
case sf::Keyboard::Space:
renderBuffer.moveUp(step);
triangleBuffer.moveUp(step);
break;
case sf::Keyboard::LShift:
renderBuffer.moveDown(step);
triangleBuffer.moveDown(step);
break;
}
break;
case sf::Event::MouseMoved:
//cameraHandle.MouseMovedCamera(event, renderBuffer);
cameraHandle.MouseMovedCamera(event, triangleBuffer);
break;
default:
break;
}
}
//Game Logic
if (renderMethod == "From")
{
background.update(Rendering::From(window, Camera_Pos, Camera_Look, buffer, View_Angle, Screen_Height, Screen_Width, View_Distance));
sprite.setTexture(background);
//Draw Logic
window.clear(sf::Color::White);
window.draw(sprite);
window.display();
}
if (renderMethod == "To")
{
window.clear(sf::Color::White);
DrawFPS(window, Clock);
//Rendering::To(window, Camera_Pos, Camera_Look, up, renderBuffer, View_Angle, Screen_Width, Screen_Height, View_Distance).DrawBufferTriangles(window);
Rendering::To(window, Camera_Pos, Camera_Look, up, triangleBuffer, View_Angle, Screen_Width, Screen_Height, View_Distance).DrawBufferTriangles(window);
}
}
}
|
d1ce3b773b48314e054d0ee754d9126cacc1bc8c.hip
|
// !!! This is a file automatically generated by hipify!!!
// gillespie.cu Haixiang Xu
#include <cstdio>
#include <cmath>
#include <hip/hip_runtime.h>
#include "gillespie_cuda.cuh"
__device__ static float atomicMin(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
/*
__device__ static float atomicMax(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
*/
// a single iteration of the Gillespie algorithm on
// the given system using an array of random numbers
// given as an argument.
__global__
void cudaGillKernel(float* dev_points,
float* dev_points_2,
float* state,
float* X,
float* dev_timestep,
float* dev_accu_time,
const int N) {
const float kon = 0.1;
const float koff = 0.9;
const float b = 10.0;
const float g = 1.0;
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < N) {
if (state[idx] < 0.5){
dev_timestep[idx] = -logf(dev_points[idx]) / (kon + X[idx] * g);
dev_accu_time[idx] += dev_timestep[idx];
if (dev_points_2[idx] > kon / (kon + X[idx] * g)) { // if X--
X[idx]--;
} else { // if OFF --> ON
state[idx] = 1;
}
} else {
dev_timestep[idx] = -logf(dev_points[idx]) / (koff + b + X[idx] * g);
dev_accu_time[idx] += dev_timestep[idx];
if (dev_points_2[idx] <= koff / (koff + b + X[idx] * g)) { // ON --> OFF
state[idx] = 0;
} else if (dev_points_2[idx] <= (koff + b) / (koff + b + X[idx] * g)) { // X++
X[idx]++;
} else { // X--
X[idx]--;
}
}
// __syncthreads();
idx += blockDim.x * gridDim.x;
}
}
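// Worked example (illustrative numbers only, using the constants above): in
// the OFF state the total propensity is a0 = kon + g*X. For X = 10 this gives
// a0 = 0.1 + 1.0*10 = 10.1, so a uniform draw u = 0.5 yields a waiting time
// dt = -logf(0.5)/10.1 ~ 0.069, and the gene switches ON with probability
// kon/a0 ~ 0.0099; otherwise one molecule of X decays.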
// a kernel to use reduction to find minimum
__global__
void cudaFindMinKernel (
float* dev_timestep,
float* min_timestep, // NEED TO ALLOCATE
const int N) {
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ float data[64]; // NOTE: size must be >= blockDim.x (at most 64 threads per block); update this if the launch configuration changes
for (int i = 0; i < 64; ++i){
data[i] = 99999;
}
__syncthreads();
while (idx < N) {
{
atomicMin(&data[threadIdx.x], dev_timestep[idx]);
}
idx += blockDim.x * gridDim.x;
}
__syncthreads();
int l = blockDim.x;
while (l >= 1) {
l /= 2;
if (threadIdx.x < l) {
data[threadIdx.x] = (data[threadIdx.x]<data[threadIdx.x + l])? data[threadIdx.x]:data[threadIdx.x + l];
}
__syncthreads();
}
*min_timestep = data[0];
// printf("%f\n", min_timestep);
}
__global__
void cudaResampleKernel(
float* dev_resample_X,
float* dev_X,
float* dev_accu_time,
const int N,
const int T) {
// TODO
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < N) {
int i = (floorf)(dev_accu_time[idx] * 10);
// printf("idx: %d, N: %d, i: %d, dev_ac: %f\n", idx, N, i, dev_accu_time[idx]);
for (int j = 0; j < i && j < T; ++j) {
// printf("j: %d , T: %d , i: %d , idx: %d , dex_X: %f , dev_resample: %f\n", j, T, i, idx, dev_X[idx], dev_resample_X[idx]);
if (dev_resample_X[idx + N * j] == 0) {
dev_resample_X[idx + N * j] = dev_X[idx];
// printf("%d %d, %f\n",idx, j, dev_X[idx]);
}
}
// while (dev_is_resampled[idx * T + i] == 0 && i >= 0 && i < T) {
// dev_is_resampled[idx * T + i] = 1;
// dev_resample_X[idx * T + i] = dev_X[idx];
// i--;
// }
idx += blockDim.x * gridDim.x;
}
}
__global__
void cudaMeanVarKernel(float* dev_resample_X,
float* dev_mean,
float* dev_var,
const int N,
const int T
) {
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
while (idx < T) {
for (int i = 0; i < N; ++i) {
dev_mean[idx] += dev_resample_X[idx * N + i];
dev_var[idx] += dev_resample_X[idx * N + i] * dev_resample_X[idx * N + i];
}
__syncthreads();
dev_mean[idx] /= N;
dev_var[idx] = dev_var[idx] / N - (dev_mean[idx] * dev_mean[idx]);
idx += blockDim.x * gridDim.x;
}
}
void cudaCallGillKernel(const int blocks,
const int threadsPerBlock,
float* dev_points,
float* dev_points_2,
float* state,
float* X,
float* dev_timestep,
float* dev_accu_time,
const int N) {
hipLaunchKernelGGL(( cudaGillKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, dev_points, dev_points_2, state, X, dev_timestep, dev_accu_time, N);
}
void cudaCallFindMinKernel(const int blocks,
const int threadsPerBlock,
float* dev_accu_time,
float* dev_min_time,
const int N) {
hipLaunchKernelGGL(( cudaFindMinKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, dev_accu_time, dev_min_time, N);
}
void cudaCallResampleKernel(const int blocks,
const int threadsPerBlock,
float* dev_resample_X,
float* dev_X,
float* dev_accu_time,
const int N,
const int T) {
hipLaunchKernelGGL(( cudaResampleKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, dev_resample_X, dev_X, dev_accu_time, N, T);
}
void cudaCallMeanVarKernel(const int blocks,
const int threadsPerBlock,
float* dev_resample_X,
float* dev_mean,
float* dev_var,
const int N,
const int T
) {
hipLaunchKernelGGL(( cudaMeanVarKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, dev_resample_X, dev_mean, dev_var, N, T);
}
|
d1ce3b773b48314e054d0ee754d9126cacc1bc8c.cu
|
// gillespie.cu Haixiang Xu
#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>
#include "gillespie_cuda.cuh"
__device__ static float atomicMin(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
/*
__device__ static float atomicMax(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
*/
// a single iteration of the Gillespie algorithm on
// the given system using an array of random numbers
// given as an argument.
__global__
void cudaGillKernel(float* dev_points,
float* dev_points_2,
float* state,
float* X,
float* dev_timestep,
float* dev_accu_time,
const int N) {
const float kon = 0.1;
const float koff = 0.9;
const float b = 10.0;
const float g = 1.0;
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < N) {
if (state[idx] < 0.5){
dev_timestep[idx] = -logf(dev_points[idx]) / (kon + X[idx] * g);
dev_accu_time[idx] += dev_timestep[idx];
if (dev_points_2[idx] > kon / (kon + X[idx] * g)) { // if X--
X[idx]--;
} else { // if OFF --> ON
state[idx] = 1;
}
} else {
dev_timestep[idx] = -logf(dev_points[idx]) / (koff + b + X[idx] * g);
dev_accu_time[idx] += dev_timestep[idx];
if (dev_points_2[idx] <= koff / (koff + b + X[idx] * g)) { // ON --> OFF
state[idx] = 0;
} else if (dev_points_2[idx] <= (koff + b) / (koff + b + X[idx] * g)) { // X++
X[idx]++;
} else { // X--
X[idx]--;
}
}
// __syncthreads();
idx += blockDim.x * gridDim.x;
}
}
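// Worked example (illustrative numbers only, using the constants above): in
// the OFF state the total propensity is a0 = kon + g*X. For X = 10 this gives
// a0 = 0.1 + 1.0*10 = 10.1, so a uniform draw u = 0.5 yields a waiting time
// dt = -logf(0.5)/10.1 ~ 0.069, and the gene switches ON with probability
// kon/a0 ~ 0.0099; otherwise one molecule of X decays.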
// a kernel to use reduction to find minimum
__global__
void cudaFindMinKernel (
float* dev_timestep,
float* min_timestep, // NEED TO ALLOCATE
const int N) {
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ float data[64]; // NOTE: size must be >= blockDim.x (at most 64 threads per block); update this if the launch configuration changes
for (int i = 0; i < 64; ++i){
data[i] = 99999;
}
__syncthreads();
while (idx < N) {
{
atomicMin(&data[threadIdx.x], dev_timestep[idx]);
}
idx += blockDim.x * gridDim.x;
}
__syncthreads();
int l = blockDim.x;
while (l >= 1) {
l /= 2;
if (threadIdx.x < l) {
data[threadIdx.x] = (data[threadIdx.x]<data[threadIdx.x + l])? data[threadIdx.x]:data[threadIdx.x + l];
}
__syncthreads();
}
*min_timestep = data[0];
// printf("%f\n", min_timestep);
}
__global__
void cudaResampleKernel(
float* dev_resample_X,
float* dev_X,
float* dev_accu_time,
const int N,
const int T) {
// TODO
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < N) {
int i = (floorf)(dev_accu_time[idx] * 10);
// printf("idx: %d, N: %d, i: %d, dev_ac: %f\n", idx, N, i, dev_accu_time[idx]);
for (int j = 0; j < i && j < T; ++j) {
// printf("j: %d , T: %d , i: %d , idx: %d , dex_X: %f , dev_resample: %f\n", j, T, i, idx, dev_X[idx], dev_resample_X[idx]);
if (dev_resample_X[idx + N * j] == 0) {
dev_resample_X[idx + N * j] = dev_X[idx];
// printf("%d %d, %f\n",idx, j, dev_X[idx]);
}
}
// while (dev_is_resampled[idx * T + i] == 0 && i >= 0 && i < T) {
// dev_is_resampled[idx * T + i] = 1;
// dev_resample_X[idx * T + i] = dev_X[idx];
// i--;
// }
idx += blockDim.x * gridDim.x;
}
}
__global__
void cudaMeanVarKernel(float* dev_resample_X,
float* dev_mean,
float* dev_var,
const int N,
const int T
) {
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
while (idx < T) {
for (int i = 0; i < N; ++i) {
dev_mean[idx] += dev_resample_X[idx * N + i];
dev_var[idx] += dev_resample_X[idx * N + i] * dev_resample_X[idx * N + i];
}
__syncthreads();
dev_mean[idx] /= N;
dev_var[idx] = dev_var[idx] / N - (dev_mean[idx] * dev_mean[idx]);
idx += blockDim.x * gridDim.x;
}
}
void cudaCallGillKernel(const int blocks,
const int threadsPerBlock,
float* dev_points,
float* dev_points_2,
float* state,
float* X,
float* dev_timestep,
float* dev_accu_time,
const int N) {
cudaGillKernel<<<blocks, threadsPerBlock>>>(dev_points, dev_points_2, state, X, dev_timestep, dev_accu_time, N);
}
void cudaCallFindMinKernel(const int blocks,
const int threadsPerBlock,
float* dev_accu_time,
float* dev_min_time,
const int N) {
cudaFindMinKernel<<<blocks, threadsPerBlock>>>(dev_accu_time, dev_min_time, N);
}
void cudaCallResampleKernel(const int blocks,
const int threadsPerBlock,
float* dev_resample_X,
float* dev_X,
float* dev_accu_time,
const int N,
const int T) {
cudaResampleKernel<<<blocks, threadsPerBlock>>>(dev_resample_X, dev_X, dev_accu_time, N, T);
}
void cudaCallMeanVarKernel(const int blocks,
const int threadsPerBlock,
float* dev_resample_X,
float* dev_mean,
float* dev_var,
const int N,
const int T
) {
cudaMeanVarKernel<<<blocks, threadsPerBlock>>>(dev_resample_X, dev_mean, dev_var, N, T);
}
|
b2b0501394006d65d04d667470c62e3c4f4746af.hip
|
// !!! This is a file automatically generated by hipify!!!
// -*- C++ -*-
// -*- coding: utf-8 -*-
//
// michael a.g. aïvázis <[email protected]>
// parasim
// (c) 1998-2023 all rights reserved
//
// configuration
#include <portinfo>
// STL
#include <complex>
// cuda
#include <hip/hip_runtime.h>
#include <hip/hip_cooperative_groups.h>
// pyre
#include <pyre/journal.h>
// pull the declarations
#include "kernels.h"
// the SAT generation kernel
template <typename value_t = float>
__global__
static void
_secStats(const value_t * sat,
std::size_t pairs,
std::size_t refRows, std::size_t refCols,
std::size_t secRows, std::size_t secCols,
std::size_t corRows, std::size_t corCols,
value_t * stats);
// implementation
// precompute the amplitude averages for all possible placements of a reference chip within the
// secondary search window for all pairs in the plan. we allocate room for
// {_pairs}*{corRows*corCols} floating point values and use the precomputed SAT tables resident
// on the device.
//
// the SAT tables require a slice and produce the sum of the values of cells within the slice
// in no more than four memory accesses per search tile; there are boundary cases to consider
// that add a bit of complexity to the implementation; the boundary cases could have been
// trivialized using ghost cells around the search window boundary, but the memory cost is high
void
ampcor::cuda::kernels::
secStats(const float * dSAT,
std::size_t pairs,
std::size_t refRows, std::size_t refCols,
std::size_t secRows, std::size_t secCols,
std::size_t corRows, std::size_t corCols,
float * dStats)
{
// make a channel
pyre::journal::info_t channel("ampcor.cuda.secStats");
// launch blocks of T threads
auto T = 128;
// in as many blocks as it takes to handle all pairs
auto B = pairs / T + (pairs % T ? 1 : 0);
// show me
channel
<< pyre::journal::at(__HERE__)
<< "launching " << B << " blocks of " << T
<< " threads each to handle the " << pairs
<< " entries of the secondary amplitude averages arena"
<< pyre::journal::endl;
// launch the kernels
hipLaunchKernelGGL(( _secStats) , dim3(B),dim3(T), 0, 0, dSAT,
pairs,
refRows, refCols, secRows, secCols, corRows, corCols,
dStats);
// check whether all went well
auto launchStatus = hipGetLastError();
// if something went wrong
if (launchStatus != hipSuccess) {
// form the error description
std::string description = hipGetErrorName(launchStatus);
// make a channel
pyre::journal::error_t channel("ampcor.cuda.secStats");
// complain
channel
<< pyre::journal::at(__HERE__)
<< "while computing the arena of placement averages"
<< description << " (" << launchStatus << ")"
<< pyre::journal::endl;
// bail
throw std::runtime_error(description);
}
// wait for the device to finish
auto execStatus = hipDeviceSynchronize();
// if something went wrong
if (execStatus != hipSuccess) {
// form the error description
std::string description = hipGetErrorName(execStatus);
// make a channel
pyre::journal::error_t channel("ampcor.cuda");
// complain
channel
<< pyre::journal::at(__HERE__)
<< "while computing the average amplitudes of all possible search window placements: "
<< description << " (" << execStatus << ")"
<< pyre::journal::endl;
// bail
throw std::runtime_error(description);
}
// all done
return;
}
// the SAT generation kernel
template <typename value_t>
__global__
void
_secStats(const value_t * dSAT,
std::size_t tiles,
std::size_t refRows, std::size_t refCols,
std::size_t secRows, std::size_t secCols,
std::size_t corRows, std::size_t corCols,
value_t * dStats)
{
// build the workload descriptors
// global
// std::size_t B = gridDim.x; // number of blocks
std::size_t T = blockDim.x; // number of threads per block
// std::size_t W = B*T; // total number of workers
// local
std::size_t b = blockIdx.x; // my block id
std::size_t t = threadIdx.x; // my thread id within my block
std::size_t w = b*T + t; // my worker id
// if my worker id exceeds the number of cells that require update
if (w >= tiles) {
// nothing for me to do
return;
}
// the shape of the SAT table includes the ghost cells
auto satRows = secRows + 1;
auto satCols = secCols + 1;
// compute the number of cells in a reference tile; it scales the running sum
auto refCells = refRows * refCols;
// the number of cells in a SAT; lets me skip to my SAT
auto satCells = satRows * satCols;
// and the number of cells in a {stats} matrix; lets me skip to my {stats} slice
auto corCells = corRows * corCols;
// locate the beginning of my SAT table
auto sat = dSAT + w*satCells;
// locate the beginning of my {stats} table
auto stats = dStats + w*corCells;
// fill each slot in the output table by looping over (row,col) indices
// the {row} range
for (auto row = 0; row < corRows; ++row) {
// the {col} range
for (auto col = 0; col < corCols; ++col) {
// computing the sum of the secondary amplitudes for this placement involves
// reading four values from the SAT whose locations are derived from {row,col}
// N.B.: the SAT has a border with zeroes that guard against out of bounds
// accesses, but we must still get the arithmetic right
// the upper left corner
auto iUL = row*satCols + col;
// the upper right corner is {refCols} columns away from that
auto iUR = iUL + refCols;
// the lower left corner: skip {refRows} rows of the SAT
auto iLL = iUL + refRows*satCols;
// the lower right corner is again {refCols} columns away from that
auto iLR = iLL + refCols;
// the sum is
auto sum = sat[iLR] - sat[iLL] - sat[iUR] + sat[iUL];
// identify the slot we write to
auto slot = stats + row*corCols + col;
// store the result: the running sum scaled by the size of a reference tile
*slot = sum / refCells;
}
}
// all done
return;
}
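// Worked example (illustrative numbers only): for a refRows = 2 by refCols = 3
// reference tile placed anywhere over an all-ones amplitude tile, the four SAT
// reads give sum = sat[iLR] - sat[iLL] - sat[iUR] + sat[iUL] = 6, and the
// stored value is sum / refCells = 6 / 6 = 1, the expected average amplitude
// of a constant tile.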
// end of file
|
b2b0501394006d65d04d667470c62e3c4f4746af.cu
|
// -*- C++ -*-
// -*- coding: utf-8 -*-
//
// michael a.g. aïvázis <[email protected]>
// parasim
// (c) 1998-2023 all rights reserved
//
// configuration
#include <portinfo>
// STL
#include <complex>
// cuda
#include <cuda_runtime.h>
#include <cooperative_groups.h>
// pyre
#include <pyre/journal.h>
// pull the declarations
#include "kernels.h"
// the SAT generation kernel
template <typename value_t = float>
__global__
static void
_secStats(const value_t * sat,
std::size_t pairs,
std::size_t refRows, std::size_t refCols,
std::size_t secRows, std::size_t secCols,
std::size_t corRows, std::size_t corCols,
value_t * stats);
// implementation
// precompute the amplitude averages for all possible placements of a reference chip within the
// secondary search window for all pairs in the plan. we allocate room for
// {_pairs}*{corRows*corCols} floating point values and use the precomputed SAT tables resident
// on the device.
//
// the SAT tables require a slice and produce the sum of the values of cells within the slice
// in no more than four memory accesses per search tile; there are boundary cases to consider
// that add a bit of complexity to the implementation; the boundary cases could have been
// trivialized using ghost cells around the search window boundary, but the memory cost is high
void
ampcor::cuda::kernels::
secStats(const float * dSAT,
std::size_t pairs,
std::size_t refRows, std::size_t refCols,
std::size_t secRows, std::size_t secCols,
std::size_t corRows, std::size_t corCols,
float * dStats)
{
// make a channel
pyre::journal::info_t channel("ampcor.cuda.secStats");
// launch blocks of T threads
auto T = 128;
// in as many blocks as it takes to handle all pairs
auto B = pairs / T + (pairs % T ? 1 : 0);
// show me
channel
<< pyre::journal::at(__HERE__)
<< "launching " << B << " blocks of " << T
<< " threads each to handle the " << pairs
<< " entries of the secondary amplitude averages arena"
<< pyre::journal::endl;
// launch the kernels
_secStats <<<B,T>>> (dSAT,
pairs,
refRows, refCols, secRows, secCols, corRows, corCols,
dStats);
// check whether all went well
auto launchStatus = cudaGetLastError();
// if something went wrong
if (launchStatus != cudaSuccess) {
// form the error description
std::string description = cudaGetErrorName(launchStatus);
// make a channel
pyre::journal::error_t channel("ampcor.cuda.secStats");
// complain
channel
<< pyre::journal::at(__HERE__)
<< "while computing the arena of placement averages"
<< description << " (" << launchStatus << ")"
<< pyre::journal::endl;
// bail
throw std::runtime_error(description);
}
// wait for the device to finish
auto execStatus = cudaDeviceSynchronize();
// if something went wrong
if (execStatus != cudaSuccess) {
// form the error description
std::string description = cudaGetErrorName(execStatus);
// make a channel
pyre::journal::error_t channel("ampcor.cuda");
// complain
channel
<< pyre::journal::at(__HERE__)
<< "while computing the average amplitudes of all possible search window placements: "
<< description << " (" << execStatus << ")"
<< pyre::journal::endl;
// bail
throw std::runtime_error(description);
}
// all done
return;
}
// the SAT generation kernel
template <typename value_t>
__global__
void
_secStats(const value_t * dSAT,
std::size_t tiles,
std::size_t refRows, std::size_t refCols,
std::size_t secRows, std::size_t secCols,
std::size_t corRows, std::size_t corCols,
value_t * dStats)
{
// build the workload descriptors
// global
// std::size_t B = gridDim.x; // number of blocks
std::size_t T = blockDim.x; // number of threads per block
// std::size_t W = B*T; // total number of workers
// local
std::size_t b = blockIdx.x; // my block id
std::size_t t = threadIdx.x; // my thread id within my block
std::size_t w = b*T + t; // my worker id
// if my worker id exceeds the number of cells that require update
if (w >= tiles) {
// nothing for me to do
return;
}
// the shape of the SAT table includes the ghost cells
auto satRows = secRows + 1;
auto satCols = secCols + 1;
// compute the number of cells in a reference tile; it scales the running sum
auto refCells = refRows * refCols;
// the number of cells in a SAT; lets me skip to my SAT
auto satCells = satRows * satCols;
// and the number of cells in a {stats} matrix; lets me skip to my {stats} slice
auto corCells = corRows * corCols;
// locate the beginning of my SAT table
auto sat = dSAT + w*satCells;
// locate the beginning of my {stats} table
auto stats = dStats + w*corCells;
// fill each slot in the output table by looping over (row,col) indices
// the {row} range
for (auto row = 0; row < corRows; ++row) {
// the {col} range
for (auto col = 0; col < corCols; ++col) {
// computing the sum of the secondary amplitudes for this placement involves
// reading four values from the SAT whose locations are derived from {row,col}
// N.B.: the SAT has a border with zeroes that guard against out of bounds
// accesses, but we must still get the arithmetic right
// the upper left corner
auto iUL = row*satCols + col;
// the upper right corner is {refCols} columns away from that
auto iUR = iUL + refCols;
// the lower left corner: skip {refRows} rows of the SAT
auto iLL = iUL + refRows*satCols;
// the lower right corner is again {refCols} columns away from that
auto iLR = iLL + refCols;
// the sum is
auto sum = sat[iLR] - sat[iLL] - sat[iUR] + sat[iUL];
// identify the slot we write to
auto slot = stats + row*corCols + col;
// store the result: the running sum scaled by the size of a reference tile
*slot = sum / refCells;
}
}
// all done
return;
}
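// Worked example (illustrative numbers only): for a refRows = 2 by refCols = 3
// reference tile placed anywhere over an all-ones amplitude tile, the four SAT
// reads give sum = sat[iLR] - sat[iLL] - sat[iUR] + sat[iUL] = 6, and the
// stored value is sum / refCells = 6 / 6 = 1, the expected average amplitude
// of a constant tile.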
// end of file
|
6bce5f00e0a073e39d0fcba1b7496f80cfc6fe78.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Copyright (c) 2022 NVIDIA Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/fused/fused_gemm_epilogue_op.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/framework/scope_guard.h"
#include "paddle/fluid/platform/dynload/cublasLt.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename DeviceContext, typename T>
class FusedGemmEpilogueKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
const Tensor* x = ctx.Input<Tensor>("X");
const Tensor* y = ctx.Input<Tensor>("Y");
const Tensor* bias = ctx.Input<Tensor>("Bias");
Tensor* out = ctx.Output<Tensor>("Out");
Tensor* reserve_space = ctx.Output<Tensor>("ReserveSpace");
bool trans_x = ctx.Attr<bool>("trans_x");
bool trans_y = ctx.Attr<bool>("trans_y");
std::string activation = ctx.Attr<std::string>("activation");
VLOG(10) << "trans_x = " << trans_x << " , trans_y = " << trans_y
<< " , activation = " << activation;
bool enable_auxiliary = reserve_space == nullptr ? false : true;
dev_ctx.Alloc<T>(out, out->numel() * sizeof(T));
auto* out_data = out->data<T>();
auto x_mat_dims =
phi::flatten_to_2d(x->dims(), trans_x ? 1 : x->dims().size() - 1);
// (M * K) * (K * N)
int64_t M = trans_x ? x_mat_dims[1] : x_mat_dims[0];
int64_t K = trans_y ? y->dims()[1] : y->dims()[0];
int64_t N = trans_y ? y->dims()[0] : y->dims()[1];
hipDataType mat_type = HIP_R_32F;
hipDataType scale_type = HIP_R_32F;
hipblasComputeType_t compute_type = CUBLAS_COMPUTE_32F;
if (std::is_same<T, paddle::platform::float16>::value) {
mat_type = HIP_R_16F;
}
if (std::is_same<T, double>::value) {
mat_type = HIP_R_64F;
scale_type = HIP_R_64F;
compute_type = CUBLAS_COMPUTE_64F;
}
cublasLtMatmulDesc_t operation_desc = NULL;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmulDescCreate(
&operation_desc, compute_type, scale_type));
hipblasOperation_t transx = trans_x ? HIPBLAS_OP_T : HIPBLAS_OP_N;
hipblasOperation_t transy = trans_y ? HIPBLAS_OP_T : HIPBLAS_OP_N;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_TRANSB,
&transx,
sizeof(transx)));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_TRANSA,
&transy,
sizeof(transy)));
cublasLtEpilogue_t epiloque_func =
get_epilogue_type_(activation, enable_auxiliary);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE,
&epiloque_func,
sizeof(epiloque_func)));
const T* bias_data = bias->data<T>();
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_BIAS_POINTER,
&bias_data,
sizeof(bias_data)));
if (enable_auxiliary && activation != "none") {
size_t reserve_space_size = 0;
if (activation == "relu") {
// Count in bits.
reserve_space_size = phi::product(out->dims()) / 8;
} else {
reserve_space_size = phi::product(out->dims()) * sizeof(T);
}
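// Illustrative sizes (not computed by the code): the ReLU auxiliary buffer
// stores one bit per output element, so a 1024 x 1024 Out tensor needs
// 1024*1024/8 = 131072 bytes here, versus 1024*1024*sizeof(T) bytes for the
// GELU auxiliary buffer in the branch above.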
dev_ctx.Alloc(reserve_space, out->type(), reserve_space_size);
void* aux_data = reinterpret_cast<void*>(reserve_space->data<T>());
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER,
&aux_data,
sizeof(aux_data)));
int64_t aux_ld = N;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD,
&aux_ld,
sizeof(aux_ld)));
}
cublasLtMatrixLayout_t x_desc = NULL, y_desc = NULL, out_desc = NULL;
if (trans_x)
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&x_desc, mat_type, M, K, M));
else
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&x_desc, mat_type, K, M, K));
if (trans_y)
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&y_desc, mat_type, K, N, K));
else
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&y_desc, mat_type, N, K, N));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&out_desc, mat_type, N, M, N));
cublasLtHandle_t lt_handle = dev_ctx.cublaslt_handle();
// NOTE(zengjinle): I do not know whether the 4MB workspace size is
// "enough". I just followed the settings from the NVIDIA MLPerf BERT code.
size_t workspace_size = static_cast<size_t>(4) * 1024 * 1024;
hipStream_t stream = dev_ctx.stream();
memory::allocation::AllocationPtr workspace = memory::Alloc(
dev_ctx.GetPlace(),
workspace_size,
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
double alpha64 = 1.0, beta64 = 0.0;
float alpha32 = 1.0f, beta32 = 0.0f;
void *alpha = nullptr, *beta = nullptr;
if (std::is_same<T, double>::value) {
alpha = &alpha64;
beta = &beta64;
} else {
alpha = &alpha32;
beta = &beta32;
}
const auto* y_data = y->data<T>();
const auto* x_data = x->data<T>();
auto algo = GemmEpilogueAlgoCache::Instance().GetGemmAlgo(lt_handle,
operation_desc,
y_desc,
x_desc,
out_desc,
alpha,
beta,
y_data,
x_data,
out_data,
stream,
workspace->ptr(),
workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmul(lt_handle,
operation_desc,
alpha,
y_data,
y_desc,
x_data,
x_desc,
beta,
out_data,
out_desc,
out_data,
out_desc,
algo,
workspace->ptr(),
workspace_size,
stream));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescDestroy(operation_desc));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(y_desc));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(x_desc));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(out_desc));
}
private:
static cublasLtEpilogue_t get_epilogue_type_(const std::string& activation,
bool enable_auxiliary) {
if (activation == "relu") {
return enable_auxiliary ? CUBLASLT_EPILOGUE_RELU_AUX_BIAS
: CUBLASLT_EPILOGUE_RELU_BIAS;
} else if (activation == "gelu") {
return enable_auxiliary ? CUBLASLT_EPILOGUE_GELU_AUX_BIAS
: CUBLASLT_EPILOGUE_GELU_BIAS;
} else if (activation == "none") {
return CUBLASLT_EPILOGUE_BIAS;
} else {
PADDLE_ENFORCE_EQ(
true,
false,
platform::errors::InvalidArgument(
"The activation attribute of fused_gemm_epilogue op should be"
" one of {\"none\", \"relu\", \"gelu\"}. But received %s."
"But received activation=%s.",
activation));
}
}
};
enum FusedGEMMGradInType { kDX = 0, kDY = 1, kDZ = 2 };
template <bool TransX, bool TransY>
struct FusedGEMMGradTrait;
template <>
struct FusedGEMMGradTrait<false, false> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradB = FusedGEMMGradInType::kDY;
static constexpr auto kXGradATrans = false;
static constexpr auto kXGradBTrans = true;
static constexpr auto kYGradA = FusedGEMMGradInType::kDX;
static constexpr auto kYGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradATrans = true;
static constexpr auto kYGradBTrans = false;
};
template <>
struct FusedGEMMGradTrait<true, false> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDY;
static constexpr auto kXGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradATrans = false;
static constexpr auto kXGradBTrans = true;
static constexpr auto kYGradA = FusedGEMMGradInType::kDX;
static constexpr auto kYGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradATrans = false;
static constexpr auto kYGradBTrans = false;
};
template <>
struct FusedGEMMGradTrait<false, true> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradB = FusedGEMMGradInType::kDY;
static constexpr auto kXGradATrans = false;
static constexpr auto kXGradBTrans = false;
static constexpr auto kYGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradB = FusedGEMMGradInType::kDX;
static constexpr auto kYGradATrans = true;
static constexpr auto kYGradBTrans = false;
};
template <>
struct FusedGEMMGradTrait<true, true> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDY;
static constexpr auto kXGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradATrans = true;
static constexpr auto kXGradBTrans = true;
static constexpr auto kYGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradB = FusedGEMMGradInType::kDX;
static constexpr auto kYGradATrans = true;
static constexpr auto kYGradBTrans = true;
};
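// Reference (standard matmul gradient algebra, not stated in the original):
// with Out = X * Y and no transposes, dX = dOut * Y^T and dY = X^T * dOut.
// FusedGEMMGradTrait<false, false> above encodes exactly that mapping:
// kXGradA = dOut, kXGradB = Y with kXGradBTrans = true, and
// kYGradA = X with kYGradATrans = true, kYGradB = dOut.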
static constexpr auto BoolToCuBlasEnum(bool transpose) {
return transpose ? HIPBLAS_OP_T : HIPBLAS_OP_N;
}
template <typename DeviceContext, typename T>
class FusedGemmEpilogueGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
bool transpose_x = ctx.Attr<bool>("trans_x");
bool transpose_y = ctx.Attr<bool>("trans_y");
if (transpose_x) {
if (transpose_y) {
ComputeImpl<true, true>(ctx);
} else {
ComputeImpl<true, false>(ctx);
}
} else {
if (transpose_y) {
ComputeImpl<false, true>(ctx);
} else {
ComputeImpl<false, false>(ctx);
}
}
}
private:
template <bool TransX, bool TransY>
static void ComputeImpl(const framework::ExecutionContext& ctx) {
using Trait = FusedGEMMGradTrait<TransX, TransY>;
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
const Tensor* dout = ctx.Input<Tensor>("DOut");
const Tensor* x = ctx.Input<Tensor>("X");
const Tensor* y = ctx.Input<Tensor>("Y");
const Tensor* reserve_space = ctx.Input<Tensor>("ReserveSpace");
Tensor* dx = ctx.Output<Tensor>("DX");
Tensor* dy = ctx.Output<Tensor>("DY");
Tensor* dbias = ctx.Output<Tensor>("DBias");
std::string activation_grad = ctx.Attr<std::string>("activation_grad");
VLOG(10) << "trans_x = " << TransX << " , trans_y = " << TransY
<< " , activation_grad = " << activation_grad;
auto x_mat_dims =
phi::flatten_to_2d(x->dims(), TransX ? 1 : x->dims().size() - 1);
// (M * K) * (K * N)
int64_t M = TransX ? x_mat_dims[1] : x_mat_dims[0];
int64_t K = TransY ? y->dims()[1] : y->dims()[0];
int64_t N = TransY ? y->dims()[0] : y->dims()[1];
VLOG(10) << "M = " << M << " , K = " << K << " , N = " << N;
hipDataType mat_type = HIP_R_32F;
hipDataType scale_type = HIP_R_32F;
hipblasComputeType_t compute_type = CUBLAS_COMPUTE_32F;
if (std::is_same<T, paddle::platform::float16>::value) {
mat_type = HIP_R_16F;
}
if (std::is_same<T, double>::value) {
mat_type = HIP_R_64F;
scale_type = HIP_R_64F;
compute_type = CUBLAS_COMPUTE_64F;
}
cublasLtHandle_t lt_handle = dev_ctx.cublaslt_handle();
// NOTE(zengjinle): I do not know whether the 4MB workspace size is
// "enough". I just followed the settings from the NVIDIA MLPerf BERT code.
size_t workspace_size = static_cast<size_t>(4) * 1024 * 1024;
const cublasLtMatmulAlgo_t* algo = nullptr;
hipStream_t stream = dev_ctx.stream();
double alpha64 = 1.0, beta64 = 0.0;
float alpha32 = 1.0f, beta32 = 0.0f;
void *alpha = nullptr, *beta = nullptr;
if (std::is_same<T, double>::value) {
alpha = &alpha64;
beta = &beta64;
} else {
alpha = &alpha32;
beta = &beta32;
}
cublasLtMatrixLayout_t dout_desc = nullptr, dout_trans_desc = nullptr;
cublasLtMatrixLayout_t x_desc = nullptr, x_trans_desc = nullptr;
cublasLtMatrixLayout_t y_desc = nullptr, y_trans_desc = nullptr;
cublasLtMatrixLayout_t dx_desc = nullptr, dy_desc = nullptr;
cublasLtMatmulDesc_t dx_operation_desc = nullptr,
dy_operation_desc = nullptr;
DEFINE_PADDLE_SCOPE_GUARD([&] {
auto descs = {dout_desc,
dout_trans_desc,
x_desc,
x_trans_desc,
y_desc,
y_trans_desc,
dx_desc,
dy_desc};
for (auto desc : descs) {
if (desc) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(desc));
}
}
if (dx_operation_desc) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescDestroy(dx_operation_desc));
}
if (dy_operation_desc) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescDestroy(dy_operation_desc));
}
});
auto x_row = TransX ? K : M;
auto x_col = TransX ? M : K;
auto y_row = TransY ? N : K;
auto y_col = TransY ? K : N;
auto z_row = TransX ? N : M;
auto z_col = TransX ? M : N;
// dx = func(dout, y)
if (dx) {
constexpr auto kXGradAIsDZ = (Trait::kXGradA == FusedGEMMGradInType::kDZ);
cublasLtMatrixLayout_t *dx_dout_desc, *dx_y_desc;
if (TransX) {
dx_dout_desc = &dout_trans_desc;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dx_dout_desc, mat_type, z_row, z_col, z_row));
} else {
dx_dout_desc = &dout_desc;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dx_dout_desc, mat_type, z_col, z_row, z_col));
}
dx_y_desc = &y_trans_desc;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
dx_y_desc, mat_type, y_col, y_row, y_col));
auto& a_desc = kXGradAIsDZ ? (*dx_dout_desc) : (*dx_y_desc);
auto& b_desc = kXGradAIsDZ ? (*dx_y_desc) : (*dx_dout_desc);
auto a_trans = BoolToCuBlasEnum(Trait::kXGradATrans);
auto b_trans = BoolToCuBlasEnum(Trait::kXGradBTrans);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&dx_desc, mat_type, x_col, x_row, x_col));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmulDescCreate(
&dx_operation_desc, compute_type, scale_type));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc,
CUBLASLT_MATMUL_DESC_TRANSB,
&a_trans,
sizeof(a_trans)));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc,
CUBLASLT_MATMUL_DESC_TRANSA,
&b_trans,
sizeof(b_trans)));
cublasLtEpilogue_t epiloque_func_for_dx =
get_epilogue_type_(activation_grad);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE,
&epiloque_func_for_dx,
sizeof(epiloque_func_for_dx)));
if (activation_grad != "none") {
auto* aux_data = reserve_space->data<T>();
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER,
&aux_data,
sizeof(aux_data)));
int64_t aux_ld = TransX ? M : K;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD,
&aux_ld,
sizeof(aux_ld)));
}
auto dx_workspace = memory::Alloc(
dev_ctx.GetPlace(),
workspace_size,
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
auto* dx_data = dev_ctx.Alloc<T>(dx, dx->numel() * sizeof(T));
const auto* y_data = y->data<T>();
const auto* dout_data = dout->data<T>();
const auto* a_data = kXGradAIsDZ ? dout_data : y_data;
const auto* b_data = kXGradAIsDZ ? y_data : dout_data;
auto algo =
GemmEpilogueAlgoCache::Instance().GetGemmAlgo(lt_handle,
dx_operation_desc,
b_desc,
a_desc,
dx_desc,
alpha,
beta,
b_data,
a_data,
dx_data,
stream,
dx_workspace->ptr(),
workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmul(lt_handle,
dx_operation_desc,
alpha,
b_data,
b_desc,
a_data,
a_desc,
beta,
dx_data,
dx_desc,
dx_data,
dx_desc,
algo,
dx_workspace->ptr(),
workspace_size,
stream));
}
// dy = func(dout, x)
if (dy) {
constexpr auto kYGradAIsDZ = (Trait::kYGradA == FusedGEMMGradInType::kDZ);
cublasLtMatrixLayout_t *dy_dout_desc = nullptr, *dy_x_desc = nullptr;
if (TransX) {
dy_dout_desc = &dout_trans_desc;
if (dout_trans_desc == nullptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dy_dout_desc, mat_type, z_row, z_col, z_row));
}
} else {
dy_dout_desc = &dout_desc;
if (dout_desc == nullptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dy_dout_desc, mat_type, z_col, z_row, z_col));
}
}
dy_x_desc = &x_trans_desc;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
dy_x_desc, mat_type, x_col, x_row, x_col));
auto& a_desc = kYGradAIsDZ ? (*dy_dout_desc) : (*dy_x_desc);
auto& b_desc = kYGradAIsDZ ? (*dy_x_desc) : (*dy_dout_desc);
auto a_trans = BoolToCuBlasEnum(Trait::kYGradATrans);
auto b_trans = BoolToCuBlasEnum(Trait::kYGradBTrans);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&dy_desc, mat_type, y_col, y_row, y_col));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmulDescCreate(
&dy_operation_desc, compute_type, scale_type));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc,
CUBLASLT_MATMUL_DESC_TRANSB,
&a_trans,
sizeof(a_trans)));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc,
CUBLASLT_MATMUL_DESC_TRANSA,
&b_trans,
sizeof(b_trans)));
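    // If dbias is requested, its computation is fused into the dY GEMM via a
    // BGRAD epilogue (the bias gradient is a reduction of dOut); whether BGRADA
    // or BGRADB applies depends on which cublasLt operand dOut ends up on,
    // which in turn follows TransY.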
cublasLtEpilogue_t epiloque_func_for_dy;
if (dbias == nullptr) {
epiloque_func_for_dy = CUBLASLT_EPILOGUE_DEFAULT;
} else {
if (TransY) {
epiloque_func_for_dy = CUBLASLT_EPILOGUE_BGRADB;
} else {
epiloque_func_for_dy = CUBLASLT_EPILOGUE_BGRADA;
}
}
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE,
&epiloque_func_for_dy,
sizeof(epiloque_func_for_dy)));
if (dbias) {
auto* dbias_data = dev_ctx.Alloc<T>(dbias, dbias->numel() * sizeof(T));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc,
CUBLASLT_MATMUL_DESC_BIAS_POINTER,
&dbias_data,
sizeof(dbias_data)));
}
auto dy_workspace = memory::Alloc(
dev_ctx.GetPlace(),
workspace_size,
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
auto* dy_data = dev_ctx.Alloc<T>(dy, dy->numel() * sizeof(T));
const auto* dout_data = dout->data<T>();
const auto* x_data = x->data<T>();
const auto* a_data = kYGradAIsDZ ? dout_data : x_data;
const auto* b_data = kYGradAIsDZ ? x_data : dout_data;
auto algo =
GemmEpilogueAlgoCache::Instance().GetGemmAlgo(lt_handle,
dy_operation_desc,
b_desc,
a_desc,
dy_desc,
alpha,
beta,
b_data,
a_data,
dy_data,
stream,
dy_workspace->ptr(),
workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmul(lt_handle,
dy_operation_desc,
alpha,
b_data,
b_desc,
a_data,
a_desc,
beta,
dy_data,
dy_desc,
dy_data,
dy_desc,
algo,
dy_workspace->ptr(),
workspace_size,
stream));
}
}
private:
static cublasLtEpilogue_t get_epilogue_type_(
const std::string& activation_grad) {
if (activation_grad == "relu_grad") {
return CUBLASLT_EPILOGUE_DRELU;
} else if (activation_grad == "gelu_grad") {
return CUBLASLT_EPILOGUE_DGELU;
} else if (activation_grad == "none") {
return CUBLASLT_EPILOGUE_DEFAULT;
} else {
PADDLE_ENFORCE_EQ(
true,
false,
platform::errors::InvalidArgument(
"The activation_grad attribute of fused_gemm_epilogue op should "
"be"
" one of {\"none\", \"relu\", \"gelu\"}. But received %s."
"But received activation_grad=%s.",
activation_grad));
}
}
};
} // namespace operators
} // namespace paddle
#if TORCH_HIP_VERSION >= 11060
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
fused_gemm_epilogue,
ops::FusedGemmEpilogueKernel<phi::GPUContext, float>,
ops::FusedGemmEpilogueKernel<phi::GPUContext, double>,
ops::FusedGemmEpilogueKernel<phi::GPUContext, paddle::platform::float16>);
REGISTER_OP_CUDA_KERNEL(
fused_gemm_epilogue_grad,
ops::FusedGemmEpilogueGradKernel<phi::GPUContext, float>,
ops::FusedGemmEpilogueGradKernel<phi::GPUContext, double>,
ops::FusedGemmEpilogueGradKernel<phi::GPUContext,
paddle::platform::float16>);
#endif
|
6bce5f00e0a073e39d0fcba1b7496f80cfc6fe78.cu
|
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Copyright (c) 2022 NVIDIA Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/fused/fused_gemm_epilogue_op.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/framework/scope_guard.h"
#include "paddle/fluid/platform/dynload/cublasLt.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename DeviceContext, typename T>
class FusedGemmEpilogueKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
const Tensor* x = ctx.Input<Tensor>("X");
const Tensor* y = ctx.Input<Tensor>("Y");
const Tensor* bias = ctx.Input<Tensor>("Bias");
Tensor* out = ctx.Output<Tensor>("Out");
Tensor* reserve_space = ctx.Output<Tensor>("ReserveSpace");
bool trans_x = ctx.Attr<bool>("trans_x");
bool trans_y = ctx.Attr<bool>("trans_y");
std::string activation = ctx.Attr<std::string>("activation");
VLOG(10) << "trans_x = " << trans_x << " , trans_y = " << trans_y
<< " , activation = " << activation;
    bool enable_auxiliary = (reserve_space != nullptr);
dev_ctx.Alloc<T>(out, out->numel() * sizeof(T));
auto* out_data = out->data<T>();
auto x_mat_dims =
phi::flatten_to_2d(x->dims(), trans_x ? 1 : x->dims().size() - 1);
// (M * K) * (K * N)
int64_t M = trans_x ? x_mat_dims[1] : x_mat_dims[0];
int64_t K = trans_y ? y->dims()[1] : y->dims()[0];
int64_t N = trans_y ? y->dims()[0] : y->dims()[1];
cudaDataType_t mat_type = CUDA_R_32F;
cudaDataType_t scale_type = CUDA_R_32F;
cublasComputeType_t compute_type = CUBLAS_COMPUTE_32F;
if (std::is_same<T, paddle::platform::float16>::value) {
mat_type = CUDA_R_16F;
}
if (std::is_same<T, double>::value) {
mat_type = CUDA_R_64F;
scale_type = CUDA_R_64F;
compute_type = CUBLAS_COMPUTE_64F;
}
cublasLtMatmulDesc_t operation_desc = NULL;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmulDescCreate(
&operation_desc, compute_type, scale_type));
cublasOperation_t transx = trans_x ? CUBLAS_OP_T : CUBLAS_OP_N;
cublasOperation_t transy = trans_y ? CUBLAS_OP_T : CUBLAS_OP_N;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_TRANSB,
&transx,
sizeof(transx)));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_TRANSA,
&transy,
sizeof(transy)));
cublasLtEpilogue_t epiloque_func =
get_epilogue_type_(activation, enable_auxiliary);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE,
&epiloque_func,
sizeof(epiloque_func)));
const T* bias_data = bias->data<T>();
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_BIAS_POINTER,
&bias_data,
sizeof(bias_data)));
if (enable_auxiliary && activation != "none") {
size_t reserve_space_size = 0;
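      // The relu auxiliary output is a bitmask (one bit per output element), so
      // it needs numel / 8 bytes; gelu stores the full pre-activation values, so
      // it needs numel * sizeof(T) bytes.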
if (activation == "relu") {
// Count in bits.
reserve_space_size = phi::product(out->dims()) / 8;
} else {
reserve_space_size = phi::product(out->dims()) * sizeof(T);
}
dev_ctx.Alloc(reserve_space, out->type(), reserve_space_size);
void* aux_data = reinterpret_cast<void*>(reserve_space->data<T>());
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER,
&aux_data,
sizeof(aux_data)));
int64_t aux_ld = N;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD,
&aux_ld,
sizeof(aux_ld)));
}
cublasLtMatrixLayout_t x_desc = NULL, y_desc = NULL, out_desc = NULL;
if (trans_x)
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&x_desc, mat_type, M, K, M));
else
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&x_desc, mat_type, K, M, K));
if (trans_y)
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&y_desc, mat_type, K, N, K));
else
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&y_desc, mat_type, N, K, N));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&out_desc, mat_type, N, M, N));
cublasLtHandle_t lt_handle = dev_ctx.cublaslt_handle();
// NOTE(zengjinle): I do not know whether the 4MB workspace size is
// "enough". I just followed the settings from the NVIDIA MLPerf BERT code.
size_t workspace_size = static_cast<size_t>(4) * 1024 * 1024;
cudaStream_t stream = dev_ctx.stream();
memory::allocation::AllocationPtr workspace = memory::Alloc(
dev_ctx.GetPlace(),
workspace_size,
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
double alpha64 = 1.0, beta64 = 0.0;
float alpha32 = 1.0f, beta32 = 0.0f;
void *alpha = nullptr, *beta = nullptr;
if (std::is_same<T, double>::value) {
alpha = &alpha64;
beta = &beta64;
} else {
alpha = &alpha32;
beta = &beta32;
}
const auto* y_data = y->data<T>();
const auto* x_data = x->data<T>();
auto algo = GemmEpilogueAlgoCache::Instance().GetGemmAlgo(lt_handle,
operation_desc,
y_desc,
x_desc,
out_desc,
alpha,
beta,
y_data,
x_data,
out_data,
stream,
workspace->ptr(),
workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmul(lt_handle,
operation_desc,
alpha,
y_data,
y_desc,
x_data,
x_desc,
beta,
out_data,
out_desc,
out_data,
out_desc,
algo,
workspace->ptr(),
workspace_size,
stream));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescDestroy(operation_desc));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(y_desc));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(x_desc));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(out_desc));
}
private:
static cublasLtEpilogue_t get_epilogue_type_(const std::string& activation,
bool enable_auxiliary) {
if (activation == "relu") {
return enable_auxiliary ? CUBLASLT_EPILOGUE_RELU_AUX_BIAS
: CUBLASLT_EPILOGUE_RELU_BIAS;
} else if (activation == "gelu") {
return enable_auxiliary ? CUBLASLT_EPILOGUE_GELU_AUX_BIAS
: CUBLASLT_EPILOGUE_GELU_BIAS;
} else if (activation == "none") {
return CUBLASLT_EPILOGUE_BIAS;
} else {
PADDLE_ENFORCE_EQ(
true,
false,
platform::errors::InvalidArgument(
"The activation attribute of fused_gemm_epilogue op should be"
" one of {\"none\", \"relu\", \"gelu\"}. But received %s."
"But received activation=%s.",
activation));
}
}
};
enum FusedGEMMGradInType { kDX = 0, kDY = 1, kDZ = 2 };
template <bool TransX, bool TransY>
struct FusedGEMMGradTrait;
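// Each specialization below encodes, for Z = op(X) * op(Y), which operands feed the
// two backward GEMMs and whether they are transposed: kXGradA/kXGradB (plus their
// trans flags) describe dX, and kYGradA/kYGradB describe dY. Here kDZ refers to
// dOut, kDX to the forward input X, and kDY to the forward input Y. For example,
// with TransX = TransY = false: dX = dOut * Y^T and dY = X^T * dOut.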
template <>
struct FusedGEMMGradTrait<false, false> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradB = FusedGEMMGradInType::kDY;
static constexpr auto kXGradATrans = false;
static constexpr auto kXGradBTrans = true;
static constexpr auto kYGradA = FusedGEMMGradInType::kDX;
static constexpr auto kYGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradATrans = true;
static constexpr auto kYGradBTrans = false;
};
template <>
struct FusedGEMMGradTrait<true, false> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDY;
static constexpr auto kXGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradATrans = false;
static constexpr auto kXGradBTrans = true;
static constexpr auto kYGradA = FusedGEMMGradInType::kDX;
static constexpr auto kYGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradATrans = false;
static constexpr auto kYGradBTrans = false;
};
template <>
struct FusedGEMMGradTrait<false, true> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradB = FusedGEMMGradInType::kDY;
static constexpr auto kXGradATrans = false;
static constexpr auto kXGradBTrans = false;
static constexpr auto kYGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradB = FusedGEMMGradInType::kDX;
static constexpr auto kYGradATrans = true;
static constexpr auto kYGradBTrans = false;
};
template <>
struct FusedGEMMGradTrait<true, true> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDY;
static constexpr auto kXGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradATrans = true;
static constexpr auto kXGradBTrans = true;
static constexpr auto kYGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradB = FusedGEMMGradInType::kDX;
static constexpr auto kYGradATrans = true;
static constexpr auto kYGradBTrans = true;
};
static constexpr auto BoolToCuBlasEnum(bool transpose) {
return transpose ? CUBLAS_OP_T : CUBLAS_OP_N;
}
template <typename DeviceContext, typename T>
class FusedGemmEpilogueGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
bool transpose_x = ctx.Attr<bool>("trans_x");
bool transpose_y = ctx.Attr<bool>("trans_y");
if (transpose_x) {
if (transpose_y) {
ComputeImpl<true, true>(ctx);
} else {
ComputeImpl<true, false>(ctx);
}
} else {
if (transpose_y) {
ComputeImpl<false, true>(ctx);
} else {
ComputeImpl<false, false>(ctx);
}
}
}
private:
template <bool TransX, bool TransY>
static void ComputeImpl(const framework::ExecutionContext& ctx) {
using Trait = FusedGEMMGradTrait<TransX, TransY>;
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
const Tensor* dout = ctx.Input<Tensor>("DOut");
const Tensor* x = ctx.Input<Tensor>("X");
const Tensor* y = ctx.Input<Tensor>("Y");
const Tensor* reserve_space = ctx.Input<Tensor>("ReserveSpace");
Tensor* dx = ctx.Output<Tensor>("DX");
Tensor* dy = ctx.Output<Tensor>("DY");
Tensor* dbias = ctx.Output<Tensor>("DBias");
std::string activation_grad = ctx.Attr<std::string>("activation_grad");
VLOG(10) << "trans_x = " << TransX << " , trans_y = " << TransY
<< " , activation_grad = " << activation_grad;
auto x_mat_dims =
phi::flatten_to_2d(x->dims(), TransX ? 1 : x->dims().size() - 1);
// (M * K) * (K * N)
int64_t M = TransX ? x_mat_dims[1] : x_mat_dims[0];
int64_t K = TransY ? y->dims()[1] : y->dims()[0];
int64_t N = TransY ? y->dims()[0] : y->dims()[1];
VLOG(10) << "M = " << M << " , K = " << K << " , N = " << N;
cudaDataType_t mat_type = CUDA_R_32F;
cudaDataType_t scale_type = CUDA_R_32F;
cublasComputeType_t compute_type = CUBLAS_COMPUTE_32F;
if (std::is_same<T, paddle::platform::float16>::value) {
mat_type = CUDA_R_16F;
}
if (std::is_same<T, double>::value) {
mat_type = CUDA_R_64F;
scale_type = CUDA_R_64F;
compute_type = CUBLAS_COMPUTE_64F;
}
cublasLtHandle_t lt_handle = dev_ctx.cublaslt_handle();
// NOTE(zengjinle): I do not know whether the 4MB workspace size is
// "enough". I just followed the settings from the NVIDIA MLPerf BERT code.
size_t workspace_size = static_cast<size_t>(4) * 1024 * 1024;
const cublasLtMatmulAlgo_t* algo = nullptr;
cudaStream_t stream = dev_ctx.stream();
double alpha64 = 1.0, beta64 = 0.0;
float alpha32 = 1.0f, beta32 = 0.0f;
void *alpha = nullptr, *beta = nullptr;
if (std::is_same<T, double>::value) {
alpha = &alpha64;
beta = &beta64;
} else {
alpha = &alpha32;
beta = &beta32;
}
cublasLtMatrixLayout_t dout_desc = nullptr, dout_trans_desc = nullptr;
cublasLtMatrixLayout_t x_desc = nullptr, x_trans_desc = nullptr;
cublasLtMatrixLayout_t y_desc = nullptr, y_trans_desc = nullptr;
cublasLtMatrixLayout_t dx_desc = nullptr, dy_desc = nullptr;
cublasLtMatmulDesc_t dx_operation_desc = nullptr,
dy_operation_desc = nullptr;
DEFINE_PADDLE_SCOPE_GUARD([&] {
auto descs = {dout_desc,
dout_trans_desc,
x_desc,
x_trans_desc,
y_desc,
y_trans_desc,
dx_desc,
dy_desc};
for (auto desc : descs) {
if (desc) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(desc));
}
}
if (dx_operation_desc) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescDestroy(dx_operation_desc));
}
if (dy_operation_desc) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescDestroy(dy_operation_desc));
}
});
auto x_row = TransX ? K : M;
auto x_col = TransX ? M : K;
auto y_row = TransY ? N : K;
auto y_col = TransY ? K : N;
auto z_row = TransX ? N : M;
auto z_col = TransX ? M : N;
// dx = func(dout, y)
if (dx) {
constexpr auto kXGradAIsDZ = (Trait::kXGradA == FusedGEMMGradInType::kDZ);
cublasLtMatrixLayout_t *dx_dout_desc, *dx_y_desc;
if (TransX) {
dx_dout_desc = &dout_trans_desc;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dx_dout_desc, mat_type, z_row, z_col, z_row));
} else {
dx_dout_desc = &dout_desc;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dx_dout_desc, mat_type, z_col, z_row, z_col));
}
dx_y_desc = &y_trans_desc;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
dx_y_desc, mat_type, y_col, y_row, y_col));
auto& a_desc = kXGradAIsDZ ? (*dx_dout_desc) : (*dx_y_desc);
auto& b_desc = kXGradAIsDZ ? (*dx_y_desc) : (*dx_dout_desc);
auto a_trans = BoolToCuBlasEnum(Trait::kXGradATrans);
auto b_trans = BoolToCuBlasEnum(Trait::kXGradBTrans);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&dx_desc, mat_type, x_col, x_row, x_col));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmulDescCreate(
&dx_operation_desc, compute_type, scale_type));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc,
CUBLASLT_MATMUL_DESC_TRANSB,
&a_trans,
sizeof(a_trans)));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc,
CUBLASLT_MATMUL_DESC_TRANSA,
&b_trans,
sizeof(b_trans)));
cublasLtEpilogue_t epiloque_func_for_dx =
get_epilogue_type_(activation_grad);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE,
&epiloque_func_for_dx,
sizeof(epiloque_func_for_dx)));
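    // For fused activation backward (DRELU/DGELU), hand cublasLt the auxiliary
    // data that the forward kernel saved in ReserveSpace, together with its
    // leading dimension.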
if (activation_grad != "none") {
auto* aux_data = reserve_space->data<T>();
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER,
&aux_data,
sizeof(aux_data)));
int64_t aux_ld = TransX ? M : K;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD,
&aux_ld,
sizeof(aux_ld)));
}
auto dx_workspace = memory::Alloc(
dev_ctx.GetPlace(),
workspace_size,
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
auto* dx_data = dev_ctx.Alloc<T>(dx, dx->numel() * sizeof(T));
const auto* y_data = y->data<T>();
const auto* dout_data = dout->data<T>();
const auto* a_data = kXGradAIsDZ ? dout_data : y_data;
const auto* b_data = kXGradAIsDZ ? y_data : dout_data;
auto algo =
GemmEpilogueAlgoCache::Instance().GetGemmAlgo(lt_handle,
dx_operation_desc,
b_desc,
a_desc,
dx_desc,
alpha,
beta,
b_data,
a_data,
dx_data,
stream,
dx_workspace->ptr(),
workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmul(lt_handle,
dx_operation_desc,
alpha,
b_data,
b_desc,
a_data,
a_desc,
beta,
dx_data,
dx_desc,
dx_data,
dx_desc,
algo,
dx_workspace->ptr(),
workspace_size,
stream));
}
// dy = func(dout, x)
if (dy) {
constexpr auto kYGradAIsDZ = (Trait::kYGradA == FusedGEMMGradInType::kDZ);
cublasLtMatrixLayout_t *dy_dout_desc = nullptr, *dy_x_desc = nullptr;
if (TransX) {
dy_dout_desc = &dout_trans_desc;
if (dout_trans_desc == nullptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dy_dout_desc, mat_type, z_row, z_col, z_row));
}
} else {
dy_dout_desc = &dout_desc;
if (dout_desc == nullptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dy_dout_desc, mat_type, z_col, z_row, z_col));
}
}
dy_x_desc = &x_trans_desc;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
dy_x_desc, mat_type, x_col, x_row, x_col));
auto& a_desc = kYGradAIsDZ ? (*dy_dout_desc) : (*dy_x_desc);
auto& b_desc = kYGradAIsDZ ? (*dy_x_desc) : (*dy_dout_desc);
auto a_trans = BoolToCuBlasEnum(Trait::kYGradATrans);
auto b_trans = BoolToCuBlasEnum(Trait::kYGradBTrans);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&dy_desc, mat_type, y_col, y_row, y_col));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmulDescCreate(
&dy_operation_desc, compute_type, scale_type));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc,
CUBLASLT_MATMUL_DESC_TRANSB,
&a_trans,
sizeof(a_trans)));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc,
CUBLASLT_MATMUL_DESC_TRANSA,
&b_trans,
sizeof(b_trans)));
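    // If dbias is requested, its computation is fused into the dY GEMM via a
    // BGRAD epilogue (the bias gradient is a reduction of dOut); whether BGRADA
    // or BGRADB applies depends on which cublasLt operand dOut ends up on,
    // which in turn follows TransY.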
cublasLtEpilogue_t epiloque_func_for_dy;
if (dbias == nullptr) {
epiloque_func_for_dy = CUBLASLT_EPILOGUE_DEFAULT;
} else {
if (TransY) {
epiloque_func_for_dy = CUBLASLT_EPILOGUE_BGRADB;
} else {
epiloque_func_for_dy = CUBLASLT_EPILOGUE_BGRADA;
}
}
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE,
&epiloque_func_for_dy,
sizeof(epiloque_func_for_dy)));
if (dbias) {
auto* dbias_data = dev_ctx.Alloc<T>(dbias, dbias->numel() * sizeof(T));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc,
CUBLASLT_MATMUL_DESC_BIAS_POINTER,
&dbias_data,
sizeof(dbias_data)));
}
auto dy_workspace = memory::Alloc(
dev_ctx.GetPlace(),
workspace_size,
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
auto* dy_data = dev_ctx.Alloc<T>(dy, dy->numel() * sizeof(T));
const auto* dout_data = dout->data<T>();
const auto* x_data = x->data<T>();
const auto* a_data = kYGradAIsDZ ? dout_data : x_data;
const auto* b_data = kYGradAIsDZ ? x_data : dout_data;
auto algo =
GemmEpilogueAlgoCache::Instance().GetGemmAlgo(lt_handle,
dy_operation_desc,
b_desc,
a_desc,
dy_desc,
alpha,
beta,
b_data,
a_data,
dy_data,
stream,
dy_workspace->ptr(),
workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmul(lt_handle,
dy_operation_desc,
alpha,
b_data,
b_desc,
a_data,
a_desc,
beta,
dy_data,
dy_desc,
dy_data,
dy_desc,
algo,
dy_workspace->ptr(),
workspace_size,
stream));
}
}
private:
static cublasLtEpilogue_t get_epilogue_type_(
const std::string& activation_grad) {
if (activation_grad == "relu_grad") {
return CUBLASLT_EPILOGUE_DRELU;
} else if (activation_grad == "gelu_grad") {
return CUBLASLT_EPILOGUE_DGELU;
} else if (activation_grad == "none") {
return CUBLASLT_EPILOGUE_DEFAULT;
} else {
PADDLE_ENFORCE_EQ(
true,
false,
platform::errors::InvalidArgument(
"The activation_grad attribute of fused_gemm_epilogue op should "
"be"
" one of {\"none\", \"relu\", \"gelu\"}. But received %s."
"But received activation_grad=%s.",
activation_grad));
}
}
};
} // namespace operators
} // namespace paddle
#if CUDA_VERSION >= 11060
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
fused_gemm_epilogue,
ops::FusedGemmEpilogueKernel<phi::GPUContext, float>,
ops::FusedGemmEpilogueKernel<phi::GPUContext, double>,
ops::FusedGemmEpilogueKernel<phi::GPUContext, paddle::platform::float16>);
REGISTER_OP_CUDA_KERNEL(
fused_gemm_epilogue_grad,
ops::FusedGemmEpilogueGradKernel<phi::GPUContext, float>,
ops::FusedGemmEpilogueGradKernel<phi::GPUContext, double>,
ops::FusedGemmEpilogueGradKernel<phi::GPUContext,
paddle::platform::float16>);
#endif
|
de173b1548e312080f33bda93f9bd776a0ee143d.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* 3DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
/* Problem size */
#define NI 4096
#define NJ 256
#define NK 256
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 1024
#define DIM_THREAD_BLOCK_Y 1
/* Can switch DATA_TYPE between float and double */
typedef double DATA_TYPE;
#define NUM_STREAMS 3
#define NUM_CHUNKS 16
#define CHUNK_SIZE (NI/NUM_CHUNKS) // 4096/16 = 256 planes per chunk
void conv3D(DATA_TYPE* A, DATA_TYPE* B)
{
int i, j, k;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
for (i = 1; i < NI - 1; ++i) // 0
{
for (j = 1; j < NJ - 1; ++j) // 1
{
for (k = 1; k < NK -1; ++k) // 2
{
//printf("i:%d\nj:%d\nk:%d\n", i, j, k);
B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
+ c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
+ c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
+ c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
+ c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
}
}
}
}
void init(DATA_TYPE* A)
{
int i, j, k;
for (i = 0; i < NI; ++i)
{
for (j = 0; j < NJ; ++j)
{
for (k = 0; k < NK; ++k)
{
A[i*(NK * NJ) + j*NK + k] = i % 12 + 2 * (j % 7) + 3 * (k % 13);
}
}
}
}
void compareResults(DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
int i, j, k, fail;
fail = 0;
// Compare result from cpu and gpu...
for (i = 1; i < NI - 1; ++i) // 0
{
for (j = 1; j < NJ - 1; ++j) // 1
{
for (k = 1; k < NK - 1; ++k) // 2
{
if (percentDiff(B[i*(NK * NJ) + j*NK + k], B_outputFromGpu[i*(NK * NJ) + j*NK + k]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
//printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
	hipSetDevice( GPU_DEVICE );
}
__global__ void convolution3D_kernel(DATA_TYPE *A, DATA_TYPE *B, int i)
{
int k = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
if ((i < (NI-1)) && (j < (NJ-1)) && (k < (NK-1)) && (i > 0) && (j > 0) && (k > 0))
{
B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
+ c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
+ c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
+ c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
+ c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
}
}
void convolution3DCuda(DATA_TYPE* A, DATA_TYPE* B_outputFromGpu)
{
//double t_start, t_end;
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
hipEvent_t start,stop;
float elapsedTimeInMs = 0.0f;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ * NK, hipMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)(ceil( ((float)NK) / ((float)block.x) )), (size_t)(ceil( ((float)NJ) / ((float)block.y) )));
int i;
for (i = 1; i < NI - 1; ++i) // 0
{
hipLaunchKernelGGL(( convolution3D_kernel), dim3(grid), dim3(block) , 0, 0, A_gpu, B_gpu, i);
}
hipMemcpy(B_outputFromGpu, B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTimeInMs, start, stop);
fprintf(stdout,"GPU RunTime= %.3f Ms \n", elapsedTimeInMs);
hipFree(A_gpu);
hipFree(B_gpu);
}
void convolution3DCuda_async(DATA_TYPE* A, DATA_TYPE* B_outputFromGpu)
{
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
hipEvent_t start,stop;
float elapsedTimeInMs = 0.0f;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
hipStream_t streams[NUM_STREAMS];
for (int i=0; i< NUM_STREAMS; i++)
hipStreamCreate(&(streams[i]));
//hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ * NK, hipMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)(ceil( ((float)NK) / ((float)block.x) )), (size_t)(ceil( ((float)NJ) / ((float)block.y) )));
int i,c;
//input initialization
hipMemcpyAsync(A_gpu, A, sizeof(DATA_TYPE)*NJ*NK, hipMemcpyHostToDevice,streams[0]);
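	// Pipeline: the NI planes are processed in NUM_CHUNKS chunks; each chunk's
	// host-to-device copy, per-plane kernel launches and device-to-host copy are
	// issued on one of NUM_STREAMS streams (round-robin), so transfers of one chunk
	// can overlap with compute on another. Plane 0 is copied up front because the
	// first chunk's kernels read it as the (i - 1) neighbour. Note that the first
	// kernel of chunk c also reads planes copied by chunk c-1 on a different stream,
	// and no explicit cross-stream synchronization is inserted here.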
for (c=0; c < NUM_CHUNKS; c++){
if (c==(NUM_CHUNKS-1)){
hipMemcpyAsync(A_gpu+(c*CHUNK_SIZE+1)*NJ*NK, A+(c*CHUNK_SIZE+1)*NJ*NK, sizeof(DATA_TYPE)*NJ*NK*(CHUNK_SIZE-1), hipMemcpyHostToDevice,streams[c % NUM_STREAMS]);
}else{
hipMemcpyAsync(A_gpu+(c*CHUNK_SIZE+1)*NJ*NK, A+(c*CHUNK_SIZE+1)*NJ*NK, sizeof(DATA_TYPE)*NJ*NK*CHUNK_SIZE, hipMemcpyHostToDevice,streams[c % NUM_STREAMS]);
}
for (i = (c*CHUNK_SIZE ); i < ((c+1)*CHUNK_SIZE); ++i) // 0
{
if ((i>=1)&&(i<(NI-1)))
hipLaunchKernelGGL(( convolution3D_kernel), dim3(grid), dim3(block),0,streams[c % NUM_STREAMS] , A_gpu, B_gpu, i);
}
hipMemcpyAsync(B_outputFromGpu+c*CHUNK_SIZE*NK*NJ,B_gpu+c*CHUNK_SIZE*NK*NJ,sizeof(DATA_TYPE)*NJ*NK*CHUNK_SIZE,hipMemcpyDeviceToHost,streams[c % NUM_STREAMS]);
}
//hipMemcpy(B_outputFromGpu, B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTimeInMs, start, stop);
fprintf(stdout,"GPU RunTime= %.2f Ms \n", elapsedTimeInMs);
//hipFree(A_gpu);
//hipFree(B_gpu);
}
int main(int argc, char *argv[])
{
//double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* B_outputFromGpu;
//A = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
//B = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
//B_outputFromGpu = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
hipHostMalloc((void **)&A, sizeof(DATA_TYPE) * NI * NJ * NK, hipHostMallocPortable);
hipHostMalloc((void **)&B, sizeof(DATA_TYPE) * NI * NJ * NK, hipHostMallocPortable);
hipHostMalloc((void **)&B_outputFromGpu, sizeof(DATA_TYPE) * NI * NJ *NK, hipHostMallocPortable);
init(A);
GPU_argv_init();
convolution3DCuda_async(A, B_outputFromGpu);
conv3D(A,B);
compareResults(B, B_outputFromGpu);
	hipHostFree(A);
	hipHostFree(B);
	hipHostFree(B_outputFromGpu);
return 0;
}
|
de173b1548e312080f33bda93f9bd776a0ee143d.cu
|
/**
* 3DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <cuda.h>
#include "polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5
#define GPU_DEVICE 0
/* Problem size */
#define NI 4096
#define NJ 256
#define NK 256
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 1024
#define DIM_THREAD_BLOCK_Y 1
/* Can switch DATA_TYPE between float and double */
typedef double DATA_TYPE;
#define NUM_STREAMS 3
#define NUM_CHUNKS 16
#define CHUNK_SIZE (NI/NUM_CHUNKS) // 4096/16 = 256 planes per chunk
void conv3D(DATA_TYPE* A, DATA_TYPE* B)
{
int i, j, k;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
for (i = 1; i < NI - 1; ++i) // 0
{
for (j = 1; j < NJ - 1; ++j) // 1
{
for (k = 1; k < NK -1; ++k) // 2
{
//printf("i:%d\nj:%d\nk:%d\n", i, j, k);
B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
+ c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
+ c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
+ c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
+ c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
}
}
}
}
void init(DATA_TYPE* A)
{
int i, j, k;
for (i = 0; i < NI; ++i)
{
for (j = 0; j < NJ; ++j)
{
for (k = 0; k < NK; ++k)
{
A[i*(NK * NJ) + j*NK + k] = i % 12 + 2 * (j % 7) + 3 * (k % 13);
}
}
}
}
void compareResults(DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
int i, j, k, fail;
fail = 0;
// Compare result from cpu and gpu...
for (i = 1; i < NI - 1; ++i) // 0
{
for (j = 1; j < NJ - 1; ++j) // 1
{
for (k = 1; k < NK - 1; ++k) // 2
{
if (percentDiff(B[i*(NK * NJ) + j*NK + k], B_outputFromGpu[i*(NK * NJ) + j*NK + k]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
//printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
	cudaSetDevice( GPU_DEVICE );
}
__global__ void convolution3D_kernel(DATA_TYPE *A, DATA_TYPE *B, int i)
{
int k = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
if ((i < (NI-1)) && (j < (NJ-1)) && (k < (NK-1)) && (i > 0) && (j > 0) && (k > 0))
{
B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
+ c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
+ c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
+ c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
+ c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
}
}
void convolution3DCuda(DATA_TYPE* A, DATA_TYPE* B_outputFromGpu)
{
//double t_start, t_end;
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
cudaEvent_t start,stop;
float elapsedTimeInMs = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ * NK, cudaMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)(ceil( ((float)NK) / ((float)block.x) )), (size_t)(ceil( ((float)NJ) / ((float)block.y) )));
int i;
for (i = 1; i < NI - 1; ++i) // 0
{
convolution3D_kernel<<< grid, block >>>(A_gpu, B_gpu, i);
}
cudaMemcpy(B_outputFromGpu, B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimeInMs, start, stop);
fprintf(stdout,"GPU RunTime= %.3f Ms \n", elapsedTimeInMs);
cudaFree(A_gpu);
cudaFree(B_gpu);
}
void convolution3DCuda_async(DATA_TYPE* A, DATA_TYPE* B_outputFromGpu)
{
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
cudaEvent_t start,stop;
float elapsedTimeInMs = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
cudaStream_t streams[NUM_STREAMS];
for (int i=0; i< NUM_STREAMS; i++)
cudaStreamCreate(&(streams[i]));
//cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ * NK, cudaMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)(ceil( ((float)NK) / ((float)block.x) )), (size_t)(ceil( ((float)NJ) / ((float)block.y) )));
int i,c;
//input initialization
cudaMemcpyAsync(A_gpu, A, sizeof(DATA_TYPE)*NJ*NK, cudaMemcpyHostToDevice,streams[0]);
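	// Pipeline: the NI planes are processed in NUM_CHUNKS chunks; each chunk's
	// host-to-device copy, per-plane kernel launches and device-to-host copy are
	// issued on one of NUM_STREAMS streams (round-robin), so transfers of one chunk
	// can overlap with compute on another. Plane 0 is copied up front because the
	// first chunk's kernels read it as the (i - 1) neighbour. Note that the first
	// kernel of chunk c also reads planes copied by chunk c-1 on a different stream,
	// and no explicit cross-stream synchronization is inserted here.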
for (c=0; c < NUM_CHUNKS; c++){
if (c==(NUM_CHUNKS-1)){
cudaMemcpyAsync(A_gpu+(c*CHUNK_SIZE+1)*NJ*NK, A+(c*CHUNK_SIZE+1)*NJ*NK, sizeof(DATA_TYPE)*NJ*NK*(CHUNK_SIZE-1), cudaMemcpyHostToDevice,streams[c % NUM_STREAMS]);
}else{
cudaMemcpyAsync(A_gpu+(c*CHUNK_SIZE+1)*NJ*NK, A+(c*CHUNK_SIZE+1)*NJ*NK, sizeof(DATA_TYPE)*NJ*NK*CHUNK_SIZE, cudaMemcpyHostToDevice,streams[c % NUM_STREAMS]);
}
for (i = (c*CHUNK_SIZE ); i < ((c+1)*CHUNK_SIZE); ++i) // 0
{
if ((i>=1)&&(i<(NI-1)))
convolution3D_kernel<<< grid, block,0,streams[c % NUM_STREAMS] >>>(A_gpu, B_gpu, i);
}
cudaMemcpyAsync(B_outputFromGpu+c*CHUNK_SIZE*NK*NJ,B_gpu+c*CHUNK_SIZE*NK*NJ,sizeof(DATA_TYPE)*NJ*NK*CHUNK_SIZE,cudaMemcpyDeviceToHost,streams[c % NUM_STREAMS]);
}
//cudaMemcpy(B_outputFromGpu, B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
cudaDeviceSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimeInMs, start, stop);
fprintf(stdout,"GPU RunTime= %.2f Ms \n", elapsedTimeInMs);
//cudaFree(A_gpu);
//cudaFree(B_gpu);
}
int main(int argc, char *argv[])
{
//double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* B_outputFromGpu;
//A = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
//B = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
//B_outputFromGpu = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
cudaHostAlloc((void **)&A, sizeof(DATA_TYPE) * NI * NJ * NK, cudaHostAllocPortable);
cudaHostAlloc((void **)&B, sizeof(DATA_TYPE) * NI * NJ * NK, cudaHostAllocPortable);
cudaHostAlloc((void **)&B_outputFromGpu, sizeof(DATA_TYPE) * NI * NJ *NK, cudaHostAllocPortable);
init(A);
GPU_argv_init();
convolution3DCuda_async(A, B_outputFromGpu);
conv3D(A,B);
compareResults(B, B_outputFromGpu);
	cudaFreeHost(A);
	cudaFreeHost(B);
	cudaFreeHost(B_outputFromGpu);
return 0;
}
|
724db4287001ccc9d0c149bb86cfdbeec4abc264.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "CudaObject.h"
#include "CudaCommon.cuh"
namespace gpu_cuda {
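// Route (concatenation) layer kernels: the forward pass copies an input tensor into
// the output volume starting at channel offset z_offset; the backward pass
// accumulates the matching channel slice of the upstream gradient back into this
// input's gradient buffer (see the commented reference loops below).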
__global__ void calcRouteForwardGPU(float *in, float *out, int in_size_x, int in_size_y, int in_size_z, int z_offset, int elements )
{
// int i = blockIdx.x*blockDim.x + threadIdx.x;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if( id < elements ){
int id_in = id;
int x = id % in_size_x;
id /= in_size_x;
int y = id % in_size_y;
id /= in_size_y;
int z = id % in_size_z;
id /= in_size_z;
int b = id;
int id_out = b * (in_size_z * in_size_x * in_size_y) + (z + z_offset) * (in_size_x * in_size_y) + y * (in_size_x) + x;
out[id_out] = in[id_in];
}
/* original code
for ( int b = 0; b < layer_in.size.b; ++b ){
for ( int z = 0; z < layer_in.size.z; ++z ){
for ( int y = 0; y < layer_in.size.y; y++ ){
for ( int x = 0; x < layer_in.size.x; x++ ){
out( b, x, y, z_offset+z ) = layer_in( b, x, y, z );
}
}
}
}
*/
}
__global__ void calcRouteBackwardGPU( float *dz_in, float *dz, int in_size_x, int in_size_y, int in_size_z, int z_offset, int elements )
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if( id < elements ){
int id_out = id;
int x = id % in_size_x;
id /= in_size_x;
int y = id % in_size_y;
id /= in_size_y;
int z = id % in_size_z;
id /= in_size_z;
int b = id;
int id_in = b * (in_size_z * in_size_x * in_size_y) + (z + z_offset) * (in_size_x * in_size_y) + y * (in_size_x) + x;
dz[id_out] += dz_in[id_in];
}
/*
for ( int b = 0; b < layer_dz.size.b; ++b ){
for ( int z = 0; z < layer_dz.size.z; ++z ){
for ( int y = 0; y < layer_dz.size.y; ++y ){
for ( int x = 0; x < layer_dz.size.x; ++x ){
layer_dz( b, x, y, z ) += dz_in( b, x, y, z_offset+z );
}
}
}
}
*/
}
void routeForwardGPU(float *in, float *out, int N, int in_size_x, int in_size_y, int in_size_z, int z_offset )
{
CudaObject cuda = CudaObject();
dim3 grid = cuda.cudaGridSize(N);
hipLaunchKernelGGL(( calcRouteForwardGPU), dim3(grid), dim3(BLOCK), 0, 0, in, out, in_size_x, in_size_y, in_size_z, z_offset, N );
}
void routeBackwardAddFirstArrayToSecondArrayGPU( float *dz_next_layer, float *dz_in, int N )
{
CudaObject cuda = CudaObject();
dim3 grid_in = cuda.cudaGridSize(N);
hipLaunchKernelGGL(( cudaAddFirstArrayToSecondArray), dim3(grid_in), dim3(BLOCK), 0, 0, dz_next_layer, dz_in, N );
}
void routeBackwardGPU( float *dz_in, float *dz, int N, int in_size_x, int in_size_y, int in_size_z, int z_offset )
{
CudaObject cuda = CudaObject();
dim3 grid = cuda.cudaGridSize(N);
hipLaunchKernelGGL(( calcRouteBackwardGPU), dim3(grid), dim3(BLOCK), 0, 0, dz_in, dz, in_size_x, in_size_y, in_size_z, z_offset, N );
}
} // namespace gpu
|
724db4287001ccc9d0c149bb86cfdbeec4abc264.cu
|
#include <stdio.h>
#include "CudaObject.h"
#include "CudaCommon.cuh"
namespace gpu_cuda {
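// Route (concatenation) layer kernels: the forward pass copies an input tensor into
// the output volume starting at channel offset z_offset; the backward pass
// accumulates the matching channel slice of the upstream gradient back into this
// input's gradient buffer (see the commented reference loops below).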
__global__ void calcRouteForwardGPU(float *in, float *out, int in_size_x, int in_size_y, int in_size_z, int z_offset, int elements )
{
// int i = blockIdx.x*blockDim.x + threadIdx.x;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if( id < elements ){
int id_in = id;
int x = id % in_size_x;
id /= in_size_x;
int y = id % in_size_y;
id /= in_size_y;
int z = id % in_size_z;
id /= in_size_z;
int b = id;
int id_out = b * (in_size_z * in_size_x * in_size_y) + (z + z_offset) * (in_size_x * in_size_y) + y * (in_size_x) + x;
out[id_out] = in[id_in];
}
/* original code
for ( int b = 0; b < layer_in.size.b; ++b ){
for ( int z = 0; z < layer_in.size.z; ++z ){
for ( int y = 0; y < layer_in.size.y; y++ ){
for ( int x = 0; x < layer_in.size.x; x++ ){
out( b, x, y, z_offset+z ) = layer_in( b, x, y, z );
}
}
}
}
*/
}
__global__ void calcRouteBackwardGPU( float *dz_in, float *dz, int in_size_x, int in_size_y, int in_size_z, int z_offset, int elements )
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if( id < elements ){
int id_out = id;
int x = id % in_size_x;
id /= in_size_x;
int y = id % in_size_y;
id /= in_size_y;
int z = id % in_size_z;
id /= in_size_z;
int b = id;
int id_in = b * (in_size_z * in_size_x * in_size_y) + (z + z_offset) * (in_size_x * in_size_y) + y * (in_size_x) + x;
dz[id_out] += dz_in[id_in];
}
/*
for ( int b = 0; b < layer_dz.size.b; ++b ){
for ( int z = 0; z < layer_dz.size.z; ++z ){
for ( int y = 0; y < layer_dz.size.y; ++y ){
for ( int x = 0; x < layer_dz.size.x; ++x ){
layer_dz( b, x, y, z ) += dz_in( b, x, y, z_offset+z );
}
}
}
}
*/
}
void routeForwardGPU(float *in, float *out, int N, int in_size_x, int in_size_y, int in_size_z, int z_offset )
{
CudaObject cuda = CudaObject();
dim3 grid = cuda.cudaGridSize(N);
calcRouteForwardGPU<<<grid, BLOCK>>>(in, out, in_size_x, in_size_y, in_size_z, z_offset, N );
}
void routeBackwardAddFirstArrayToSecondArrayGPU( float *dz_next_layer, float *dz_in, int N )
{
CudaObject cuda = CudaObject();
dim3 grid_in = cuda.cudaGridSize(N);
cudaAddFirstArrayToSecondArray<<<grid_in, BLOCK>>>( dz_next_layer, dz_in, N );
}
void routeBackwardGPU( float *dz_in, float *dz, int N, int in_size_x, int in_size_y, int in_size_z, int z_offset )
{
CudaObject cuda = CudaObject();
dim3 grid = cuda.cudaGridSize(N);
calcRouteBackwardGPU<<<grid, BLOCK>>>( dz_in, dz, in_size_x, in_size_y, in_size_z, z_offset, N );
}
} // namespace gpu
|
773fe55230d63ba34c83048fc3f03d31355923a1.hip
|
// !!! This is a file automatically generated by hipify!!!
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include "array.hpp"
#include "types.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "kernel_dispatcher.hpp"
#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/tensor.hpp"
#include "../cuda4dnn/csl/span.hpp"
#include <opencv2/core.hpp>
#include <cstddef>
#include <vector>
#include <iostream>
using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
namespace raw {
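        /* Grid-stride slice kernel: each flat output index is decomposed into
         * per-axis coordinates using the output strides, the per-axis slice
         * offsets are added, and the coordinates are re-linearised with the input
         * strides to locate the source element. */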
template <class T, std::size_t Rank>
__global__ void slice(
Span<T> output, array<size_type, Rank> out_strides,
View<T> input, array<size_type, Rank> in_strides, array<index_type, Rank> in_offset)
{
for (auto i : grid_stride_range(output.size())) {
index_type out_index = i / out_strides[0];
index_type in_index = in_offset[0] + out_index;
index_type iidx = in_index * in_strides[0];
for (int j = 1; j < Rank; j++) {
out_index = (i % out_strides[j - 1]) / out_strides[j];
in_index = in_offset[j] + out_index;
iidx += in_index * in_strides[j];
}
output[i] = input[iidx];
}
}
}
template <class T, std::size_t Rank> static
void launch_slice(
const Stream& stream,
Span<T> output, const std::vector<std::size_t>& outStride,
View<T> input, const std::vector<std::size_t>& inStride, const std::vector<std::size_t>& inOffset)
{
CV_Assert(outStride.size() == Rank);
CV_Assert(inStride.size() == Rank);
CV_Assert(inOffset.size() == Rank);
array<size_type, Rank> outStride_k, inStride_k;
outStride_k.assign(std::begin(outStride), std::end(outStride));
inStride_k.assign(std::begin(inStride), std::end(inStride));
array<index_type, Rank> inOffset_k;
inOffset_k.assign(std::begin(inOffset), std::end(inOffset));
auto kernel = raw::slice<T, Rank>;
auto policy = make_policy(kernel, output.size(), 0, stream);
launch_kernel(kernel, policy, output, outStride_k, input, inStride_k, inOffset_k);
}
GENERATE_KERNEL_DISPATCHER(slice_dispatcher, launch_slice);
template <class T>
void slice(const Stream& stream,
TensorSpan<T> output, TensorView<T> input,
std::vector<std::size_t> offsets)
{
CV_Assert(output.rank() == input.rank());
CV_Assert(output.rank() == offsets.size());
/* squeezable axes at the beginning of both tensors can be eliminated
*
* Reasoning:
* ----------
* Suppose an item's indices in the output tensor is [o1, o2, ...]. The indices in the input
* tensor will be [o1 + off1, o2 + off2, ...]. The rest of the elements in the input are ignored.
*
* If the size of the first axis of the input and output tensor is unity, the input and output indices
     * for all the elements will be of the form [0, o2 + off2, ...] and [0, o2, ...] respectively. Note that
     * there cannot be any ignored items since the axes have unit size. The first index does not contribute to the
     * element's address calculation and hence does nothing apart from eating up a few cycles.
*/
while (input.get_axis_size(0) == 1 && output.get_axis_size(0) == 1) {
CV_Assert(offsets[0] == 0);
input.squeeze(0);
output.squeeze(0);
offsets.erase(std::begin(offsets));
CV_Assert(output.rank() == input.rank());
CV_Assert(output.rank() == offsets.size());
}
auto inShape = input.shape_as_vector();
auto outShape = output.shape_as_vector();
/* contiguous axes which do not undergo slicing can be combined into one axis
*
* Reasoning:
* ----------
* Suppose an item's indices in the output tensor is [o1, o2, o3, ...]. Let the first two axes not undergo any
* slicing. The indices in the input tensor will be [o1, o2, o3 + off3, ...].
*
* Each axis in the contiguous unsliced axes sequence will add an offset of iN * strideN. In the above example,
* the two axes add a total offset of `o1 * stride1 + o2 * stride2`. We can merge the two axes into one axis with
     * a size of `size1 * size2`. The new offset added will be `o12 * stride2` as the kernel iterates through `o12`.
* Note that `o12` is actually `(o1 * size2 + o2)` in the original tensor.
*/
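    /* For instance, inShape = [2, 3, 4, 5], outShape = [2, 3, 4, 2] and
     * offsets = [0, 0, 0, 3] collapse to inShape = [24, 5], outShape = [24, 2]
     * and offsets = [0, 3]: only the sliced innermost axis is left separate. */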
for (int i = 0; i < inShape.size(); i++) {
/* check if axis `i` requires any slicing */
if (offsets[i] == 0 && inShape[i] == outShape[i]) {
/* loop invariant: `i` is the first axis in the contiguous unsliced axis sequence */
int j = i + 1; /* `j` is the axis which we will attempt to merge */
while (j < inShape.size() && offsets[j] == 0 && inShape[j] == outShape[j]) {
/* `j` axis is also unsliced; merge `i` and `j` */
auto new_size = inShape[i] * inShape[j];
inShape[i] = new_size;
outShape[i] = new_size;
offsets[i] = 0; /* redundant */
/* delete axis `j` */
inShape.erase(std::begin(inShape) + j);
outShape.erase(std::begin(outShape) + j);
offsets.erase(std::begin(offsets) + j);
/* optimizations should not break the invariants */
CV_Assert(inShape.size() == outShape.size());
CV_Assert(inShape.size() == offsets.size());
CV_Assert(inShape[i] == outShape[i]);
CV_Assert(offsets[i] == 0);
}
}
}
auto rank = inShape.size();
std::vector<std::size_t> inStride(rank), outStride(rank);
inStride.back() = 1;
outStride.back() = 1;
/* garbage, ..., garbage, 1 */
std::copy(std::begin(inShape) + 1, std::end(inShape), std::begin(inStride));
std::copy(std::begin(outShape) + 1, std::end(outShape), std::begin(outStride));
    /* dim[1], dim[2], ..., dim[rank-1], 1 */
std::partial_sum(inStride.rbegin(), inStride.rend(), inStride.rbegin(), std::multiplies<std::size_t>());
std::partial_sum(outStride.rbegin(), outStride.rend(), outStride.rbegin(), std::multiplies<std::size_t>());
/* stride[0], stride[1], ..., stride[-2], 1 */
CV_Assert(1 <= rank && rank <= CSL_MAX_TENSOR_RANK);
slice_dispatcher<T, 1, CSL_MAX_TENSOR_RANK>(rank, stream, output, outStride, input, inStride, offsets);
}
template void slice(const Stream&, TensorSpan<__half>, TensorView<__half>, std::vector<std::size_t>);
template void slice(const Stream&, TensorSpan<float>, TensorView<float>, std::vector<std::size_t>);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */
|
773fe55230d63ba34c83048fc3f03d31355923a1.cu
|
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include "array.hpp"
#include "types.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "kernel_dispatcher.hpp"
#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/tensor.hpp"
#include "../cuda4dnn/csl/span.hpp"
#include <opencv2/core.hpp>
#include <cstddef>
#include <vector>
#include <iostream>
using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
namespace raw {
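        /* Grid-stride slice kernel: each flat output index is decomposed into
         * per-axis coordinates using the output strides, the per-axis slice
         * offsets are added, and the coordinates are re-linearised with the input
         * strides to locate the source element. */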
template <class T, std::size_t Rank>
__global__ void slice(
Span<T> output, array<size_type, Rank> out_strides,
View<T> input, array<size_type, Rank> in_strides, array<index_type, Rank> in_offset)
{
for (auto i : grid_stride_range(output.size())) {
index_type out_index = i / out_strides[0];
index_type in_index = in_offset[0] + out_index;
index_type iidx = in_index * in_strides[0];
for (int j = 1; j < Rank; j++) {
out_index = (i % out_strides[j - 1]) / out_strides[j];
in_index = in_offset[j] + out_index;
iidx += in_index * in_strides[j];
}
output[i] = input[iidx];
}
}
}
template <class T, std::size_t Rank> static
void launch_slice(
const Stream& stream,
Span<T> output, const std::vector<std::size_t>& outStride,
View<T> input, const std::vector<std::size_t>& inStride, const std::vector<std::size_t>& inOffset)
{
CV_Assert(outStride.size() == Rank);
CV_Assert(inStride.size() == Rank);
CV_Assert(inOffset.size() == Rank);
array<size_type, Rank> outStride_k, inStride_k;
outStride_k.assign(std::begin(outStride), std::end(outStride));
inStride_k.assign(std::begin(inStride), std::end(inStride));
array<index_type, Rank> inOffset_k;
inOffset_k.assign(std::begin(inOffset), std::end(inOffset));
auto kernel = raw::slice<T, Rank>;
auto policy = make_policy(kernel, output.size(), 0, stream);
launch_kernel(kernel, policy, output, outStride_k, input, inStride_k, inOffset_k);
}
GENERATE_KERNEL_DISPATCHER(slice_dispatcher, launch_slice);
template <class T>
void slice(const Stream& stream,
TensorSpan<T> output, TensorView<T> input,
std::vector<std::size_t> offsets)
{
CV_Assert(output.rank() == input.rank());
CV_Assert(output.rank() == offsets.size());
/* squeezable axes at the beginning of both tensors can be eliminated
*
* Reasoning:
* ----------
* Suppose an item's indices in the output tensor is [o1, o2, ...]. The indices in the input
* tensor will be [o1 + off1, o2 + off2, ...]. The rest of the elements in the input are ignored.
*
* If the size of the first axis of the input and output tensor is unity, the input and output indices
     * for all the elements will be of the form [0, o2 + off2, ...] and [0, o2, ...] respectively. Note that
     * there cannot be any ignored items since the axes have unit size. The first index does not contribute to the
     * element's address calculation and hence does nothing apart from eating up a few cycles.
*/
while (input.get_axis_size(0) == 1 && output.get_axis_size(0) == 1) {
CV_Assert(offsets[0] == 0);
input.squeeze(0);
output.squeeze(0);
offsets.erase(std::begin(offsets));
CV_Assert(output.rank() == input.rank());
CV_Assert(output.rank() == offsets.size());
}
auto inShape = input.shape_as_vector();
auto outShape = output.shape_as_vector();
/* contiguous axes which do not undergo slicing can be combined into one axis
*
* Reasoning:
* ----------
* Suppose an item's indices in the output tensor is [o1, o2, o3, ...]. Let the first two axes not undergo any
* slicing. The indices in the input tensor will be [o1, o2, o3 + off3, ...].
*
* Each axis in the contiguous unsliced axes sequence will add an offset of iN * strideN. In the above example,
* the two axes add a total offset of `o1 * stride1 + o2 * stride2`. We can merge the two axes into one axis with
     * a size of `size1 * size2`. The new offset added will be `o12 * stride2` as the kernel iterates through `o12`.
* Note that `o12` is actually `(o1 * size2 + o2)` in the original tensor.
*/
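    /* For instance, inShape = [2, 3, 4, 5], outShape = [2, 3, 4, 2] and
     * offsets = [0, 0, 0, 3] collapse to inShape = [24, 5], outShape = [24, 2]
     * and offsets = [0, 3]: only the sliced innermost axis is left separate. */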
for (int i = 0; i < inShape.size(); i++) {
/* check if axis `i` requires any slicing */
if (offsets[i] == 0 && inShape[i] == outShape[i]) {
/* loop invariant: `i` is the first axis in the contiguous unsliced axis sequence */
int j = i + 1; /* `j` is the axis which we will attempt to merge */
while (j < inShape.size() && offsets[j] == 0 && inShape[j] == outShape[j]) {
/* `j` axis is also unsliced; merge `i` and `j` */
auto new_size = inShape[i] * inShape[j];
inShape[i] = new_size;
outShape[i] = new_size;
offsets[i] = 0; /* redundant */
/* delete axis `j` */
inShape.erase(std::begin(inShape) + j);
outShape.erase(std::begin(outShape) + j);
offsets.erase(std::begin(offsets) + j);
/* optimizations should not break the invariants */
CV_Assert(inShape.size() == outShape.size());
CV_Assert(inShape.size() == offsets.size());
CV_Assert(inShape[i] == outShape[i]);
CV_Assert(offsets[i] == 0);
}
}
}
auto rank = inShape.size();
std::vector<std::size_t> inStride(rank), outStride(rank);
inStride.back() = 1;
outStride.back() = 1;
/* garbage, ..., garbage, 1 */
std::copy(std::begin(inShape) + 1, std::end(inShape), std::begin(inStride));
std::copy(std::begin(outShape) + 1, std::end(outShape), std::begin(outStride));
    /* dim[1], dim[2], ..., dim[rank-1], 1 */
std::partial_sum(inStride.rbegin(), inStride.rend(), inStride.rbegin(), std::multiplies<std::size_t>());
std::partial_sum(outStride.rbegin(), outStride.rend(), outStride.rbegin(), std::multiplies<std::size_t>());
/* stride[0], stride[1], ..., stride[-2], 1 */
CV_Assert(1 <= rank && rank <= CSL_MAX_TENSOR_RANK);
slice_dispatcher<T, 1, CSL_MAX_TENSOR_RANK>(rank, stream, output, outStride, input, inStride, offsets);
}
template void slice(const Stream&, TensorSpan<__half>, TensorView<__half>, std::vector<std::size_t>);
template void slice(const Stream&, TensorSpan<float>, TensorView<float>, std::vector<std::size_t>);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */
|
1f326339bdb353c370b9bd8ebe34a3953e555fb5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Matrix Multiplication in gpu with 2D grid of blocks with 1D block shape
// Compile with: nvcc -o test matrix_multiplication_2D_2D.cu -std=c++11
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <chrono>
// Multiplies matrices using GPU with 2D grid
__global__ void multiply_matrix_gpu(long *matA, long *matB, long *matC, const int n) {
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = blockIdx.y;
if (ix < n && iy < n) {
for(int k=0; k<n; k++) {
matC[iy*n+ix] += matA[iy*n+k] * matB[k*n+ix];
}
}
}
// Multiplies matrices in host
void multiply_matrix_host(long *matA, long *matB, long *matC, int n) {
for(int i = 0; i<n; i++) {
for(int j=0; j<n; j++) {
for(int k=0; k<n; k++) {
matC[i*n+j] += matA[i*n+k] * matB[j+k*n];
}
}
}
}
// Compares two matrices
void checkResult(long *hostRef, long *gpuRef, const int n) {
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < n*n; i++) {
if (abs(hostRef[i] - gpuRef[i]) > epsilon) {
match = 0;
printf("host %ld gpu %ld\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match) printf("Matrix match.\n\n");
else printf("Matrix does not not match.\n\n");
}
int main(int argc, char* argv[]) {
// Set up device
int dev = 0;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
printf("Using Device %d: %s\n", dev, deviceProp.name);
hipSetDevice(dev);
// Size of matrix
int n = 1000;
    int bytes = n * n * sizeof(long);
// Host matrix memory
long *h_a = (long *)malloc(bytes);
long *h_b = (long *)malloc(bytes);
// Results
long *hostRef = (long *)malloc(bytes);
long *gpuRef = (long *)malloc(bytes);
// Initialize matrix on host
for(int i = 0; i < n*n; i++ ) {
h_a[i] = i+1;
h_b[i] = i+1;
}
// Initialize matrix with 0s
memset(hostRef, 0, bytes);
memset(gpuRef, 0, bytes);
// Multiply matrix on host
auto start_cpu = std::chrono::high_resolution_clock::now();
multiply_matrix_host(h_a, h_b, hostRef, n);
auto end_cpu = std::chrono::high_resolution_clock::now();
// Measure total time in host
std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("multiply_matrix_host elapsed %f ms\n", duration_ms.count());
// Device matrix global memory
long *d_a, *d_b, *d_c;
hipMalloc((void **)&d_a, bytes);
hipMalloc((void **)&d_b, bytes);
hipMalloc((void **)&d_c, bytes);
// Transfer data from host to device
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
hipMemset(d_c, 0, bytes); // Initialize matrix with 0s
// Kernel execution configuration
dim3 block(128);
dim3 grid((n + block.x - 1) / block.x, n);
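    // Worked numbers for the default n = 1000: grid = ((1000 + 127) / 128, 1000) = (8, 1000), so each
    // row of C is covered by 8 blocks of 128 threads (1024 threads); the ix < n check in the kernel
    // masks the 24 surplus threads per row.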
printf("grid.x %d grid.y %d block.x %d \n", grid.x, grid.y, block.x);
// Execute kernel
start_cpu = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( multiply_matrix_gpu), dim3(grid), dim3(block), 0, 0, d_a, d_b, d_c, n);
hipDeviceSynchronize();
end_cpu = std::chrono::high_resolution_clock::now();
// Measure total time
duration_ms = end_cpu - start_cpu;
printf("multiply_matrix_gpu elapsed %f ms\n", duration_ms.count());
// Copy result from device to host
hipMemcpy(gpuRef, d_c, bytes, hipMemcpyDeviceToHost);
// Check results
checkResult(hostRef, gpuRef, n);
// Free memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(h_a);
free(h_b);
free(hostRef);
free(gpuRef);
hipDeviceReset();
return 0;
}
|
1f326339bdb353c370b9bd8ebe34a3953e555fb5.cu
|
// Matrix Multiplication in gpu with 2D grid of blocks with 1D block shape
// Compile with: nvcc -o test matrix_multiplication_2D_2D.cu -std=c++11
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <chrono>
// Multiplies matrices using GPU with 2D grid
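// Each thread computes one output element C[iy*n + ix]: ix comes from the 1D thread blocks tiled
// along the grid's x dimension, while iy is taken directly from blockIdx.y, so the grid's y
// dimension supplies one block row per matrix row.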
__global__ void multiply_matrix_gpu(long *matA, long *matB, long *matC, const int n) {
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = blockIdx.y;
if (ix < n && iy < n) {
for(int k=0; k<n; k++) {
matC[iy*n+ix] += matA[iy*n+k] * matB[k*n+ix];
}
}
}
// Multiplies matrices in host
void multiply_matrix_host(long *matA, long *matB, long *matC, int n) {
for(int i = 0; i<n; i++) {
for(int j=0; j<n; j++) {
for(int k=0; k<n; k++) {
matC[i*n+j] += matA[i*n+k] * matB[j+k*n];
}
}
}
}
// Compares two matrices
void checkResult(long *hostRef, long *gpuRef, const int n) {
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < n*n; i++) {
if (abs(hostRef[i] - gpuRef[i]) > epsilon) {
match = 0;
printf("host %ld gpu %ld\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match) printf("Matrix match.\n\n");
else printf("Matrix does not not match.\n\n");
}
int main(int argc, char* argv[]) {
// Set up device
int dev = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf("Using Device %d: %s\n", dev, deviceProp.name);
cudaSetDevice(dev);
// Size of matrix
int n = 1000;
    int bytes = n * n * sizeof(long);
// Host matrix memory
long *h_a = (long *)malloc(bytes);
long *h_b = (long *)malloc(bytes);
// Results
long *hostRef = (long *)malloc(bytes);
long *gpuRef = (long *)malloc(bytes);
// Initialize matrix on host
for(int i = 0; i < n*n; i++ ) {
h_a[i] = i+1;
h_b[i] = i+1;
}
// Initialize matrix with 0s
memset(hostRef, 0, bytes);
memset(gpuRef, 0, bytes);
// Multiply matrix on host
auto start_cpu = std::chrono::high_resolution_clock::now();
multiply_matrix_host(h_a, h_b, hostRef, n);
auto end_cpu = std::chrono::high_resolution_clock::now();
// Measure total time in host
std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("multiply_matrix_host elapsed %f ms\n", duration_ms.count());
// Device matrix global memory
long *d_a, *d_b, *d_c;
cudaMalloc((void **)&d_a, bytes);
cudaMalloc((void **)&d_b, bytes);
cudaMalloc((void **)&d_c, bytes);
// Transfer data from host to device
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
cudaMemset(d_c, 0, bytes); // Initialize matrix with 0s
// Kernel execution configuration
dim3 block(128);
dim3 grid((n + block.x - 1) / block.x, n);
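    // Worked numbers for the default n = 1000: grid = ((1000 + 127) / 128, 1000) = (8, 1000), so each
    // row of C is covered by 8 blocks of 128 threads (1024 threads); the ix < n check in the kernel
    // masks the 24 surplus threads per row.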
printf("grid.x %d grid.y %d block.x %d \n", grid.x, grid.y, block.x);
// Execute kernel
start_cpu = std::chrono::high_resolution_clock::now();
multiply_matrix_gpu<<<grid, block>>>(d_a, d_b, d_c, n);
cudaDeviceSynchronize();
end_cpu = std::chrono::high_resolution_clock::now();
// Measure total time
duration_ms = end_cpu - start_cpu;
printf("multiply_matrix_gpu elapsed %f ms\n", duration_ms.count());
// Copy result from device to host
cudaMemcpy(gpuRef, d_c, bytes, cudaMemcpyDeviceToHost);
// Check results
checkResult(hostRef, gpuRef, n);
// Free memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(hostRef);
free(gpuRef);
cudaDeviceReset();
return 0;
}
|
8872d0fe94adba39cced34dd30d3a37fb3419e92.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "wireframe_params.hpp"
#include "wireframe_common.hpp"
#include "core/math/rng.hpp"
#include "core/memory/residency.hpp"
#include "core/renderer/path_util.hpp"
#include "core/scene/accel_structs/intersection.hpp"
#include "core/scene/lights/light_sampling.hpp"
#include "core/scene/lights/light_tree_sampling.hpp"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <ei/vector.hpp>
#include <random>
using namespace mufflon::scene::lights;
namespace mufflon {
namespace renderer {
__global__ static void wireframe_kernel(WireframeTargets::RenderBufferType<Device::CUDA> outputBuffer,
scene::SceneDescriptor<Device::CUDA>* scene,
const u32* seeds, WireframeParameters params) {
Pixel coord{
threadIdx.x + blockDim.x * blockIdx.x,
threadIdx.y + blockDim.y * blockIdx.y
};
if(coord.x >= outputBuffer.get_width() || coord.y >= outputBuffer.get_height())
return;
const int pixel = coord.x + coord.y * outputBuffer.get_width();
math::Rng rng(seeds[pixel]);
#ifdef __CUDA_ARCH__
sample_wireframe(outputBuffer, *scene, params, rng, coord);
#endif // __CUDA_ARCH__
}
namespace gpuwireframe_detail {
hipError_t call_kernel(const dim3& gridDims, const dim3& blockDims,
WireframeTargets::RenderBufferType<Device::CUDA>&& outputBuffer,
scene::SceneDescriptor<Device::CUDA>* scene,
const u32* seeds, const WireframeParameters& params) {
hipLaunchKernelGGL(( wireframe_kernel), dim3(gridDims), dim3(blockDims), 0, 0, std::move(outputBuffer), scene, seeds, params);
hipDeviceSynchronize();
return hipGetLastError();
}
} // namespace gpuwireframe_detail
}
} // namespace mufflon::renderer
|
8872d0fe94adba39cced34dd30d3a37fb3419e92.cu
|
#include "wireframe_params.hpp"
#include "wireframe_common.hpp"
#include "core/math/rng.hpp"
#include "core/memory/residency.hpp"
#include "core/renderer/path_util.hpp"
#include "core/scene/accel_structs/intersection.hpp"
#include "core/scene/lights/light_sampling.hpp"
#include "core/scene/lights/light_tree_sampling.hpp"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <ei/vector.hpp>
#include <random>
using namespace mufflon::scene::lights;
namespace mufflon {
namespace renderer {
__global__ static void wireframe_kernel(WireframeTargets::RenderBufferType<Device::CUDA> outputBuffer,
scene::SceneDescriptor<Device::CUDA>* scene,
const u32* seeds, WireframeParameters params) {
Pixel coord{
threadIdx.x + blockDim.x * blockIdx.x,
threadIdx.y + blockDim.y * blockIdx.y
};
if(coord.x >= outputBuffer.get_width() || coord.y >= outputBuffer.get_height())
return;
const int pixel = coord.x + coord.y * outputBuffer.get_width();
math::Rng rng(seeds[pixel]);
#ifdef __CUDA_ARCH__
sample_wireframe(outputBuffer, *scene, params, rng, coord);
#endif // __CUDA_ARCH__
}
namespace gpuwireframe_detail {
cudaError_t call_kernel(const dim3& gridDims, const dim3& blockDims,
WireframeTargets::RenderBufferType<Device::CUDA>&& outputBuffer,
scene::SceneDescriptor<Device::CUDA>* scene,
const u32* seeds, const WireframeParameters& params) {
wireframe_kernel<<<gridDims, blockDims>>>(std::move(outputBuffer), scene, seeds, params);
cudaDeviceSynchronize();
return cudaGetLastError();
}
} // namespace gpuwireframe_detail
}
} // namespace mufflon::renderer
|
f4cbe7eb01a4590f82078e5ff9f7f6cbf6bc5b2a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include "hip/hip_runtime.h"
#include "cuda_utils.h"
static double *cuda_acc;
void cuda_init(int rank)
{
int device_count;
hipGetDeviceCount(&device_count);
array(int) devices = array_new(ARRAY_VECTOR, int);
for (int i = 0; i < device_count; i++) {
hipDeviceProp_t device_prop;
hipGetDeviceProperties(&device_prop, i);
if (device_prop.major >= 2) {
array_push(&devices, i);
}
}
if (array_size(devices) == 0) {
exit(1);
}
int selected_device = devices[0];
hipSetDevice(selected_device);
//printf("(%d) cuda selected device: %d\n", rank, selected_device);
array_delete(&devices);
// accumulator for reduce operations
hipMalloc(&cuda_acc, sizeof(double));
}
void cuda_finalize()
{
hipFree(cuda_acc);
}
void *cuda_create_load_array_to_device(array(double) array)
{
void *cuda_array;
hipMalloc(&cuda_array, array_size(array) * array_item_size(array));
cuda_load_array_to_device(cuda_array, array);
return cuda_array;
}
void cuda_load_array_to_device(void *cuda_array, array(double) array)
{
hipMemcpy(cuda_array, array, array_size(array) * array_item_size(array), hipMemcpyHostToDevice);
}
void cuda_load_array_from_device(void *cuda_array, array(double) array)
{
hipMemcpy(array, cuda_array, array_size(array) * array_item_size(array), hipMemcpyDeviceToHost);
}
void cuda_delete_array(void *cuda_array)
{
hipFree(cuda_array);
}
__global__ void calc_residual_vector(double *cuda_res_vect, double *cuda_sol_vect, double *cuda_rhs_vect, int ic, int jc, double hx, double hy)
{
if (INDEX_X > 0 && INDEX_Y > 0 && INDEX_X < ic - 1 && INDEX_Y < jc - 1) {
double lp;
if (cuda_sol_vect == NULL) {
lp = 0.0;
} else {
lp = left_part(cuda_sol_vect, INDEX_X, INDEX_Y);
}
cuda_res_vect[INDEX(ic)] = lp - cuda_rhs_vect[INDEX(ic)];
}
}
void cuda_calc_residual_vector(void *cuda_res_vect, void *cuda_sol_vect, void *cuda_rhs_vect, int (*indexes)[D], double h[D])
{
int ic = CALC_IC(indexes);
int jc = CALC_JC(indexes);
hipLaunchKernelGGL(( calc_residual_vector), dim3(GRID_DIM(ic, jc)), dim3(BLOCK_DIM), 0, 0, (double *)cuda_res_vect, (double *)cuda_sol_vect, (double *)cuda_rhs_vect, ic, jc, h[X], h[Y]);
}
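// Reduction pattern used by calc_product and the kernels below: every in-range thread forms one
// per-element value, cuda_reduce_sum (or cuda_reduce_max) collapses the block's values in the
// dynamically allocated shared[] buffer, and thread 0 of each block folds the block's partial
// result into the single global accumulator with an atomic. cuda_reduce_sum, cuda_reduce_max and
// the double-precision atomic helpers are presumably provided by cuda_utils.h (not shown here).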
__global__ void calc_product(double *cuda_vect1, double *cuda_vect2, double *cuda_acc, int ic, int jc)
{
extern __shared__ double shared[];
double tacc = 0.0;
if (INDEX_X > 0 && INDEX_Y > 0 && INDEX_X < ic - 1 && INDEX_Y < jc - 1) {
tacc = cuda_vect1[INDEX(ic)] * cuda_vect2[INDEX(ic)];
}
cuda_reduce_sum(shared, THREAD_IDX, tacc);
if (THREAD_IDX == 0) {
atomicAdd(cuda_acc, shared[0]);
}
}
void cuda_calc_product(void *cuda_vect1, void *cuda_vect2, double *acc, int (*indexes)[D])
{
hipMemset(cuda_acc, 0, sizeof(double));
int ic = CALC_IC(indexes);
int jc = CALC_JC(indexes);
hipLaunchKernelGGL(( calc_product), dim3(GRID_DIM(ic, jc)), dim3(BLOCK_DIM), BLOCK_SIZE * sizeof(double), 0, (double *)cuda_vect1, (double *)cuda_vect2, (double *)cuda_acc, ic, jc);
hipMemcpy(acc, cuda_acc, sizeof(double), hipMemcpyDeviceToHost);
}
__global__ void calc_Aproduct(double *cuda_vect1, double *cuda_vect2, double *cuda_acc, int ic, int jc, double hx, double hy)
{
extern __shared__ double shared[];
double tacc = 0.0;
if (INDEX_X > 0 && INDEX_Y > 0 && INDEX_X < ic - 1 && INDEX_Y < jc - 1) {
tacc = left_part(cuda_vect1, INDEX_X, INDEX_Y) * cuda_vect2[INDEX(ic)];
}
cuda_reduce_sum(shared, THREAD_IDX, tacc);
if (THREAD_IDX == 0) {
atomicAdd(cuda_acc, shared[0]);
}
}
void cuda_calc_Aproduct(void *cuda_vect1, void *cuda_vect2, double *acc, int (*indexes)[D], double h[D])
{
hipMemset(cuda_acc, 0, sizeof(double));
int ic = CALC_IC(indexes);
int jc = CALC_JC(indexes);
hipLaunchKernelGGL(( calc_Aproduct), dim3(GRID_DIM(ic, jc)), dim3(BLOCK_DIM), BLOCK_SIZE * sizeof(double), 0, (double *)cuda_vect1, (double *)cuda_vect2, (double *)cuda_acc, ic, jc, h[X], h[Y]);
hipMemcpy(acc, cuda_acc, sizeof(double), hipMemcpyDeviceToHost);
}
__global__ void calc_it0_solution_vector(double *cuda_sol_vect, double *cuda_res_vect, int ic, int jc, double tau)
{
if (INDEX_X > 0 && INDEX_Y > 0 && INDEX_X < ic - 1 && INDEX_Y < jc - 1) {
cuda_sol_vect[INDEX(ic)] = -tau * cuda_res_vect[INDEX(ic)];
}
}
void cuda_calc_it0_solution_vector(void *cuda_sol_vect, void *cuda_res_vect, int (*indexes)[D], double tau)
{
int ic = CALC_IC(indexes);
int jc = CALC_JC(indexes);
hipLaunchKernelGGL(( calc_it0_solution_vector), dim3(GRID_DIM(ic, jc)), dim3(BLOCK_DIM), 0, 0, (double *)cuda_sol_vect, (double *)cuda_res_vect, ic, jc, tau);
}
__global__ void calc_itn_solution_vector(double *cuda_sol_vect, double *cuda_basis_vect, double *cuda_acc, int ic, int jc, double tau)
{
extern __shared__ double shared[];
double terr = 0.0;
if (INDEX_X > 0 && INDEX_Y > 0 && INDEX_X < ic - 1 && INDEX_Y < jc - 1) {
double new_value = cuda_sol_vect[INDEX(ic)] - tau * cuda_basis_vect[INDEX(ic)];
terr = fabs(new_value - cuda_sol_vect[INDEX(ic)]);
cuda_sol_vect[INDEX(ic)] = new_value;
}
cuda_reduce_max(shared, THREAD_IDX, terr);
if (THREAD_IDX == 0) {
atomicMax(cuda_acc, shared[0]);
}
}
void cuda_calc_itn_solution_vector(void *cuda_sol_vect, void *cuda_basis_vect, double *err, int (*indexes)[D], double tau)
{
hipMemset(cuda_acc, 0, sizeof(double));
int ic = CALC_IC(indexes);
int jc = CALC_JC(indexes);
hipLaunchKernelGGL(( calc_itn_solution_vector), dim3(GRID_DIM(ic, jc)), dim3(BLOCK_DIM), BLOCK_SIZE * sizeof(double), 0, (double *)cuda_sol_vect, (double *)cuda_basis_vect, (double *)cuda_acc, ic, jc, tau);
hipMemcpy(err, cuda_acc, sizeof(double), hipMemcpyDeviceToHost);
}
__global__ void calc_basis_vector(double *cuda_basis_vect, double *cuda_res_vect, int ic, int jc, double alpha)
{
if (INDEX_X > 0 && INDEX_Y > 0 && INDEX_X < ic - 1 && INDEX_Y < jc - 1) {
cuda_basis_vect[INDEX(ic)] = cuda_res_vect[INDEX(ic)] - alpha * cuda_basis_vect[INDEX(ic)];
}
}
void cuda_calc_basis_vector(void *cuda_basis_vect, void *cuda_res_vect, int (*indexes)[D], double alpha)
{
int ic = CALC_IC(indexes);
int jc = CALC_JC(indexes);
hipLaunchKernelGGL(( calc_basis_vector), dim3(GRID_DIM(ic, jc)), dim3(BLOCK_DIM), 0, 0, (double *)cuda_basis_vect, (double *)cuda_res_vect, ic, jc, alpha);
}
__global__ void calc_error(double *cuda_sol_vect, double *cuda_acc, int ic, int jc, double hx, double hy, double offset_x, double offset_y)
{
extern __shared__ double shared[];
double terr = 0.0;
if (INDEX_X > 0 && INDEX_Y > 0 && INDEX_X < ic - 1 && INDEX_Y < jc - 1) {
double bv = boundary_value((INDEX_X + offset_x - 1) * hx, (INDEX_Y + offset_y - 1) * hy);
terr = fabs(bv - cuda_sol_vect[INDEX(ic)]);
}
cuda_reduce_max(shared, THREAD_IDX, terr);
if (THREAD_IDX == 0) {
atomicMax(cuda_acc, shared[0]);
}
}
void cuda_calc_error(void *cuda_sol_vect, double *err, int (*indexes)[D], double h[D])
{
hipMemset(cuda_acc, 0, sizeof(double));
int ic = CALC_IC(indexes);
int jc = CALC_JC(indexes);
hipLaunchKernelGGL(( calc_error), dim3(GRID_DIM(ic, jc)), dim3(BLOCK_DIM), BLOCK_SIZE * sizeof(double), 0, (double *)cuda_sol_vect, (double *)cuda_acc, ic, jc, h[X], h[Y], indexes[X][START], indexes[Y][START]);
hipMemcpy(err, cuda_acc, sizeof(double), hipMemcpyDeviceToHost);
}
|
f4cbe7eb01a4590f82078e5ff9f7f6cbf6bc5b2a.cu
|
#include <cstdio>
#include <cstdlib>
#include "cuda.h"
#include "cuda_utils.h"
static double *cuda_acc;
void cuda_init(int rank)
{
int device_count;
cudaGetDeviceCount(&device_count);
array(int) devices = array_new(ARRAY_VECTOR, int);
for (int i = 0; i < device_count; i++) {
cudaDeviceProp device_prop;
cudaGetDeviceProperties(&device_prop, i);
if (device_prop.major >= 2) {
array_push(&devices, i);
}
}
if (array_size(devices) == 0) {
exit(1);
}
int selected_device = devices[0];
cudaSetDevice(selected_device);
//printf("(%d) cuda selected device: %d\n", rank, selected_device);
array_delete(&devices);
// accumulator for reduce operations
cudaMalloc(&cuda_acc, sizeof(double));
}
void cuda_finalize()
{
cudaFree(cuda_acc);
}
void *cuda_create_load_array_to_device(array(double) array)
{
void *cuda_array;
cudaMalloc(&cuda_array, array_size(array) * array_item_size(array));
cuda_load_array_to_device(cuda_array, array);
return cuda_array;
}
void cuda_load_array_to_device(void *cuda_array, array(double) array)
{
cudaMemcpy(cuda_array, array, array_size(array) * array_item_size(array), cudaMemcpyHostToDevice);
}
void cuda_load_array_from_device(void *cuda_array, array(double) array)
{
cudaMemcpy(array, cuda_array, array_size(array) * array_item_size(array), cudaMemcpyDeviceToHost);
}
void cuda_delete_array(void *cuda_array)
{
cudaFree(cuda_array);
}
__global__ void calc_residual_vector(double *cuda_res_vect, double *cuda_sol_vect, double *cuda_rhs_vect, int ic, int jc, double hx, double hy)
{
if (INDEX_X > 0 && INDEX_Y > 0 && INDEX_X < ic - 1 && INDEX_Y < jc - 1) {
double lp;
if (cuda_sol_vect == NULL) {
lp = 0.0;
} else {
lp = left_part(cuda_sol_vect, INDEX_X, INDEX_Y);
}
cuda_res_vect[INDEX(ic)] = lp - cuda_rhs_vect[INDEX(ic)];
}
}
void cuda_calc_residual_vector(void *cuda_res_vect, void *cuda_sol_vect, void *cuda_rhs_vect, int (*indexes)[D], double h[D])
{
int ic = CALC_IC(indexes);
int jc = CALC_JC(indexes);
calc_residual_vector<<<GRID_DIM(ic, jc), BLOCK_DIM>>>((double *)cuda_res_vect, (double *)cuda_sol_vect, (double *)cuda_rhs_vect, ic, jc, h[X], h[Y]);
}
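// Reduction pattern used by calc_product and the kernels below: every in-range thread forms one
// per-element value, cuda_reduce_sum (or cuda_reduce_max) collapses the block's values in the
// dynamically allocated shared[] buffer, and thread 0 of each block folds the block's partial
// result into the single global accumulator with an atomic. cuda_reduce_sum, cuda_reduce_max and
// the double-precision atomic helpers are presumably provided by cuda_utils.h (not shown here).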
__global__ void calc_product(double *cuda_vect1, double *cuda_vect2, double *cuda_acc, int ic, int jc)
{
extern __shared__ double shared[];
double tacc = 0.0;
if (INDEX_X > 0 && INDEX_Y > 0 && INDEX_X < ic - 1 && INDEX_Y < jc - 1) {
tacc = cuda_vect1[INDEX(ic)] * cuda_vect2[INDEX(ic)];
}
cuda_reduce_sum(shared, THREAD_IDX, tacc);
if (THREAD_IDX == 0) {
atomicAdd(cuda_acc, shared[0]);
}
}
void cuda_calc_product(void *cuda_vect1, void *cuda_vect2, double *acc, int (*indexes)[D])
{
cudaMemset(cuda_acc, 0, sizeof(double));
int ic = CALC_IC(indexes);
int jc = CALC_JC(indexes);
calc_product<<<GRID_DIM(ic, jc), BLOCK_DIM, BLOCK_SIZE * sizeof(double)>>>((double *)cuda_vect1, (double *)cuda_vect2, (double *)cuda_acc, ic, jc);
cudaMemcpy(acc, cuda_acc, sizeof(double), cudaMemcpyDeviceToHost);
}
__global__ void calc_Aproduct(double *cuda_vect1, double *cuda_vect2, double *cuda_acc, int ic, int jc, double hx, double hy)
{
extern __shared__ double shared[];
double tacc = 0.0;
if (INDEX_X > 0 && INDEX_Y > 0 && INDEX_X < ic - 1 && INDEX_Y < jc - 1) {
tacc = left_part(cuda_vect1, INDEX_X, INDEX_Y) * cuda_vect2[INDEX(ic)];
}
cuda_reduce_sum(shared, THREAD_IDX, tacc);
if (THREAD_IDX == 0) {
atomicAdd(cuda_acc, shared[0]);
}
}
void cuda_calc_Aproduct(void *cuda_vect1, void *cuda_vect2, double *acc, int (*indexes)[D], double h[D])
{
cudaMemset(cuda_acc, 0, sizeof(double));
int ic = CALC_IC(indexes);
int jc = CALC_JC(indexes);
calc_Aproduct<<<GRID_DIM(ic, jc), BLOCK_DIM, BLOCK_SIZE * sizeof(double)>>>((double *)cuda_vect1, (double *)cuda_vect2, (double *)cuda_acc, ic, jc, h[X], h[Y]);
cudaMemcpy(acc, cuda_acc, sizeof(double), cudaMemcpyDeviceToHost);
}
__global__ void calc_it0_solution_vector(double *cuda_sol_vect, double *cuda_res_vect, int ic, int jc, double tau)
{
if (INDEX_X > 0 && INDEX_Y > 0 && INDEX_X < ic - 1 && INDEX_Y < jc - 1) {
cuda_sol_vect[INDEX(ic)] = -tau * cuda_res_vect[INDEX(ic)];
}
}
void cuda_calc_it0_solution_vector(void *cuda_sol_vect, void *cuda_res_vect, int (*indexes)[D], double tau)
{
int ic = CALC_IC(indexes);
int jc = CALC_JC(indexes);
calc_it0_solution_vector<<<GRID_DIM(ic, jc), BLOCK_DIM>>>((double *)cuda_sol_vect, (double *)cuda_res_vect, ic, jc, tau);
}
__global__ void calc_itn_solution_vector(double *cuda_sol_vect, double *cuda_basis_vect, double *cuda_acc, int ic, int jc, double tau)
{
extern __shared__ double shared[];
double terr = 0.0;
if (INDEX_X > 0 && INDEX_Y > 0 && INDEX_X < ic - 1 && INDEX_Y < jc - 1) {
double new_value = cuda_sol_vect[INDEX(ic)] - tau * cuda_basis_vect[INDEX(ic)];
terr = fabs(new_value - cuda_sol_vect[INDEX(ic)]);
cuda_sol_vect[INDEX(ic)] = new_value;
}
cuda_reduce_max(shared, THREAD_IDX, terr);
if (THREAD_IDX == 0) {
atomicMax(cuda_acc, shared[0]);
}
}
void cuda_calc_itn_solution_vector(void *cuda_sol_vect, void *cuda_basis_vect, double *err, int (*indexes)[D], double tau)
{
cudaMemset(cuda_acc, 0, sizeof(double));
int ic = CALC_IC(indexes);
int jc = CALC_JC(indexes);
calc_itn_solution_vector<<<GRID_DIM(ic, jc), BLOCK_DIM, BLOCK_SIZE * sizeof(double)>>>((double *)cuda_sol_vect, (double *)cuda_basis_vect, (double *)cuda_acc, ic, jc, tau);
cudaMemcpy(err, cuda_acc, sizeof(double), cudaMemcpyDeviceToHost);
}
__global__ void calc_basis_vector(double *cuda_basis_vect, double *cuda_res_vect, int ic, int jc, double alpha)
{
if (INDEX_X > 0 && INDEX_Y > 0 && INDEX_X < ic - 1 && INDEX_Y < jc - 1) {
cuda_basis_vect[INDEX(ic)] = cuda_res_vect[INDEX(ic)] - alpha * cuda_basis_vect[INDEX(ic)];
}
}
void cuda_calc_basis_vector(void *cuda_basis_vect, void *cuda_res_vect, int (*indexes)[D], double alpha)
{
int ic = CALC_IC(indexes);
int jc = CALC_JC(indexes);
calc_basis_vector<<<GRID_DIM(ic, jc), BLOCK_DIM>>>((double *)cuda_basis_vect, (double *)cuda_res_vect, ic, jc, alpha);
}
__global__ void calc_error(double *cuda_sol_vect, double *cuda_acc, int ic, int jc, double hx, double hy, double offset_x, double offset_y)
{
extern __shared__ double shared[];
double terr = 0.0;
if (INDEX_X > 0 && INDEX_Y > 0 && INDEX_X < ic - 1 && INDEX_Y < jc - 1) {
double bv = boundary_value((INDEX_X + offset_x - 1) * hx, (INDEX_Y + offset_y - 1) * hy);
terr = fabs(bv - cuda_sol_vect[INDEX(ic)]);
}
cuda_reduce_max(shared, THREAD_IDX, terr);
if (THREAD_IDX == 0) {
atomicMax(cuda_acc, shared[0]);
}
}
void cuda_calc_error(void *cuda_sol_vect, double *err, int (*indexes)[D], double h[D])
{
cudaMemset(cuda_acc, 0, sizeof(double));
int ic = CALC_IC(indexes);
int jc = CALC_JC(indexes);
calc_error<<<GRID_DIM(ic, jc), BLOCK_DIM, BLOCK_SIZE * sizeof(double)>>>((double *)cuda_sol_vect, (double *)cuda_acc, ic, jc, h[X], h[Y], indexes[X][START], indexes[Y][START]);
cudaMemcpy(err, cuda_acc, sizeof(double), cudaMemcpyDeviceToHost);
}
|
ac6eb13488e16ec66efe156e14811c091a5c2aa8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <thrust/device_vector.h>
#include <algorithm>
#include <vector>
#include "cml/cml_blas.cuh"
#include "cml/cml_linalg.cuh"
#include "cml/cml_matrix.cuh"
#include "cml/cml_vector.cuh"
#include "solver.hpp"
#ifdef __MEX__
#define printf mexPrintf
extern "C" int mexPrintf(const char* fmt, ...);
#endif // __MEX__
template <typename T>
void RowToColMajor(const T *Arm, size_t m, size_t n, T *Acm);
template <typename T, typename M>
void Solver(AdmmData<T, M> *admm_data) {
// Extract values from admm_data
size_t n = admm_data->n;
size_t m = admm_data->m;
bool is_skinny = m >= n;
size_t min_dim = ::min(m, n);
const T kOne = static_cast<T>(1);
const T kZero = static_cast<T>(0);
// Create cuBLAS handle.
hipblasHandle_t handle;
hipblasCreate(&handle);
// Allocate data for ADMM variables.
cml::vector<T> z = cml::vector_calloc<T>(m + n);
cml::vector<T> zt = cml::vector_calloc<T>(m + n);
cml::vector<T> z12 = cml::vector_calloc<T>(m + n);
cml::vector<T> z_prev = cml::vector_calloc<T>(m + n);
cml::matrix<T> L = cml::matrix_alloc<T>(min_dim, min_dim);
cml::matrix<T> AA = cml::matrix_alloc<T>(min_dim, min_dim);
cml::matrix<T> A = cml::matrix_alloc<T>(m, n);
// Copy A to device (assume input row-major).
T *Acm = new T[m * n];
RowToColMajor(admm_data->A, m, n, Acm);
cml::matrix_memcpy(&A, Acm);
delete [] Acm;
// Copy f and g to device
thrust::device_vector<FunctionObj<T> > f(admm_data->f.begin(),
admm_data->f.end());
thrust::device_vector<FunctionObj<T> > g(admm_data->g.begin(),
admm_data->g.end());
// Create views for x and y components.
cml::vector<T> x = cml::vector_subvector(&z, 0, n);
cml::vector<T> y = cml::vector_subvector(&z, n, m);
cml::vector<T> xt = cml::vector_subvector(&zt, 0, n);
cml::vector<T> yt = cml::vector_subvector(&zt, n, m);
cml::vector<T> x12 = cml::vector_subvector(&z12, 0, n);
cml::vector<T> y12 = cml::vector_subvector(&z12, n, m);
// Compute cholesky decomposition of (I + A^TA) or (I + AA^T)
hipblasOperation_t mult_type = is_skinny ? HIPBLAS_OP_T : HIPBLAS_OP_N;
cml::blas_syrk(handle, HIPBLAS_FILL_MODE_LOWER, mult_type, kOne, &A, kZero,
&AA);
cml::matrix_memcpy(&L, &AA);
cml::matrix_add_constant_diag(&L, kOne);
cml::linalg_cholesky_decomp(handle, &L);
// Signal start of execution.
if (!admm_data->quiet)
printf("%4s %12s %10s %10s %10s %10s\n",
"#", "r norm", "eps_pri", "s norm", "eps_dual", "objective");
T sqrtn_atol = sqrt(static_cast<T>(n)) * admm_data->abs_tol;
for (unsigned int k = 0; k < admm_data->max_iter; ++k) {
// Evaluate Proximal Operators
cml::blas_axpy(handle, -kOne, &xt, &x);
cml::blas_axpy(handle, -kOne, &yt, &y);
ProxEval(g, admm_data->rho, x.data, x12.data);
ProxEval(f, admm_data->rho, y.data, y12.data);
// Project and Update Dual Variables
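    // Sketch of the update below: writing c = x^{1/2} + xt and d = y^{1/2} + yt, this block projects
    // (c, d) onto the graph {(x, y) : y = A x} using the cached Cholesky factor L. For skinny A it
    // solves (I + A^T A) x = c + A^T d and sets y = A x; for fat A it solves (I + A A^T) y = A c + A A^T d
    // and recovers x = c + A^T (d - y). Afterwards xt = c - x and yt = d - y hold the scaled duals.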
cml::blas_axpy(handle, kOne, &x12, &xt);
cml::blas_axpy(handle, kOne, &y12, &yt);
if (is_skinny) {
cml::vector_memcpy(&x, &xt);
cml::blas_gemv(handle, HIPBLAS_OP_T, kOne, &A, &yt, kOne, &x);
cml::linalg_cholesky_svx(handle, &L, &x);
cml::blas_gemv(handle, HIPBLAS_OP_N, kOne, &A, &x, kZero, &y);
cml::blas_axpy(handle, -kOne, &y, &yt);
} else {
cml::blas_gemv(handle, HIPBLAS_OP_N, kOne, &A, &xt, kZero, &y);
cml::blas_symv(handle, HIPBLAS_FILL_MODE_LOWER, kOne, &AA, &yt, kOne, &y);
cml::linalg_cholesky_svx(handle, &L, &y);
cml::blas_axpy(handle, -kOne, &y, &yt);
cml::vector_memcpy(&x, &xt);
cml::blas_gemv(handle, HIPBLAS_OP_T, kOne, &A, &yt, kOne, &x);
}
cml::blas_axpy(handle, -kOne, &x, &xt);
// Compute primal and dual tolerances.
T nrm_z = cml::blas_nrm2(handle, &z);
T nrm_zt = cml::blas_nrm2(handle, &zt);
T nrm_z12 = cml::blas_nrm2(handle, &z12);
T eps_pri = sqrtn_atol + admm_data->rel_tol * ::max(nrm_z12, nrm_z);
T eps_dual = sqrtn_atol + admm_data->rel_tol * admm_data->rho * nrm_zt;
// Compute ||r^k||_2 and ||s^k||_2.
cml::blas_axpy(handle, -kOne, &z, &z12);
cml::blas_axpy(handle, -kOne, &z, &z_prev);
T nrm_r = cml::blas_nrm2(handle, &z12);
T nrm_s = admm_data->rho * cml::blas_nrm2(handle, &z_prev);
// Evaluate stopping criteria.
bool converged = nrm_r <= eps_pri && nrm_s <= eps_dual;
if (!admm_data->quiet && (k % 10 == 0 || converged)) {
T obj = FuncEval(f, y.data) + FuncEval(g, x.data);
printf("%4d : %.3e %.3e %.3e %.3e %.3e\n",
k, nrm_r, eps_pri, nrm_s, eps_dual, obj);
}
if (converged)
break;
// Make copy of z.
cml::vector_memcpy(&z_prev, &z);
}
// Copy results to output.
if (admm_data->y != 0)
cml::vector_memcpy(admm_data->y, &y);
if (admm_data->x != 0)
cml::vector_memcpy(admm_data->x, &x);
// Free up memory.
cml::matrix_free(&L);
cml::matrix_free(&AA);
cml::matrix_free(&A);
cml::vector_free(&z);
cml::vector_free(&zt);
cml::vector_free(&z12);
cml::vector_free(&z_prev);
}
template <typename T>
void RowToColMajor(const T *Arm, size_t m, size_t n, T *Acm) {
for (unsigned int i = 0; i < m; ++i)
for (unsigned int j = 0; j < n; ++j)
Acm[j * m + i] = Arm[i * n + j];
}
template void Solver<double>(AdmmData<double, double*> *);
template void Solver<float>(AdmmData<float, float*> *);
|
ac6eb13488e16ec66efe156e14811c091a5c2aa8.cu
|
#include <thrust/device_vector.h>
#include <algorithm>
#include <vector>
#include "cml/cml_blas.cuh"
#include "cml/cml_linalg.cuh"
#include "cml/cml_matrix.cuh"
#include "cml/cml_vector.cuh"
#include "solver.hpp"
#ifdef __MEX__
#define printf mexPrintf
extern "C" int mexPrintf(const char* fmt, ...);
#endif // __MEX__
template <typename T>
void RowToColMajor(const T *Arm, size_t m, size_t n, T *Acm);
template <typename T, typename M>
void Solver(AdmmData<T, M> *admm_data) {
// Extract values from admm_data
size_t n = admm_data->n;
size_t m = admm_data->m;
bool is_skinny = m >= n;
size_t min_dim = std::min(m, n);
const T kOne = static_cast<T>(1);
const T kZero = static_cast<T>(0);
// Create cuBLAS handle.
cublasHandle_t handle;
cublasCreate(&handle);
// Allocate data for ADMM variables.
cml::vector<T> z = cml::vector_calloc<T>(m + n);
cml::vector<T> zt = cml::vector_calloc<T>(m + n);
cml::vector<T> z12 = cml::vector_calloc<T>(m + n);
cml::vector<T> z_prev = cml::vector_calloc<T>(m + n);
cml::matrix<T> L = cml::matrix_alloc<T>(min_dim, min_dim);
cml::matrix<T> AA = cml::matrix_alloc<T>(min_dim, min_dim);
cml::matrix<T> A = cml::matrix_alloc<T>(m, n);
// Copy A to device (assume input row-major).
T *Acm = new T[m * n];
RowToColMajor(admm_data->A, m, n, Acm);
cml::matrix_memcpy(&A, Acm);
delete [] Acm;
// Copy f and g to device
thrust::device_vector<FunctionObj<T> > f(admm_data->f.begin(),
admm_data->f.end());
thrust::device_vector<FunctionObj<T> > g(admm_data->g.begin(),
admm_data->g.end());
// Create views for x and y components.
cml::vector<T> x = cml::vector_subvector(&z, 0, n);
cml::vector<T> y = cml::vector_subvector(&z, n, m);
cml::vector<T> xt = cml::vector_subvector(&zt, 0, n);
cml::vector<T> yt = cml::vector_subvector(&zt, n, m);
cml::vector<T> x12 = cml::vector_subvector(&z12, 0, n);
cml::vector<T> y12 = cml::vector_subvector(&z12, n, m);
// Compute cholesky decomposition of (I + A^TA) or (I + AA^T)
cublasOperation_t mult_type = is_skinny ? CUBLAS_OP_T : CUBLAS_OP_N;
cml::blas_syrk(handle, CUBLAS_FILL_MODE_LOWER, mult_type, kOne, &A, kZero,
&AA);
cml::matrix_memcpy(&L, &AA);
cml::matrix_add_constant_diag(&L, kOne);
cml::linalg_cholesky_decomp(handle, &L);
// Signal start of execution.
if (!admm_data->quiet)
printf("%4s %12s %10s %10s %10s %10s\n",
"#", "r norm", "eps_pri", "s norm", "eps_dual", "objective");
T sqrtn_atol = sqrt(static_cast<T>(n)) * admm_data->abs_tol;
for (unsigned int k = 0; k < admm_data->max_iter; ++k) {
// Evaluate Proximal Operators
cml::blas_axpy(handle, -kOne, &xt, &x);
cml::blas_axpy(handle, -kOne, &yt, &y);
ProxEval(g, admm_data->rho, x.data, x12.data);
ProxEval(f, admm_data->rho, y.data, y12.data);
// Project and Update Dual Variables
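    // Sketch of the update below: writing c = x^{1/2} + xt and d = y^{1/2} + yt, this block projects
    // (c, d) onto the graph {(x, y) : y = A x} using the cached Cholesky factor L. For skinny A it
    // solves (I + A^T A) x = c + A^T d and sets y = A x; for fat A it solves (I + A A^T) y = A c + A A^T d
    // and recovers x = c + A^T (d - y). Afterwards xt = c - x and yt = d - y hold the scaled duals.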
cml::blas_axpy(handle, kOne, &x12, &xt);
cml::blas_axpy(handle, kOne, &y12, &yt);
if (is_skinny) {
cml::vector_memcpy(&x, &xt);
cml::blas_gemv(handle, CUBLAS_OP_T, kOne, &A, &yt, kOne, &x);
cml::linalg_cholesky_svx(handle, &L, &x);
cml::blas_gemv(handle, CUBLAS_OP_N, kOne, &A, &x, kZero, &y);
cml::blas_axpy(handle, -kOne, &y, &yt);
} else {
cml::blas_gemv(handle, CUBLAS_OP_N, kOne, &A, &xt, kZero, &y);
cml::blas_symv(handle, CUBLAS_FILL_MODE_LOWER, kOne, &AA, &yt, kOne, &y);
cml::linalg_cholesky_svx(handle, &L, &y);
cml::blas_axpy(handle, -kOne, &y, &yt);
cml::vector_memcpy(&x, &xt);
cml::blas_gemv(handle, CUBLAS_OP_T, kOne, &A, &yt, kOne, &x);
}
cml::blas_axpy(handle, -kOne, &x, &xt);
// Compute primal and dual tolerances.
T nrm_z = cml::blas_nrm2(handle, &z);
T nrm_zt = cml::blas_nrm2(handle, &zt);
T nrm_z12 = cml::blas_nrm2(handle, &z12);
T eps_pri = sqrtn_atol + admm_data->rel_tol * std::max(nrm_z12, nrm_z);
T eps_dual = sqrtn_atol + admm_data->rel_tol * admm_data->rho * nrm_zt;
// Compute ||r^k||_2 and ||s^k||_2.
cml::blas_axpy(handle, -kOne, &z, &z12);
cml::blas_axpy(handle, -kOne, &z, &z_prev);
T nrm_r = cml::blas_nrm2(handle, &z12);
T nrm_s = admm_data->rho * cml::blas_nrm2(handle, &z_prev);
// Evaluate stopping criteria.
bool converged = nrm_r <= eps_pri && nrm_s <= eps_dual;
if (!admm_data->quiet && (k % 10 == 0 || converged)) {
T obj = FuncEval(f, y.data) + FuncEval(g, x.data);
printf("%4d : %.3e %.3e %.3e %.3e %.3e\n",
k, nrm_r, eps_pri, nrm_s, eps_dual, obj);
}
if (converged)
break;
// Make copy of z.
cml::vector_memcpy(&z_prev, &z);
}
// Copy results to output.
if (admm_data->y != 0)
cml::vector_memcpy(admm_data->y, &y);
if (admm_data->x != 0)
cml::vector_memcpy(admm_data->x, &x);
// Free up memory.
cml::matrix_free(&L);
cml::matrix_free(&AA);
cml::matrix_free(&A);
cml::vector_free(&z);
cml::vector_free(&zt);
cml::vector_free(&z12);
cml::vector_free(&z_prev);
}
template <typename T>
void RowToColMajor(const T *Arm, size_t m, size_t n, T *Acm) {
for (unsigned int i = 0; i < m; ++i)
for (unsigned int j = 0; j < n; ++j)
Acm[j * m + i] = Arm[i * n + j];
}
template void Solver<double>(AdmmData<double, double*> *);
template void Solver<float>(AdmmData<float, float*> *);
|
4c3da4f19129c3b9948ab90c4f56bbc5bd133532.hip
|
// !!! This is a file automatically generated by hipify!!!
// CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include "sceneStructs.h"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
#include <vector>
#include <hip/hip_runtime_api.h>
#include "glm/glm.hpp"
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/copy.h>
#include <iostream>
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
//LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
//Function that generates static.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
int index = x + (y * resolution.x);
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//Kernel that blacks out a given image buffer
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
image[index] = glm::vec3(0,0,0);
}
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
glm::vec3 color;
color.x = image[index].x*255.0;
color.y = image[index].y*255.0;
color.z = image[index].z*255.0;
if(color.x>255){
color.x = 255;
}
if(color.y>255){
color.y = 255;
}
if(color.z>255){
color.z = 255;
}
    // Each thread writes one pixel location in the texture (texel)
PBOpos[index].w = 0;
PBOpos[index].x = color.x;
PBOpos[index].y = color.y;
PBOpos[index].z = color.z;
}
}
__device__ glm::vec3 reflect(glm::vec3 const & I, glm::vec3 const & N)
{
return I - 2.0f * glm::dot(N, I) * N;
}
__device__ bool isRayUnblocked(glm::vec3 const & point1, glm::vec3 const & point2, staticGeom* geoms, int numberOfGeoms)
{
glm::vec3 DIRECTION(point2 - point1);
float DISTANCE = glm::length(DIRECTION);
// Offset start position in ray direction by small distance to prevent self collisions
float DELTA = 0.001f;
ray r;
r.origin = point1 + DELTA * DIRECTION;
r.direction = glm::normalize(DIRECTION);
for (int i=0; i<numberOfGeoms; ++i)
{
glm::vec3 intersectionPoint;
glm::vec3 normal;
float intersectionDistance = geomIntersectionTest(geoms[i], r, intersectionPoint, normal);
// Does not intersect so check next primitive
if (intersectionDistance <= 0.0f) continue;
// Take into consideration intersection only between the two points.
if (intersectionDistance < DISTANCE) return false;
}
return true;
}
/*
__global__ void raytraceRay(glm::vec2 resolution, int time, float bounce, cameraData cam, int rayDepth, glm::vec3* colors,
staticGeom* geoms, int numberOfGeoms, material* materials, int numberOfMaterials, ray* d_rays)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if ( x >= resolution.x || y >= resolution.y ) return;
ray r = d_rays[index];
// ============================================
// Determine closest intersection with geometry
// ============================================
float distance = -1.0f;
glm::vec3 intersection;
glm::vec3 normal;
int materialIdx;
for (int i = 0; i < numberOfGeoms; ++i)
{
float newDistance;
glm::vec3 newIntersection;
glm::vec3 newNormal;
switch (geoms[i].type)
{
case SPHERE:
newDistance = sphereIntersectionTest(geoms[i], r, newIntersection, newNormal);
break;
case CUBE:
newDistance = boxIntersectionTest(geoms[i], r, newIntersection, newNormal);
break;
case MESH:
newDistance = -1.0f;
break;
}
if ( newDistance < 0.0f ) continue;
if ( distance < 0.0f || (distance > 0.0f && newDistance < distance) )
{
distance = newDistance;
intersection = newIntersection;
normal = newNormal;
materialIdx = geoms[i].materialid;
}
}
// ============================================
// Paint pixel
// ============================================
// No hit
if ( distance < 0.0f )
{
colors[index] = glm::vec3(0.0f, 0.0f, 0.0f);
//colors[index] = generateRandomNumberFromThread(resolution, time, x, y);
return;
}
// Simple local reflectance model (local illumination model formula)
float reflectivity = 0.0f;
float transmittance = 1.0f - reflectivity;
glm::vec3 materialColor = materials[materialIdx].color;
glm::vec3 reflectedColor(0.0f, 0.0f, 0.0f);
glm::vec3 ambientLightColor(1.0f, 1.0f, 1.0f);
float AMBIENT_WEIGHT = 0.2f; // Ka - Ambient reflectivity factor
float DIFFUSE_WEIGHT = 0.3f; // Kd - Diffuse reflectivity factor
float SPECULAR_WEIGHT = 0.5f; // Ks - Specular reflectivity factor
glm::vec3 lightColor(1.0f, 1.0f, 1.0f);
glm::vec3 color = AMBIENT_WEIGHT * ambientLightColor * materialColor;
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(-0.15f, 0.15f);
for ( int i = 0; i < 1; ++i)
{
glm::vec3 lightPosition(0.25f + (float) u01(rng), 1.0f, (float) u01(rng));
// Unit vector from intersection point to light source
glm::vec3 LIGHT_DIRECTION = glm::normalize(lightPosition - intersection);
// Direction of reflected light at intersection point
glm::vec3 LIGHT_REFLECTION = glm::normalize(reflect(-1.0f*LIGHT_DIRECTION, normal));
// Determine diffuse term
float diffuseTerm;
diffuseTerm = glm::dot(normal, LIGHT_DIRECTION);
diffuseTerm = glm::clamp(diffuseTerm, 0.0f, 1.0f);
// Determine specular term
float specularTerm = 0.0f;
if ( materials[materialIdx].specularExponent - 0.0f > 0.001f )
{
float SPECULAR_EXPONENT = materials[materialIdx].specularExponent;
glm::vec3 EYE_DIRECTION = glm::normalize(cam.position - intersection);
specularTerm = glm::dot(LIGHT_REFLECTION, EYE_DIRECTION);
specularTerm = pow(fmaxf(specularTerm, 0.0f), SPECULAR_EXPONENT);
specularTerm = glm::clamp(specularTerm, 0.0f, 1.0f);
}
if (isRayUnblocked(intersection, lightPosition, geoms, numberOfGeoms))
{
color += DIFFUSE_WEIGHT * lightColor * materialColor * diffuseTerm / 1.0f;
color += SPECULAR_WEIGHT * lightColor * specularTerm / 1.0f;
}
}
glm::vec3 new_color = reflectivity*reflectedColor + transmittance*color;
if ( time > 1 )
{
colors[index] += (new_color - colors[index]) / (float)time;
return;
}
colors[index] = new_color;
}
*/
// Requires:
// x = 0 to width-1
// y = 0 to height-1
// Jittering based only on random_seed (not x or y).
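// Sketch of the construction below: h and v are the half-extent vectors of the image plane in the
// horizontal and vertical directions, built from the view direction c, the up vector u and the
// vertical field of view. Pixel (x, y) plus its sub-pixel jitter maps to sx, sy in [0, 1], then to
// the world-space point p = m - (2*sx - 1)*h - (2*sy - 1)*v on the plane through m, and the ray
// direction is normalize(p - e).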
__host__ __device__ glm::vec3 GetRayDirectionFromCamera(const cameraData& cam, int x, int y, int random_seed)
{
float random1, random2; // Random # between 0 and 1 (from random_seed).
// Set random numbers.
{
thrust::default_random_engine rng(hash(random_seed));
thrust::uniform_real_distribution<float> u01(0,1);
random1 = u01(rng);
random2 = u01(rng);
}
float width = (float) cam.resolution.x;
float height = (float) cam.resolution.y;
glm::vec3 c(cam.view); // View direction (unit vector) from eye.
glm::vec3 e(cam.position); // Camera center position.
glm::vec3 m = e + c; // Midpoint of screen.
glm::vec3 u(cam.up); // Up vector.
glm::vec3 a = glm::cross(c, u); // c x u TODO: make sure this is well defined
glm::vec3 b = glm::cross(a, c); // a x c TODO: make sure this is well defined
glm::vec3 v; // Vertical vector from "m" to top of screen.
glm::vec3 h; // Horizontal vector from "m" to right of screen.
// Calculate v & h
{
float phi = cam.fov.y * PI / 180.0f / 2.0f;
float screen_ratio = height / width;
v = b * tan(phi) / (float)glm::length(b);
float theta = atan(glm::length(v)/screen_ratio / (float)glm::length(c));
h = a * (float)glm::length(c) * tan(theta) / (float)glm::length(a);
}
// Obtain a unit vector in the direction from the eye to a pixel point (x, y) on screen
float sx = (x + random1) / width; // Without jitter: x / (width - 1.0f)
float sy = (y + random2) / height; // y / (height - 1.0f)
glm::vec3 p = m - (2*sx - 1)*h - (2*sy - 1)*v; // World position of point (x, y) on screen
return glm::normalize(p-e);
}
// Initialize all rays using camera data.
// # of rays = # of pixels
__global__ void InitRay(cameraData cam, int random_seed, ray* d_rays, glm::vec3* d_lights, bool* d_is_ray_alive, int* d_ray_idx)
{
int width = cam.resolution.x;
int height = cam.resolution.y;
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int idx = x + (y * width);
if ( x >= width || y >= height ) return;
d_rays[idx].origin = cam.position;
d_rays[idx].direction = GetRayDirectionFromCamera(cam, x, y, random_seed);
d_lights[idx] = glm::vec3(1.0f);
d_is_ray_alive[idx] = true;
d_ray_idx[idx] = idx;
}
// Modifies:
// p: Intersection point.
// n: Normal unit vector at intersection.
// material_id: Of intersected object.
// Return true if intersected.
__device__ bool GetClosestIntersection(ray& r, staticGeom* geoms, int num_geoms, material* materials,
glm::vec3& p, glm::vec3& n, int& material_id)
{
float distance = -1.0f;
for ( int i=0; i < num_geoms; ++i )
{
// Ignore emitters.
//if ( IsEmitter(geoms[i].materialid, materials) ) continue;
glm::vec3 new_intersection;
glm::vec3 new_normal;
float new_distance = geomIntersectionTest(geoms[i], r, new_intersection, new_normal);
if ( new_distance < 0.0f ) continue;
if ( distance < 0.0f || (distance > 0.0f && new_distance < distance) )
{
distance = new_distance;
p = new_intersection;
n = new_normal;
material_id = geoms[i].materialid;
}
}
if ( distance < 0.0f) return false;
return true;
}
__host__ __device__ bool IsEmitter(int id, material* materials)
{
return ( materials[id].emittance > 0.5f );
}
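// Incremental mean: after k iterations colors[idx] holds the average of the k per-iteration
// samples, since c_k = c_{k-1} + (x_k - c_{k-1}) / k.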
__device__ void SetAverageColor(glm::vec3* colors, int idx, glm::vec3& new_color, int iterations)
{
if ( iterations > 1 )
{
colors[idx] += (new_color - colors[idx]) / (float)iterations;
return;
}
colors[idx] = new_color;
}
__global__ void TraceRay(int iterations, int depth, int max_depth, int num_pixels, ray* d_rays, int num_rays, glm::vec3* d_lights, bool* d_is_ray_alive, int* d_ray_idx,
glm::vec3* colors, staticGeom* geoms, int num_geoms, material* materials, int num_materials)
{
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
/*
int debug_i = 0;
if (x == 641 && y == 177)
{
debug_i ++;
}
debug_i ++;
*/
if ( idx >= num_rays ) return;
if ( !d_is_ray_alive[idx] ) return;
// Copy global memory to register.
ray ray_in = d_rays[idx];
glm::vec3 light = d_lights[idx];
bool is_intersected;
glm::vec3 p; // Intersection point.
glm::vec3 n; // Normal unit vector at intersection.
int material_id; // Of intersected object.
is_intersected = GetClosestIntersection(ray_in, geoms, num_geoms, materials, p, n, material_id);
// No hit, return (light * bg).
if ( !is_intersected )
{
glm::vec3 bg_color(0.2f);
glm::vec3 new_color = light * bg_color;
d_is_ray_alive[idx] = false;
SetAverageColor(colors, d_ray_idx[idx], new_color, iterations);
return;
}
// Hit emitter, return (light * emitter).
if ( IsEmitter(material_id, materials) )
{
glm::vec3 new_color = light * materials[material_id].color * materials[material_id].emittance;
d_is_ray_alive[idx] = false;
SetAverageColor(colors, d_ray_idx[idx], new_color, iterations);
return;
}
// Make ray_out in random direction.
ray ray_out;
//ray_out.direction = UniformRandomHemisphereDirection(n, (float) (iterations-1) * max_depth * num_pixels + depth * num_pixels + idx);
float xi1, xi2;
{
thrust::default_random_engine rng(hash((float) iterations * (depth+1) * idx));
thrust::uniform_real_distribution<float> u01(0,1);
xi1 = u01(rng);
xi2 = u01(rng);
}
if ( materials[material_id].hasReflective )
{
ray_out.direction = reflect(ray_in.direction, glm::normalize(n));
}
else
{
ray_out.direction = calculateRandomDirectionInHemisphere(glm::normalize(n), xi1, xi2);
}
ray_out.origin = p + 0.001f * ray_out.direction;
// Update light & ray.
d_lights[idx] = light * materials[material_id].color;
d_rays[idx] = ray_out;
// Kill rays with negligible throughput.
// Direct illumination.
// For each light...
/*
int num_lights = 0;
for ( int i=0; i < num_geoms; ++i )
{
// Ignore non-emitters.
if ( materials[geoms[i].materialid].emittance < 0.5f ) continue;
++ num_lights;
// 1) Sample a point on light
glm::vec3 point_on_light;
point_on_light = getRandomPointOnGeom(geoms[i], iterations+depth);
// 2) L += [throughput] * [avg of visible lights]
glm::vec3 direct_L(0.0f);
if ( isRayUnblocked(p, point_on_light, geoms, num_geoms) )
{
direct_L += throughput * materials[geoms[i].materialid].color
}
L += direct_L / (float) num_lights;
}
throughput = throughput * materials[material_id].color;
//glm::vec3 new_color = ;
SetAverageColor(colors, idx, new_color, iterations);
*/
}
__global__ void CompactRays(int* td_v, ray* d_rays, glm::vec3* d_lights, bool* d_is_ray_alive, int* d_ray_idx, int num_rays,
ray* d_rays_copy, glm::vec3* d_lights_copy, bool* d_is_ray_alive_copy, int* d_ray_idx_copy)
{
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if ( idx >= num_rays ) return;
if ( !d_is_ray_alive[idx] ) return;
int copy_idx = td_v[idx];
d_rays_copy[copy_idx] = d_rays[idx];
d_lights_copy[copy_idx] = d_lights[idx];
d_is_ray_alive_copy[copy_idx] = true;
d_ray_idx_copy[copy_idx] = d_ray_idx[idx];
}
// Wrapper for the __global__ call that sets up the kernel calls and does memory management
void cudaRaytraceCore(uchar4* PBOpos, camera* cam, int frame, int iterations, material* materials, int num_materials, geom* geoms, int num_geoms)
{
int width = cam->resolution.x;
int height = cam->resolution.y;
int num_pixels = width * height;
// Device memory size.
int tile_size = 8;
dim3 threadsPerBlock(tile_size, tile_size);
dim3 fullBlocksPerGrid(ceil((float)width/tile_size), ceil((float)height/tile_size));
// Copy image to GPU.
glm::vec3* d_image = NULL;
hipMalloc((void**)&d_image, num_pixels*sizeof(glm::vec3));
hipMemcpy(d_image, cam->image, num_pixels*sizeof(glm::vec3), hipMemcpyHostToDevice);
// Package geometry.
staticGeom* geomList = new staticGeom[num_geoms];
for ( int i=0; i<num_geoms; ++i )
{
staticGeom newStaticGeom;
newStaticGeom.type = geoms[i].type;
newStaticGeom.materialid = geoms[i].materialid;
newStaticGeom.translation = geoms[i].translations[frame];
newStaticGeom.rotation = geoms[i].rotations[frame];
newStaticGeom.scale = geoms[i].scales[frame];
newStaticGeom.transform = geoms[i].transforms[frame];
newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
geomList[i] = newStaticGeom;
}
// Copy geometry to GPU.
staticGeom* d_geoms = NULL;
hipMalloc((void**)&d_geoms, num_geoms*sizeof(staticGeom));
hipMemcpy( d_geoms, geomList, num_geoms*sizeof(staticGeom), hipMemcpyHostToDevice);
// Copy materials to GPU.
material* cudamaterials = NULL;
hipMalloc((void**)&cudamaterials, num_materials*sizeof(material));
hipMemcpy( cudamaterials, materials, num_materials*sizeof(material), hipMemcpyHostToDevice);
// Package camera.
cameraData cam_data;
cam_data.resolution = cam->resolution;
cam_data.position = cam->positions[frame];
cam_data.view = cam->views[frame];
cam_data.up = cam->ups[frame];
cam_data.fov = cam->fov;
// Allocate GPU memory for rays & initialize them.
ray* d_rays = NULL;
glm::vec3* d_lights = NULL;
bool* d_is_ray_alive = NULL;
int* d_ray_idx = NULL;
hipMalloc((void**)&d_rays, num_pixels*sizeof(ray));
hipMalloc((void**)&d_lights, num_pixels*sizeof(glm::vec3));
hipMalloc((void**)&d_is_ray_alive, num_pixels*sizeof(bool));
hipMalloc((void**)&d_ray_idx, num_pixels*sizeof(int));
hipLaunchKernelGGL(( InitRay), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, cam_data, iterations, d_rays, d_lights, d_is_ray_alive, d_ray_idx);
// Start raytracer kernel.
int num_rays = num_pixels;
int max_depth = 10; // # of bounces when raytracing.
for ( int depth = 0; depth < max_depth; ++depth )
{
// Determine # of kernels to launch based on # of rays.
int num_threads_per_block = 128;
int num_blocks_per_grid = ceil((float)num_rays / num_threads_per_block);
// Update d_rays & d_lights based on intersected object.
hipLaunchKernelGGL(( TraceRay), dim3(num_blocks_per_grid), dim3(num_threads_per_block), 0, 0, iterations, depth, max_depth, num_pixels, d_rays, num_rays, d_lights, d_is_ray_alive, d_ray_idx, d_image, d_geoms, num_geoms, cudamaterials, num_materials);
// Update d_rays by removing dead rays (stream compaction).
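    // Sketch of the compaction: an exclusive scan of the alive flags turns, say, [1, 0, 1, 1, 0]
    // into [0, 1, 1, 2, 3]; for each surviving ray the scan value is its destination index in the
    // compacted arrays, and the survivor count is (last scan value + last flag), computed below
    // as num_copy_rays.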
thrust::device_ptr<bool> td_is_ray_alive = thrust::device_pointer_cast(d_is_ray_alive);
thrust::device_vector<int> td_v(num_rays);
thrust::exclusive_scan(td_is_ray_alive, td_is_ray_alive + num_rays, td_v.begin());
// Allocate device memory for storing copy.
int num_copy_rays = td_v[num_rays-1] + (int) td_is_ray_alive[num_rays-1];
ray* d_rays_copy = NULL;
glm::vec3* d_lights_copy = NULL;
bool* d_is_ray_alive_copy = NULL;
int* d_ray_idx_copy = NULL;
hipMalloc((void**)&d_rays_copy, num_copy_rays*sizeof(ray));
hipMalloc((void**)&d_lights_copy, num_copy_rays*sizeof(glm::vec3));
hipMalloc((void**)&d_is_ray_alive_copy, num_copy_rays*sizeof(bool));
hipMalloc((void**)&d_ray_idx_copy, num_copy_rays*sizeof(int));
// Only copy living rays.
hipLaunchKernelGGL(( CompactRays), dim3(num_blocks_per_grid), dim3(num_threads_per_block), 0, 0, thrust::raw_pointer_cast(td_v.data()), d_rays, d_lights, d_is_ray_alive, d_ray_idx, num_rays, d_rays_copy, d_lights_copy, d_is_ray_alive_copy, d_ray_idx_copy);
hipDeviceSynchronize();
// Free old memory & update pointers to the copies.
hipFree(d_rays);
hipFree(d_lights);
hipFree(d_is_ray_alive);
hipFree(d_ray_idx);
num_rays = num_copy_rays;
d_rays = d_rays_copy;
d_lights = d_lights_copy;
d_is_ray_alive = d_is_ray_alive_copy;
d_ray_idx = d_ray_idx_copy;
}
hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, cam->resolution, d_image);
// Retrieve image from GPU.
hipMemcpy( cam->image, d_image, num_pixels*sizeof(glm::vec3), hipMemcpyDeviceToHost);
// Free memory.
hipFree( d_image );
hipFree( d_geoms );
hipFree( cudamaterials );
hipFree( d_rays );
hipFree( d_lights );
hipFree( d_is_ray_alive );
hipFree( d_ray_idx );
delete [] geomList;
// Make sure the kernel has completed.
hipDeviceSynchronize();
checkCUDAError("Kernel failed!");
}
|
4c3da4f19129c3b9948ab90c4f56bbc5bd133532.cu
|
// CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include "sceneStructs.h"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
#include <vector>
#include <cuda_runtime_api.h>
#include "glm/glm.hpp"
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/copy.h>
#include <iostream>
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
//LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
//Function that generates static.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
int index = x + (y * resolution.x);
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(0,1);
return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng));
}
//Kernel that blacks out a given image buffer
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
image[index] = glm::vec3(0,0,0);
}
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
glm::vec3 color;
color.x = image[index].x*255.0;
color.y = image[index].y*255.0;
color.z = image[index].z*255.0;
if(color.x>255){
color.x = 255;
}
if(color.y>255){
color.y = 255;
}
if(color.z>255){
color.z = 255;
}
    // Each thread writes one pixel location in the texture (texel)
PBOpos[index].w = 0;
PBOpos[index].x = color.x;
PBOpos[index].y = color.y;
PBOpos[index].z = color.z;
}
}
__device__ glm::vec3 reflect(glm::vec3 const & I, glm::vec3 const & N)
{
return I - 2.0f * glm::dot(N, I) * N;
}
__device__ bool isRayUnblocked(glm::vec3 const & point1, glm::vec3 const & point2, staticGeom* geoms, int numberOfGeoms)
{
glm::vec3 DIRECTION(point2 - point1);
float DISTANCE = glm::length(DIRECTION);
// Offset start position in ray direction by small distance to prevent self collisions
float DELTA = 0.001f;
ray r;
r.origin = point1 + DELTA * DIRECTION;
r.direction = glm::normalize(DIRECTION);
for (int i=0; i<numberOfGeoms; ++i)
{
glm::vec3 intersectionPoint;
glm::vec3 normal;
float intersectionDistance = geomIntersectionTest(geoms[i], r, intersectionPoint, normal);
// Does not intersect so check next primitive
if (intersectionDistance <= 0.0f) continue;
// Take into consideration intersection only between the two points.
if (intersectionDistance < DISTANCE) return false;
}
return true;
}
/*
__global__ void raytraceRay(glm::vec2 resolution, int time, float bounce, cameraData cam, int rayDepth, glm::vec3* colors,
staticGeom* geoms, int numberOfGeoms, material* materials, int numberOfMaterials, ray* d_rays)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if ( x >= resolution.x || y >= resolution.y ) return;
ray r = d_rays[index];
// ============================================
// Determine closest intersection with geometry
// ============================================
float distance = -1.0f;
glm::vec3 intersection;
glm::vec3 normal;
int materialIdx;
for (int i = 0; i < numberOfGeoms; ++i)
{
float newDistance;
glm::vec3 newIntersection;
glm::vec3 newNormal;
switch (geoms[i].type)
{
case SPHERE:
newDistance = sphereIntersectionTest(geoms[i], r, newIntersection, newNormal);
break;
case CUBE:
newDistance = boxIntersectionTest(geoms[i], r, newIntersection, newNormal);
break;
case MESH:
newDistance = -1.0f;
break;
}
if ( newDistance < 0.0f ) continue;
if ( distance < 0.0f || (distance > 0.0f && newDistance < distance) )
{
distance = newDistance;
intersection = newIntersection;
normal = newNormal;
materialIdx = geoms[i].materialid;
}
}
// ============================================
// Paint pixel
// ============================================
// No hit
if ( distance < 0.0f )
{
colors[index] = glm::vec3(0.0f, 0.0f, 0.0f);
//colors[index] = generateRandomNumberFromThread(resolution, time, x, y);
return;
}
// Simple local reflectance model (local illumination model formula)
float reflectivity = 0.0f;
float transmittance = 1.0f - reflectivity;
glm::vec3 materialColor = materials[materialIdx].color;
glm::vec3 reflectedColor(0.0f, 0.0f, 0.0f);
glm::vec3 ambientLightColor(1.0f, 1.0f, 1.0f);
float AMBIENT_WEIGHT = 0.2f; // Ka - Ambient reflectivity factor
float DIFFUSE_WEIGHT = 0.3f; // Kd - Diffuse reflectivity factor
float SPECULAR_WEIGHT = 0.5f; // Ks - Specular reflectivity factor
glm::vec3 lightColor(1.0f, 1.0f, 1.0f);
glm::vec3 color = AMBIENT_WEIGHT * ambientLightColor * materialColor;
thrust::default_random_engine rng(hash(index*time));
thrust::uniform_real_distribution<float> u01(-0.15f, 0.15f);
for ( int i = 0; i < 1; ++i)
{
glm::vec3 lightPosition(0.25f + (float) u01(rng), 1.0f, (float) u01(rng));
// Unit vector from intersection point to light source
glm::vec3 LIGHT_DIRECTION = glm::normalize(lightPosition - intersection);
// Direction of reflected light at intersection point
glm::vec3 LIGHT_REFLECTION = glm::normalize(reflect(-1.0f*LIGHT_DIRECTION, normal));
// Determine diffuse term
float diffuseTerm;
diffuseTerm = glm::dot(normal, LIGHT_DIRECTION);
diffuseTerm = glm::clamp(diffuseTerm, 0.0f, 1.0f);
// Determine specular term
float specularTerm = 0.0f;
if ( materials[materialIdx].specularExponent - 0.0f > 0.001f )
{
float SPECULAR_EXPONENT = materials[materialIdx].specularExponent;
glm::vec3 EYE_DIRECTION = glm::normalize(cam.position - intersection);
specularTerm = glm::dot(LIGHT_REFLECTION, EYE_DIRECTION);
specularTerm = pow(fmaxf(specularTerm, 0.0f), SPECULAR_EXPONENT);
specularTerm = glm::clamp(specularTerm, 0.0f, 1.0f);
}
if (isRayUnblocked(intersection, lightPosition, geoms, numberOfGeoms))
{
color += DIFFUSE_WEIGHT * lightColor * materialColor * diffuseTerm / 1.0f;
color += SPECULAR_WEIGHT * lightColor * specularTerm / 1.0f;
}
}
glm::vec3 new_color = reflectivity*reflectedColor + transmittance*color;
if ( time > 1 )
{
colors[index] += (new_color - colors[index]) / (float)time;
return;
}
colors[index] = new_color;
}
*/
// Requires:
// x = 0 to width-1
// y = 0 to height-1
// Jittering based only on random_seed (not x or y).
__host__ __device__ glm::vec3 GetRayDirectionFromCamera(const cameraData& cam, int x, int y, int random_seed)
{
float random1, random2; // Random # between 0 and 1 (from random_seed).
// Set random numbers.
{
thrust::default_random_engine rng(hash(random_seed));
thrust::uniform_real_distribution<float> u01(0,1);
random1 = u01(rng);
random2 = u01(rng);
}
float width = (float) cam.resolution.x;
float height = (float) cam.resolution.y;
glm::vec3 c(cam.view); // View direction (unit vector) from eye.
glm::vec3 e(cam.position); // Camera center position.
glm::vec3 m = e + c; // Midpoint of screen.
glm::vec3 u(cam.up); // Up vector.
glm::vec3 a = glm::cross(c, u); // c x u TODO: make sure this is well defined
glm::vec3 b = glm::cross(a, c); // a x c TODO: make sure this is well defined
glm::vec3 v; // Vertical vector from "m" to top of screen.
glm::vec3 h; // Horizontal vector from "m" to right of screen.
// Calculate v & h
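// (Geometry inferred from the code below; it assumes cam.fov.y is the full vertical FOV in degrees.)
// v spans half the screen height along the camera's up-like axis b, and h is scaled so the
// horizontal extent matches the image aspect ratio (width/height).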
{
float phi = cam.fov.y * PI / 180.0f / 2.0f;
float screen_ratio = height / width;
v = b * tan(phi) / (float)glm::length(b);
float theta = atan(glm::length(v)/screen_ratio / (float)glm::length(c));
h = a * (float)glm::length(c) * tan(theta) / (float)glm::length(a);
}
// Obtain a unit vector in the direction from the eye to a pixel point (x, y) on screen
float sx = (x + random1) / width; // Without jitter: x / (width - 1.0f)
float sy = (y + random2) / height; // y / (height - 1.0f)
glm::vec3 p = m - (2*sx - 1)*h - (2*sy - 1)*v; // World position of point (x, y) on screen
return glm::normalize(p-e);
}
// Initialize all rays using camera data.
// # of rays = # of pixels
__global__ void InitRay(cameraData cam, int random_seed, ray* d_rays, glm::vec3* d_lights, bool* d_is_ray_alive, int* d_ray_idx)
{
int width = cam.resolution.x;
int height = cam.resolution.y;
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int idx = x + (y * width);
if ( x >= width || y >= height ) return;
d_rays[idx].origin = cam.position;
d_rays[idx].direction = GetRayDirectionFromCamera(cam, x, y, random_seed);
d_lights[idx] = glm::vec3(1.0f);
d_is_ray_alive[idx] = true;
d_ray_idx[idx] = idx;
}
// Modifies:
// p: Intersection point.
// n: Normal unit vector at intersection.
// material_id: Of intersected object.
// Return true if intersected.
__device__ bool GetClosestIntersection(ray& r, staticGeom* geoms, int num_geoms, material* materials,
glm::vec3& p, glm::vec3& n, int& material_id)
{
float distance = -1.0f;
for ( int i=0; i < num_geoms; ++i )
{
// Ignore emitters.
//if ( IsEmitter(geoms[i].materialid, materials) ) continue;
glm::vec3 new_intersection;
glm::vec3 new_normal;
float new_distance = geomIntersectionTest(geoms[i], r, new_intersection, new_normal);
if ( new_distance < 0.0f ) continue;
if ( distance < 0.0f || (distance > 0.0f && new_distance < distance) )
{
distance = new_distance;
p = new_intersection;
n = new_normal;
material_id = geoms[i].materialid;
}
}
if ( distance < 0.0f) return false;
return true;
}
__host__ __device__ bool IsEmitter(int id, material* materials)
{
return ( materials[id].emittance > 0.5f );
}
__device__ void SetAverageColor(glm::vec3* colors, int idx, glm::vec3& new_color, int iterations)
{
if ( iterations > 1 )
{
colors[idx] += (new_color - colors[idx]) / (float)iterations;
return;
}
colors[idx] = new_color;
}
__global__ void TraceRay(int iterations, int depth, int max_depth, int num_pixels, ray* d_rays, int num_rays, glm::vec3* d_lights, bool* d_is_ray_alive, int* d_ray_idx,
glm::vec3* colors, staticGeom* geoms, int num_geoms, material* materials, int num_materials)
{
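// One thread per live ray. d_lights[idx] holds the accumulated path throughput: it starts at 1,
// is multiplied by the surface color at every bounce, and is resolved into a pixel color when the
// ray escapes to the background or hits an emitter, at which point the ray is marked dead for compaction.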
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
/*
int debug_i = 0;
if (x == 641 && y == 177)
{
debug_i ++;
}
debug_i ++;
*/
if ( idx >= num_rays ) return;
if ( !d_is_ray_alive[idx] ) return;
// Copy global memory to register.
ray ray_in = d_rays[idx];
glm::vec3 light = d_lights[idx];
bool is_intersected;
glm::vec3 p; // Intersection point.
glm::vec3 n; // Normal unit vector at intersection.
int material_id; // Of intersected object.
is_intersected = GetClosestIntersection(ray_in, geoms, num_geoms, materials, p, n, material_id);
// No hit, return (light * bg).
if ( !is_intersected )
{
glm::vec3 bg_color(0.2f);
glm::vec3 new_color = light * bg_color;
d_is_ray_alive[idx] = false;
SetAverageColor(colors, d_ray_idx[idx], new_color, iterations);
return;
}
// Hit emitter, return (light * emitter).
if ( IsEmitter(material_id, materials) )
{
glm::vec3 new_color = light * materials[material_id].color * materials[material_id].emittance;
d_is_ray_alive[idx] = false;
SetAverageColor(colors, d_ray_idx[idx], new_color, iterations);
return;
}
// Make ray_out in random direction.
ray ray_out;
//ray_out.direction = UniformRandomHemisphereDirection(n, (float) (iterations-1) * max_depth * num_pixels + depth * num_pixels + idx);
float xi1, xi2;
{
thrust::default_random_engine rng(hash((float) iterations * (depth+1) * idx));
thrust::uniform_real_distribution<float> u01(0,1);
xi1 = u01(rng);
xi2 = u01(rng);
}
if ( materials[material_id].hasReflective )
{
ray_out.direction = reflect(ray_in.direction, glm::normalize(n));
}
else
{
ray_out.direction = calculateRandomDirectionInHemisphere(glm::normalize(n), xi1, xi2);
}
ray_out.origin = p + 0.001f * ray_out.direction;
// Update light & ray.
d_lights[idx] = light * materials[material_id].color;
d_rays[idx] = ray_out;
// Kill rays with negligible throughput.
// Direct illumination.
// For each light...
/*
int num_lights = 0;
for ( int i=0; i < num_geoms; ++i )
{
// Ignore non-emitters.
if ( materials[geoms[i].materialid].emittance < 0.5f ) continue;
++ num_lights;
// 1) Sample a point on light
glm::vec3 point_on_light;
point_on_light = getRandomPointOnGeom(geoms[i], iterations+depth);
// 2) L += [throughput] * [avg of visible lights]
glm::vec3 direct_L(0.0f);
if ( isRayUnblocked(p, point_on_light, geoms, num_geoms) )
{
direct_L += throughput * materials[geoms[i].materialid].color
}
L += direct_L / (float) num_lights;
}
throughput = throughput * materials[material_id].color;
//glm::vec3 new_color = ;
SetAverageColor(colors, idx, new_color, iterations);
*/
}
__global__ void CompactRays(int* td_v, ray* d_rays, glm::vec3* d_lights, bool* d_is_ray_alive, int* d_ray_idx, int num_rays,
ray* d_rays_copy, glm::vec3* d_lights_copy, bool* d_is_ray_alive_copy, int* d_ray_idx_copy)
{
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if ( idx >= num_rays ) return;
if ( !d_is_ray_alive[idx] ) return;
int copy_idx = td_v[idx];
d_rays_copy[copy_idx] = d_rays[idx];
d_lights_copy[copy_idx] = d_lights[idx];
d_is_ray_alive_copy[copy_idx] = true;
d_ray_idx_copy[copy_idx] = d_ray_idx[idx];
}
// Wrapper for the __global__ call that sets up the kernel calls and does memory management
void cudaRaytraceCore(uchar4* PBOpos, camera* cam, int frame, int iterations, material* materials, int num_materials, geom* geoms, int num_geoms)
{
int width = cam->resolution.x;
int height = cam->resolution.y;
int num_pixels = width * height;
// Kernel launch configuration: 2D tiles over the image.
int tile_size = 8;
dim3 threadsPerBlock(tile_size, tile_size);
dim3 fullBlocksPerGrid(ceil((float)width/tile_size), ceil((float)height/tile_size));
// Copy image to GPU.
glm::vec3* d_image = NULL;
cudaMalloc((void**)&d_image, num_pixels*sizeof(glm::vec3));
cudaMemcpy(d_image, cam->image, num_pixels*sizeof(glm::vec3), cudaMemcpyHostToDevice);
// Package geometry.
staticGeom* geomList = new staticGeom[num_geoms];
for ( int i=0; i<num_geoms; ++i )
{
staticGeom newStaticGeom;
newStaticGeom.type = geoms[i].type;
newStaticGeom.materialid = geoms[i].materialid;
newStaticGeom.translation = geoms[i].translations[frame];
newStaticGeom.rotation = geoms[i].rotations[frame];
newStaticGeom.scale = geoms[i].scales[frame];
newStaticGeom.transform = geoms[i].transforms[frame];
newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
geomList[i] = newStaticGeom;
}
// Copy geometry to GPU.
staticGeom* d_geoms = NULL;
cudaMalloc((void**)&d_geoms, num_geoms*sizeof(staticGeom));
cudaMemcpy( d_geoms, geomList, num_geoms*sizeof(staticGeom), cudaMemcpyHostToDevice);
// Copy materials to GPU.
material* cudamaterials = NULL;
cudaMalloc((void**)&cudamaterials, num_materials*sizeof(material));
cudaMemcpy( cudamaterials, materials, num_materials*sizeof(material), cudaMemcpyHostToDevice);
// Package camera.
cameraData cam_data;
cam_data.resolution = cam->resolution;
cam_data.position = cam->positions[frame];
cam_data.view = cam->views[frame];
cam_data.up = cam->ups[frame];
cam_data.fov = cam->fov;
// Allocate GPU memory for rays & initialize them.
ray* d_rays = NULL;
glm::vec3* d_lights = NULL;
bool* d_is_ray_alive = NULL;
int* d_ray_idx = NULL;
cudaMalloc((void**)&d_rays, num_pixels*sizeof(ray));
cudaMalloc((void**)&d_lights, num_pixels*sizeof(glm::vec3));
cudaMalloc((void**)&d_is_ray_alive, num_pixels*sizeof(bool));
cudaMalloc((void**)&d_ray_idx, num_pixels*sizeof(int));
InitRay<<<fullBlocksPerGrid, threadsPerBlock>>>(cam_data, iterations, d_rays, d_lights, d_is_ray_alive, d_ray_idx);
// Start raytracer kernel.
int num_rays = num_pixels;
int max_depth = 10; // # of bounces when raytracing.
for ( int depth = 0; depth < max_depth; ++depth )
{
// Determine # of kernels to launch based on # of rays.
int num_threads_per_block = 128;
int num_blocks_per_grid = ceil((float)num_rays / num_threads_per_block);
// Update d_rays & d_lights based on intersected object.
TraceRay<<<num_blocks_per_grid, num_threads_per_block>>>(iterations, depth, max_depth, num_pixels, d_rays, num_rays, d_lights, d_is_ray_alive, d_ray_idx, d_image, d_geoms, num_geoms, cudamaterials, num_materials);
// Update d_rays by removing dead rays (stream compaction).
thrust::device_ptr<bool> td_is_ray_alive = thrust::device_pointer_cast(d_is_ray_alive);
thrust::device_vector<int> td_v(num_rays);
thrust::exclusive_scan(td_is_ray_alive, td_is_ray_alive + num_rays, td_v.begin());
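// The exclusive scan over the alive flags gives each surviving ray its destination index in the
// compacted buffers; the survivor count is the last scan value plus the last flag (computed below).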
// Allocate device memory for storing copy.
int num_copy_rays = td_v[num_rays-1] + (int) td_is_ray_alive[num_rays-1];
ray* d_rays_copy = NULL;
glm::vec3* d_lights_copy = NULL;
bool* d_is_ray_alive_copy = NULL;
int* d_ray_idx_copy = NULL;
cudaMalloc((void**)&d_rays_copy, num_copy_rays*sizeof(ray));
cudaMalloc((void**)&d_lights_copy, num_copy_rays*sizeof(glm::vec3));
cudaMalloc((void**)&d_is_ray_alive_copy, num_copy_rays*sizeof(bool));
cudaMalloc((void**)&d_ray_idx_copy, num_copy_rays*sizeof(int));
// Only copy living rays.
CompactRays<<<num_blocks_per_grid, num_threads_per_block>>>(thrust::raw_pointer_cast(td_v.data()), d_rays, d_lights, d_is_ray_alive, d_ray_idx, num_rays, d_rays_copy, d_lights_copy, d_is_ray_alive_copy, d_ray_idx_copy);
cudaDeviceSynchronize();
// Free old memory & update pointers to the copies.
cudaFree(d_rays);
cudaFree(d_lights);
cudaFree(d_is_ray_alive);
cudaFree(d_ray_idx);
num_rays = num_copy_rays;
d_rays = d_rays_copy;
d_lights = d_lights_copy;
d_is_ray_alive = d_is_ray_alive_copy;
d_ray_idx = d_ray_idx_copy;
// Stop early once every ray has terminated; launching the next iteration with zero blocks is invalid.
if (num_rays == 0) break;
}
sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, cam->resolution, d_image);
// Retrieve image from GPU.
cudaMemcpy( cam->image, d_image, num_pixels*sizeof(glm::vec3), cudaMemcpyDeviceToHost);
// Free memory.
cudaFree( d_image );
cudaFree( d_geoms );
cudaFree( cudamaterials );
cudaFree( d_rays );
cudaFree( d_lights );
cudaFree( d_is_ray_alive );
cudaFree( d_ray_idx );
delete [] geomList;
// Make sure the kernel has completed.
cudaDeviceSynchronize();
checkCUDAError("Kernel failed!");
}
|
722ccb1a9fe68479b70b50c54f98865eb7ca9018.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/prelu.h"
#include "paddle/fluid/operators/prelu_op.h"
#include "paddle/fluid/operators/reduce_ops/cub_reduce.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
#define CUDA_NUM_THREADS 1024
inline static int PADDLE_GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
template <typename DeviceContext, typename T>
class CUDAPReluKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<Tensor>("X");
auto* alpha = context.Input<Tensor>("Alpha");
auto* out = context.Output<Tensor>("Out");
const T* x_ptr = x->data<T>();
T* o_ptr = out->mutable_data<T>(context.GetPlace());
const T* alpha_ptr = alpha->data<T>();
auto& mode = context.Attr<std::string>("mode");
int numel = x->numel();
auto dim = x->dims();
VLOG(4) << "dim[0]:" << dim[0] << ", dim[1]:" << dim[1]
<< ", numel:" << numel;
if (mode == "channel") {
math::PreluChannelWiseDirectCUDAFunctor<T> prelu_channel_wise;
prelu_channel_wise(context.cuda_device_context().stream(), x_ptr,
alpha_ptr, o_ptr, dim[0], dim[1], numel);
} else if (mode == "element") {
math::PreluElementWiseDirectCUDAFunctor<T> prelu_element_wise;
prelu_element_wise(context.cuda_device_context().stream(), x_ptr,
alpha_ptr, o_ptr, dim[0], numel);
} else {
math::PreluScalarDirectCUDAFunctor<T> prelu_scalar;
prelu_scalar(context.cuda_device_context().stream(), x_ptr, alpha_ptr,
o_ptr, numel);
}
}
};
enum PRELU_MODE { Element, Channel, Scalar };
template <typename T>
__global__ void PReluOpGradKernel(const T* x_ptr, const T* alpha_ptr,
const T* dy_ptr, T* dx_ptr, T* dalpha_ptr,
size_t channel_num, size_t plane_size,
size_t spatial_size, size_t numel,
PRELU_MODE mode) {
CUDA_KERNEL_LOOP(index, numel) {
T scale;
if (mode == Element) {
size_t element_index = index % spatial_size;
scale = alpha_ptr[element_index];
} else if (mode == Channel) {
size_t temp = index / plane_size;
size_t channel_index = temp % channel_num;
scale = alpha_ptr[channel_index];
} else {
scale = alpha_ptr[0];
}
T x = x_ptr[index];
T dy = dy_ptr[index];
if (dx_ptr != nullptr) dx_ptr[index] = (x > 0) ? dy : scale * dy;
if (dalpha_ptr != nullptr) dalpha_ptr[index] = (x > 0) ? 0 : x * dy;
}
}
template <typename T>
class PreluOpGradFunctor {
public:
void operator()(hipStream_t stream, const T* x, const T* alpha, const T* dy,
T* dx, T* dalpha, const framework::DDim& input_dims,
PRELU_MODE mode) {
size_t numel = 1;
for (size_t i = 0; i < input_dims.size(); ++i) {
numel *= input_dims[i];
}
size_t plane_size = numel / input_dims[0] / input_dims[1];
size_t spatial_size = numel / input_dims[0];
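// Assuming the usual NCHW layout: plane_size = H*W (elements per channel) and
// spatial_size = C*H*W (elements per sample), matching how the kernel above indexes
// alpha in "element" and "channel" modes.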
hipLaunchKernelGGL(( PReluOpGradKernel<
T>), dim3(PADDLE_GET_BLOCKS(numel)), dim3(CUDA_NUM_THREADS), 0, stream,
x, alpha, dy, dx, dalpha, input_dims[1], plane_size, spatial_size,
numel, mode);
}
};
template <typename T>
struct IdentityFunctor {
HOSTDEVICE inline T operator()(const T& x) const { return x; }
};
template <typename DeviceContext, typename T>
class CUDAPReluGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<Tensor>("X");
auto* alpha = context.Input<Tensor>("Alpha");
auto* dx = context.Output<Tensor>(framework::GradVarName("X"));
auto* dy = context.Input<Tensor>(framework::GradVarName("Out"));
auto* dalpha = context.Output<Tensor>(framework::GradVarName("Alpha"));
const T* x_ptr = x->data<T>();
const T* alpha_ptr = alpha->data<T>();
const T* dy_ptr = dy->data<T>();
T* dx_ptr = dx ? dx->mutable_data<T>(context.GetPlace()) : nullptr;
T* dalpha_ptr =
dalpha ? dalpha->mutable_data<T>(context.GetPlace()) : nullptr;
if (!dx && !dalpha) return;
auto& mode = context.Attr<std::string>("mode");
int numel = x->numel();
auto dim = x->dims();
std::vector<int> input_shape = framework::vectorize<int>(dim);
auto stream = context.cuda_device_context().stream();
T* dalpha_tmp_ptr;
Tensor dalpha_tmp;
if (dalpha_ptr == nullptr) {
dalpha_tmp_ptr = dalpha_ptr;
} else {
auto& dev_ctx = context.template device_context<DeviceContext>();
dalpha_tmp = context.AllocateTmpTensor<T, DeviceContext>(dim, dev_ctx);
dalpha_tmp_ptr = dalpha_tmp.mutable_data<T>(context.GetPlace());
}
PRELU_MODE m;
if (mode == "element") {
m = Element;
} else if (mode == "channel") {
m = Channel;
} else {
m = Scalar;
}
PreluOpGradFunctor<T> prelu_grad;
prelu_grad(stream, x_ptr, alpha_ptr, dy_ptr, dx_ptr, dalpha_tmp_ptr, dim,
m);
if (dalpha_tmp_ptr == nullptr) return;
std::vector<int> reduce_dims;
for (size_t i = 0; i < dim.size(); i++) {
if (mode == "channel" && i == 1) continue;
if (mode == "element" && i != 0) continue;
reduce_dims.push_back(i);
}
TensorReduce<T, T, hipcub::Sum, IdentityFunctor<T>>(
dalpha_tmp, dalpha, reduce_dims, static_cast<T>(0), hipcub::Sum(),
IdentityFunctor<T>(), stream);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
prelu, ops::CUDAPReluKernel<paddle::platform::CUDADeviceContext, float>,
ops::CUDAPReluKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
prelu_grad,
ops::CUDAPReluGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::CUDAPReluGradKernel<paddle::platform::CUDADeviceContext, double>);
|
722ccb1a9fe68479b70b50c54f98865eb7ca9018.cu
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <string>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/prelu.h"
#include "paddle/fluid/operators/prelu_op.h"
#include "paddle/fluid/operators/reduce_ops/cub_reduce.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
#define CUDA_NUM_THREADS 1024
inline static int PADDLE_GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
template <typename DeviceContext, typename T>
class CUDAPReluKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<Tensor>("X");
auto* alpha = context.Input<Tensor>("Alpha");
auto* out = context.Output<Tensor>("Out");
const T* x_ptr = x->data<T>();
T* o_ptr = out->mutable_data<T>(context.GetPlace());
const T* alpha_ptr = alpha->data<T>();
auto& mode = context.Attr<std::string>("mode");
int numel = x->numel();
auto dim = x->dims();
VLOG(4) << "dim[0]:" << dim[0] << ", dim[1]:" << dim[1]
<< ", numel:" << numel;
if (mode == "channel") {
math::PreluChannelWiseDirectCUDAFunctor<T> prelu_channel_wise;
prelu_channel_wise(context.cuda_device_context().stream(), x_ptr,
alpha_ptr, o_ptr, dim[0], dim[1], numel);
} else if (mode == "element") {
math::PreluElementWiseDirectCUDAFunctor<T> prelu_element_wise;
prelu_element_wise(context.cuda_device_context().stream(), x_ptr,
alpha_ptr, o_ptr, dim[0], numel);
} else {
math::PreluScalarDirectCUDAFunctor<T> prelu_scalar;
prelu_scalar(context.cuda_device_context().stream(), x_ptr, alpha_ptr,
o_ptr, numel);
}
}
};
enum PRELU_MODE { Element, Channel, Scalar };
template <typename T>
__global__ void PReluOpGradKernel(const T* x_ptr, const T* alpha_ptr,
const T* dy_ptr, T* dx_ptr, T* dalpha_ptr,
size_t channel_num, size_t plane_size,
size_t spatial_size, size_t numel,
PRELU_MODE mode) {
CUDA_KERNEL_LOOP(index, numel) {
T scale;
if (mode == Element) {
size_t element_index = index % spatial_size;
scale = alpha_ptr[element_index];
} else if (mode == Channel) {
size_t temp = index / plane_size;
size_t channel_index = temp % channel_num;
scale = alpha_ptr[channel_index];
} else {
scale = alpha_ptr[0];
}
T x = x_ptr[index];
T dy = dy_ptr[index];
if (dx_ptr != nullptr) dx_ptr[index] = (x > 0) ? dy : scale * dy;
if (dalpha_ptr != nullptr) dalpha_ptr[index] = (x > 0) ? 0 : x * dy;
}
}
template <typename T>
class PreluOpGradFunctor {
public:
void operator()(cudaStream_t stream, const T* x, const T* alpha, const T* dy,
T* dx, T* dalpha, const framework::DDim& input_dims,
PRELU_MODE mode) {
size_t numel = 1;
for (size_t i = 0; i < input_dims.size(); ++i) {
numel *= input_dims[i];
}
size_t plane_size = numel / input_dims[0] / input_dims[1];
size_t spatial_size = numel / input_dims[0];
PReluOpGradKernel<
T><<<PADDLE_GET_BLOCKS(numel), CUDA_NUM_THREADS, 0, stream>>>(
x, alpha, dy, dx, dalpha, input_dims[1], plane_size, spatial_size,
numel, mode);
}
};
template <typename T>
struct IdentityFunctor {
HOSTDEVICE inline T operator()(const T& x) const { return x; }
};
template <typename DeviceContext, typename T>
class CUDAPReluGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<Tensor>("X");
auto* alpha = context.Input<Tensor>("Alpha");
auto* dx = context.Output<Tensor>(framework::GradVarName("X"));
auto* dy = context.Input<Tensor>(framework::GradVarName("Out"));
auto* dalpha = context.Output<Tensor>(framework::GradVarName("Alpha"));
const T* x_ptr = x->data<T>();
const T* alpha_ptr = alpha->data<T>();
const T* dy_ptr = dy->data<T>();
T* dx_ptr = dx ? dx->mutable_data<T>(context.GetPlace()) : nullptr;
T* dalpha_ptr =
dalpha ? dalpha->mutable_data<T>(context.GetPlace()) : nullptr;
if (!dx && !dalpha) return;
auto& mode = context.Attr<std::string>("mode");
int numel = x->numel();
auto dim = x->dims();
std::vector<int> input_shape = framework::vectorize<int>(dim);
auto stream = context.cuda_device_context().stream();
T* dalpha_tmp_ptr;
Tensor dalpha_tmp;
if (dalpha_ptr == nullptr) {
dalpha_tmp_ptr = dalpha_ptr;
} else {
auto& dev_ctx = context.template device_context<DeviceContext>();
dalpha_tmp = context.AllocateTmpTensor<T, DeviceContext>(dim, dev_ctx);
dalpha_tmp_ptr = dalpha_tmp.mutable_data<T>(context.GetPlace());
}
PRELU_MODE m;
if (mode == "element") {
m = Element;
} else if (mode == "channel") {
m = Channel;
} else {
m = Scalar;
}
PreluOpGradFunctor<T> prelu_grad;
prelu_grad(stream, x_ptr, alpha_ptr, dy_ptr, dx_ptr, dalpha_tmp_ptr, dim,
m);
if (dalpha_tmp_ptr == nullptr) return;
std::vector<int> reduce_dims;
for (size_t i = 0; i < dim.size(); i++) {
if (mode == "channel" && i == 1) continue;
if (mode == "element" && i != 0) continue;
reduce_dims.push_back(i);
}
TensorReduce<T, T, cub::Sum, IdentityFunctor<T>>(
dalpha_tmp, dalpha, reduce_dims, static_cast<T>(0), cub::Sum(),
IdentityFunctor<T>(), stream);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
prelu, ops::CUDAPReluKernel<paddle::platform::CUDADeviceContext, float>,
ops::CUDAPReluKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
prelu_grad,
ops::CUDAPReluGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::CUDAPReluGradKernel<paddle::platform::CUDADeviceContext, double>);
|
18de8f1e3b4c5d3074a6ac68da705ecf4b53b727.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mycommon.h"
#define BSIZE 256
__constant__ int nfeat;
__constant__ int ntrain;
__constant__ int ntest;
__constant__ int nclass;
__constant__ int k;
__constant__ int nnegibor;
__constant__ double mu;
__constant__ double nu[4];
__constant__ int idx_o;
__constant__ int *target;
__constant__ double *km_train;
__constant__ double *km_test;
__constant__ double *O[2];
__constant__ double *t_target;
__constant__ double *t_triplet;
__constant__ double *t_update;
__constant__ double *t_gradient;
__constant__ short *label_train;
__constant__ short *label_test;
__constant__ struct Inst *grouped_inst;
__constant__ struct Inst *type_inst[4];
__constant__ unsigned typecount[4];
__constant__ int *target_offset;
__constant__ int nn[4];
__constant__ double *dist_target;
__constant__ double *dist1;
__constant__ double *dist2;
__constant__ double *hinge_val;
__constant__ double *dist_knn;
__constant__ int *ino_knn;
__constant__ int *neighbor_knn;
__device__ double f_val;
__device__ double sub_fval[84];
__device__ double acc_knn;
__device__ int hits[4];
__device__ void kernelMatrix(double *km, double *d1, int n1, double *d2, int n2){
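// Fills km (n1 x n2) with a Gaussian/RBF kernel: km[i][j] = exp(-||d1_i - d2_j||^2 / nfeat).
// Inputs are laid out feature-by-feature (d[n * ninst + i]); one grid-stride loop entry per matrix element.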
int ub = n1 * n2;
int stride = blockDim.x * gridDim.x;
double c_val;
int i, j;
for (int m = blockIdx.x * blockDim.x + threadIdx.x; m < ub; m += stride){
i = m / n2;
j = m % n2;
c_val = .0;
for (int n = 0; n < nfeat; ++ n)
c_val += pow(d1[n * n1 + i] - d2[n * n2 + j], 2);
km[m] = exp(-c_val / nfeat);
}
}
__global__ void calcKM(double *train, double *test){
kernelMatrix(km_train, train, ntrain, train, ntrain);
kernelMatrix(km_test, test, ntest, train, ntrain);
}
__device__ double getElement(double *m, int i, int j, int stride){
return *(m + i * stride + j);
}
__device__ void setElement(double *m, int i, int j, int stride, double val){
m[i * stride + j] = val;
}
__device__ int getElementInt(int *m, int i, int j, int stride){
return *(m + i * stride + j);
}
__device__ void setElementInt(int *m, int i, int j, int stride, int val){
m[i * stride + j] = val;
}
//__device__ int getTarget(int i, int kk){
// return target[i * k + kk];
//}
__device__ int getTargetByOffset(int ino, int kk){
return target[target_offset[ino] + kk];
}
__device__ void setTargetByOffset(int ino, int kk, int t){
target[target_offset[ino] + kk] = t;
}
__device__ double getTargetDist(int ino, int kk){
return dist_target[target_offset[ino] + kk];
}
__device__ double calcDist(int i, double *km1, int j, double *km2){
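// Computes the squared distance ||O (k_i - k_j)||^2 between instances i and j in the space induced
// by the current transform O[idx_o] (nfeat x ntrain), where k_i is row i of km1 and k_j is row j of km2.
// The whole block cooperates on one distance; the tree reduction assumes blockDim.x is a power of two
// and nfeat <= 64 (the size of the shared "norm" buffer).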
int tid = threadIdx.x;
__shared__ double diff_k[256];
__shared__ double sum[256];
__shared__ double norm[64];
if (tid < 64)
norm[tid] = .0;
int pos;
for (int m = 0; m < (ntrain - 1)/blockDim.x + 1; ++ m){
__syncthreads();
pos = m * blockDim.x + tid;
if (pos < ntrain)
diff_k[tid] = getElement(km1, i, pos, ntrain) - getElement(km2, j, pos, ntrain);
for (int d = 0; d < nfeat; ++ d){
__syncthreads();
if (pos < ntrain)
sum[tid] = getElement(O[idx_o], d, pos, ntrain) * diff_k[tid];
else
sum[tid] = .0;
int stride = blockDim.x/2;
while (stride > 0){
__syncthreads();
if (tid < stride)
sum[tid] += sum[tid + stride];
stride /= 2;
}
__syncthreads();
if (tid == 0)
norm[d] += sum[0];
}
}
if (tid < nfeat)
norm[tid] = norm[tid]*norm[tid];
__syncthreads();
double s = .0;
for (int d = 0; d < nfeat; ++ d)
s += norm[d];
return s;
}
__device__ void calcTargetDist(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int i, j;
if (tid == 0)
sub_fval[bid] = .0;
int c = 0;
for (int m = 0; m < ntrain; ++ m)
for (int n = 0; n < nn[label_train[m]]; ++ n){
i = m;
j = getTargetByOffset(m, n);
if(c%gridDim.x == bid){
double val = calcDist(i, km_train, j, km_train);
if (tid == 0){
dist_target[target_offset[m] + n] = val;
sub_fval[bid] += val;
}
}
++ c;
}
}
__device__ void updateDist(double *dist, struct Inst * inst1, int height, struct Inst * inst2, int width){
int tid = threadIdx.x;
int bid = blockIdx.x;
int i, j;
for (int m = bid; m < height * width; m += gridDim.x){
i = inst1[m / width].ino;
j = inst2[m % width].ino;
double val = calcDist(i, km_train, j, km_train);
if (tid == 0)
dist[m] = val;
}
}
__global__ void update2(){
calcTargetDist();
updateDist(dist1, type_inst[TN], typecount[TN], type_inst[FN], typecount[FN]);
if (nclass == 4)
updateDist(dist2, type_inst[TP], typecount[TP], type_inst[FP], typecount[FP]);
}
__device__ double hinge(double s){
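// Returns 0 for s <= -1, 1 for s >= 0, and 1+s in between. It is used below as a per-triplet weight
// on the gradient contributions (presumably a smoothed/huberized subgradient of the hinge penalty).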
if (s <= -1.0)
return .0;
else if (s >= 0)
return 1.0;
else
return 1 + s;
}
__device__ void updateTri(int idx1, int idx2, int idx3, double h){
__syncthreads();
for (int p = threadIdx.x; p < ntrain; p += blockDim.x)
t_triplet[p * ntrain + idx1] += h * (getElement(km_train, idx2, p, ntrain) - getElement(km_train, idx3, p, ntrain));
}
__global__ void zeroT_triplet(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x * blockDim.x;
for (int m = blockDim.x * bid + tid; m < ntrain * ntrain; m += size)
t_triplet[m] = .0;
}
__global__ void update3_2(){
int bid = blockIdx.x;
int i, j, l;
double vdist, h;
if (bid == 0 && threadIdx.x == 0)
f_val = .0;
for (int m = 0; m < typecount[TN] * typecount[FN]; ++ m){
i = type_inst[TN][m / typecount[FN]].ino;
l = type_inst[FN][m % typecount[FN]].ino;
for (int kk = 0; kk < nn[TN]; ++ kk){
j = getTargetByOffset(i, kk);
vdist = 1 + dist_target[target_offset[i] + kk] - dist1[m];
if (vdist > 0 && blockIdx.x == 0 && threadIdx.x == 0)
f_val += vdist;
h = hinge(vdist);
if (h > 0){
//if (label_train[i] == TP)
h *= nu[label_train[i]];
if (i % gridDim.x == bid)
updateTri(i, l, j, h);
if (j % gridDim.x == bid)
updateTri(j, j, i, h);
if (l % gridDim.x == bid)
updateTri(l, i, l, h);
}
}
l = type_inst[TN][m / typecount[FN]].ino;
i = type_inst[FN][m % typecount[FN]].ino;
for (int kk = 0; kk < nn[FN]; ++ kk){
j = getTargetByOffset(i, kk);
vdist = 1 + dist_target[target_offset[i] + kk] - dist1[m];
if (vdist > 0 && blockIdx.x == 0 && threadIdx.x == 0)
f_val += vdist;
h = hinge(vdist);
if (h > 0){
//if (label_train[i] == TP)
h *= nu[label_train[i]];
if (i % gridDim.x == bid)
updateTri(i, l, j, h);
if (j % gridDim.x == bid)
updateTri(j, j, i, h);
if (l % gridDim.x == bid)
updateTri(l, i, l, h);
}
}
}
if (nclass == 4){
for (int m = 0; m < typecount[TP] * typecount[FP]; ++ m){
i = type_inst[TP][m / typecount[FP]].ino;
l = type_inst[FP][m % typecount[FP]].ino;
for (int kk = 0; kk < nn[TP]; ++ kk){
j = getTargetByOffset(i, kk);
vdist = 1 + dist_target[target_offset[i] + kk] - dist2[m];
if (vdist > 0 && blockIdx.x == 0 && threadIdx.x == 0)
f_val += vdist;
h = hinge(vdist);
if (h > 0){
h *= nu[label_train[i]];
if (i % gridDim.x == bid)
updateTri(i, l, j, h);
if (j % gridDim.x == bid)
updateTri(j, j, i, h);
if (l % gridDim.x == bid)
updateTri(l, i, l, h);
}
}
l = type_inst[TP][m / typecount[FP]].ino;
i = type_inst[FP][m % typecount[FP]].ino;
for (int kk = 0; kk < nn[FP]; ++ kk){
j = getTargetByOffset(i, kk);
vdist = 1 + dist_target[target_offset[i] + kk] - dist2[m];
if (vdist > 0 && blockIdx.x == 0 && threadIdx.x == 0)
f_val += vdist;
h = hinge(vdist);
if (h > 0){
h *= nu[label_train[i]];
if (i % gridDim.x == bid)
updateTri(i, l, j, h);
if (j % gridDim.x == bid)
updateTri(j, j, i, h);
if (l % gridDim.x == bid)
updateTri(l, i, l, h);
}
}
}
}
}
__global__ void calcFval(){
if (blockIdx.x == 0 && threadIdx.x == 0)
for (int i = 0; i < gridDim.x; ++ i)
f_val += sub_fval[i];
}
__global__ void updateUpdateTerm(double alpha){
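// Builds t_update = I - 2 * alpha * (t_target + mu * t_triplet), i.e. the matrix that applies one
// gradient-descent step to Omega by right-multiplication in updateO1 below.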
int size = gridDim.x * blockDim.x;
for (int m = blockIdx.x * blockDim.x + threadIdx.x; m < ntrain * ntrain; m += size){
if (m/ntrain == m%ntrain)
t_update[m] = 1 - 2 * alpha * (t_target[m] + mu * t_triplet[m]);
else
t_update[m] = - 2 * alpha * (t_target[m] + mu * t_triplet[m]);
}
}
__global__ void copyO(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x * blockDim.x;
for (int m = blockDim.x * bid + tid; m < nfeat * ntrain; m += size)
O[idx_o][m] = O[1 - idx_o][m];
}
__global__ void zeroO(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x * blockDim.x;
for (int m = blockDim.x * bid + tid; m < nfeat * ntrain; m += size)
O[1 - idx_o][m] = .0;
}
__global__ void updateO1(){
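// Blocked matrix multiply: O[1-idx_o] = O[idx_o] * t_update. blockIdx.x selects the output row,
// blockIdx.y selects a BSIZE-wide tile of output columns, and each thread accumulates one output element.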
int tid = threadIdx.x;
int bid_row = blockIdx.x;
int bid_col = blockIdx.y;
int workingtid = min(BSIZE, ntrain - bid_col * BSIZE);
if (tid < workingtid)
O[1 - idx_o][bid_row * ntrain + bid_col * BSIZE + tid] = .0;
for (int start = 0; start < ntrain; start += BSIZE){
int len = min(BSIZE, ntrain - start);
for (int i = 0; i < len; ++ i){
if (tid < workingtid){
double val = getElement(O[idx_o], bid_row, start + i, ntrain) * getElement(t_update, i + start, bid_col * BSIZE + tid, ntrain);
O[1 - idx_o][bid_row * ntrain + bid_col * BSIZE + tid] += val;
}
}
}
}
__global__ void knnUpdateDist(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x;
for(int m = bid; m < ntest * ntrain; m += size){
int i = m / ntrain;
int j = m % ntrain;
double d = DBL_MAX;
if (nclass == 2)
d = calcDist(i, km_test, j, km_train);
else{
if (label_test[i] == label_train[j] || label_test[i] + label_train[j] == 3)
d = calcDist(i, km_test, j, km_train);
}
if (tid == 0){
ino_knn[m] = j;
dist_knn[m] = d;
}
}
}
// launched with # blocks = ntest
__global__ void knnFindNeighbor(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int len = ntrain / BSIZE;
int start = tid * len;
if (tid < ntrain % BSIZE){
start += tid;
++ len;
}
else
start += ntrain % BSIZE;
__syncthreads();
int b = min(len, nnegibor);
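// Two-phase selection: each thread partially bubble-sorts its own chunk so its b nearest candidates
// sit at the end of the chunk, then the chunks are merged below one nearest neighbor at a time.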
for (int i = 0; i < b; ++ i)
for (int j = start; j < start + len - i - 1; ++ j)
if(getElement(dist_knn, bid, j, ntrain) < getElement(dist_knn, bid, j + 1, ntrain)){
double tmp_dist = getElement(dist_knn, bid, j, ntrain);
setElement(dist_knn, bid, j, ntrain, getElement(dist_knn, bid, j + 1, ntrain));
setElement(dist_knn, bid, j + 1, ntrain, tmp_dist);
int tmp_ino = getElementInt(ino_knn, bid, j, ntrain);
setElementInt(ino_knn, bid, j, ntrain, getElementInt(ino_knn, bid, j + 1, ntrain));
setElementInt(ino_knn, bid, j + 1, ntrain, tmp_ino);
}
__syncthreads();
__shared__ double dist[BSIZE];
__shared__ int ino[BSIZE];
__shared__ int shortest[BSIZE];
int p = start + len -1;
for (int i = 0; i < nnegibor; ++ i){
if (b > 0){
dist[tid] = getElement(dist_knn, bid, p, ntrain);
ino[tid] = getElementInt(ino_knn, bid, p, ntrain);
}
else
dist[tid] = DBL_MAX;
shortest[tid] = tid;
int stride = blockDim.x/2;
while (stride > 0){
__syncthreads();
if (tid < stride){
if (dist[tid] > dist[tid + stride]){
dist[tid] = dist[tid + stride];
ino[tid] = ino[tid + stride];
shortest[tid] = shortest[tid + stride];
}
}
stride /= 2;
}
__syncthreads();
if(tid == 0)
setElementInt(neighbor_knn, bid, i, nnegibor, ino[0]);
if(tid == shortest[0]){
-- b;
-- p;
}
}
}
__global__ void knnMatching(){
int ub = ntest * nnegibor;
int stride = blockDim.x * gridDim.x;
int idx_test, idx_train;
for (int m = blockIdx.x * blockDim.x + threadIdx.x; m < ub; m += stride){
idx_test = m / nnegibor;
idx_train = neighbor_knn[m];
if (label_test[idx_test] == label_train[idx_train])
neighbor_knn[m] = 1;
else
neighbor_knn[m] = 0;
}
}
// launch with a single block
__global__ void knnAcc(int neiborhood_size){
int tid = threadIdx.x;
int stride = blockDim.x;
if (tid < 4)
hits[tid] = 0;
__shared__ int matched[BSIZE];
matched[tid] = 0;
for (int m = tid; m < ntest; m += stride){
int nsametype = 0;
for (int i = 0; i < neiborhood_size; ++ i)
nsametype += neighbor_knn[m * nnegibor + i];
if (nsametype > neiborhood_size/2){
matched[tid] += 1;
if (label_test[m] == FN || label_test[m] == FP)
atomicAdd(&hits[label_test[m]], 1);
}
else{
if (label_test[m] == TN || label_test[m] == TP)
atomicSub(&hits[label_test[m]], 1);
}
}
int stride1 = blockDim.x/2;
while (stride1 > 0){
__syncthreads();
if (tid < stride1)
matched[tid] += matched[tid + stride1];
stride1 /= 2;
}
__syncthreads();
if (tid ==0)
acc_knn = 1.0 * matched[0] / ntest;
}
__global__ void knnUpdateDist_train(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x;
for(int m = bid; m < ntrain * ntrain; m += size){
int i = m / ntrain;
int j = m % ntrain;
double d = DBL_MAX;
if (i != j)
if (nclass == 2)
d = calcDist(i, km_train, j, km_train);
else
if (label_train[i] == label_train[j] || label_train[i] + label_train[j] == 3)
d = calcDist(i, km_train, j, km_train);
if (tid == 0){
ino_knn[m] = j;
dist_knn[m] = d;
}
}
}
// launched with # blocks = ntrain
__global__ void knnFindNeighbor_train(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int len = ntrain / BSIZE;
int start = tid * len;
if (tid < ntrain % BSIZE){
start += tid;
++ len;
}
else
start += ntrain % BSIZE;
__syncthreads();
int b = min(len, nnegibor);
/* Each thread partially bubble-sorts its own chunk (start, len) for b passes, so the last b
   elements of the chunk hold its b nearest neighbors, with the nearest at the end. */
for (int i = 0; i < b; ++ i)
for (int j = start; j < start + len - i - 1; ++ j)
if(getElement(dist_knn, bid, j, ntrain) < getElement(dist_knn, bid, j + 1, ntrain)){
double tmp_dist = getElement(dist_knn, bid, j, ntrain);
setElement(dist_knn, bid, j, ntrain, getElement(dist_knn, bid, j + 1, ntrain));
setElement(dist_knn, bid, j + 1, ntrain, tmp_dist);
int tmp_ino = getElementInt(ino_knn, bid, j, ntrain);
setElementInt(ino_knn, bid, j, ntrain, getElementInt(ino_knn, bid, j + 1, ntrain));
setElementInt(ino_knn, bid, j + 1, ntrain, tmp_ino);
}
__syncthreads();
__shared__ double dist[BSIZE];
__shared__ int ino[BSIZE];
__shared__ int shortest[BSIZE];
/* Merge the BSIZE sorted chunks: in each of nnegibor rounds, every thread proposes its
   current nearest remaining candidate and a tree reduction selects the global nearest. */
int p = start + len -1;
for (int i = 0; i < nnegibor; ++ i){
if (b > 0){
dist[tid] = getElement(dist_knn, bid, p, ntrain);
ino[tid] = getElementInt(ino_knn, bid, p, ntrain);
}
else
dist[tid] = DBL_MAX;
shortest[tid] = tid;
int stride = blockDim.x/2;
while (stride > 0){
__syncthreads();
if (tid < stride){
if (dist[tid] > dist[tid + stride]){
dist[tid] = dist[tid + stride];
ino[tid] = ino[tid + stride];
shortest[tid] = shortest[tid + stride];
}
}
stride /= 2;
}
__syncthreads();
if(tid == 0)
setElementInt(neighbor_knn, bid, i, nnegibor, ino[0]);
if(tid == shortest[0]){
-- b;
-- p;
}
}
}
__global__ void knnMatching_train(){
int ub = ntrain * nnegibor;
int stride = blockDim.x * gridDim.x;
int idx_train1, idx_train2;
for (int m = blockIdx.x * blockDim.x + threadIdx.x; m < ub; m += stride){
idx_train1 = m / nnegibor;
idx_train2 = neighbor_knn[m];
if (label_train[idx_train1] == label_train[idx_train2])
neighbor_knn[m] = 1;
else
neighbor_knn[m] = 0;
}
}
// launch with a single block
__global__ void knnAcc_train(int neiborhood_size){
int tid = threadIdx.x;
int stride = blockDim.x;
if (tid < 4)
hits[tid] = 0;
__shared__ int matched[BSIZE];
matched[tid] = 0;
for (int m = tid; m < ntrain; m += stride){
int nsametype = 0;
for (int i = 0; i < neiborhood_size; ++ i)
nsametype += neighbor_knn[m * nnegibor + i];
if (nsametype > neiborhood_size/2){
matched[tid] += 1;
if (label_train[m] == FN || label_train[m] == FP)
atomicAdd(&hits[label_train[m]], 1);
}
else{
if (label_train[m] == TN || label_train[m] == TP)
atomicSub(&hits[label_train[m]], 1);
}
}
int stride1 = blockDim.x/2;
while (stride1 > 0){
__syncthreads();
if (tid < stride1)
matched[tid] += matched[tid + stride1];
stride1 /= 2;
}
__syncthreads();
if (tid ==0)
acc_knn = 1.0 * matched[0] / ntrain;
}
__global__ void updateTarget(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x * blockDim.x;
int max_nn = max(max(nn[0], nn[1]), max(nn[2], nn[3]));
for (int m = blockDim.x * bid + tid; m < ntrain * max_nn; m += size){
int ino = m / max_nn;
int idx_neighbor = m % max_nn;
if (idx_neighbor < nn[label_train[ino]])
setTargetByOffset(ino, idx_neighbor, getElementInt(neighbor_knn, ino, idx_neighbor, nnegibor));
}
}
__global__ void zeroTargetTerm(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x * blockDim.x;
for (int m = blockDim.x * bid + tid; m < ntrain * ntrain; m += size)
t_target[m] = .0;
}
__device__ void updateTar(int idx1, int idx2, double h){
__syncthreads();
for (int p = threadIdx.x; p < ntrain; p += blockDim.x)
t_target[p * ntrain + idx1] += h * (getElement(km_train, idx1, p, ntrain) - getElement(km_train, idx2, p, ntrain));
}
__global__ void updateTargetTerm(){
int i, j;
double h;
int bid = blockIdx.x;
for (i = 0; i < ntrain; ++ i){
for (int kk = 0; kk < nn[label_train[i]]; ++ kk){
j = getTargetByOffset(i, kk);
//if (label_train[i] == TP)
// h = nu;
//else
//h = 1.0;
h = nu[label_train[i]];
if (i % gridDim.x == bid)
updateTar(i, j, h);
if (j % gridDim.x == bid)
updateTar(j, i, h);
}
}
}
__global__ void countTarget(){
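// Counts, per class, how many of each instance's current targets still appear among its nn[label]
// nearest neighbors (as found by knnFindNeighbor_train); hits[] is read back on the host to compute
// target coverage and decide when targets need to be reselected.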
__shared__ int stay[BSIZE*4];
int tid = threadIdx.x;
for (int i = 0; i < 4; ++ i)
stay[tid + BSIZE * i] = 0;
for(int m = tid; m < ntrain; m += BSIZE){
int l = label_train[m];
for (int i = 0; i < nn[l]; ++ i){
int n = getElementInt(neighbor_knn, m, i, nnegibor);
for (int j = 0; j < nn[l]; ++ j){
int t = getTargetByOffset(m, j);
if ( n == t)
++ stay[l * BSIZE + tid];
}
}
}
for (int i = 0; i < 4; ++ i){
int stride1 = blockDim.x/2;
while (stride1 > 0){
__syncthreads();
if (tid < stride1)
stay[BSIZE * i + tid] += stay[BSIZE * i + tid + stride1];
stride1 /= 2;
}
__syncthreads();
if (tid == 0)
hits[i] = stay[BSIZE * i];
}
}
void deviceInitKernelMatrix(int *trainninst, int *testninst, int *nf, double *traindata, double *testdata){
hipMemcpyToSymbol(ntrain, trainninst, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(ntest, testninst, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(nfeat, nf, sizeof(int), 0, hipMemcpyHostToDevice);
double *d_train_data, *d_test_data;
hipMalloc((void **)&d_train_data, sizeof(double) * (*trainninst) * (*nf));
hipMalloc((void **)&d_test_data, sizeof(double) * (*testninst) * (*nf));
hipMemcpy(d_train_data, traindata, sizeof(double) * (*trainninst) * (*nf), hipMemcpyHostToDevice);
hipMemcpy(d_test_data, testdata, sizeof(double) * (*testninst) * (*nf), hipMemcpyHostToDevice);
double *d_kernel_matrix_train, *d_kernel_matrix_test;
hipMalloc((void **)&d_kernel_matrix_train, sizeof(double) * (*trainninst) * (*trainninst));
hipMemcpyToSymbol(km_train, &d_kernel_matrix_train, sizeof(double*), 0, hipMemcpyHostToDevice);
hipMalloc((void **)&d_kernel_matrix_test, sizeof(double) * (*testninst) * (*trainninst));
hipMemcpyToSymbol(km_test, &d_kernel_matrix_test, sizeof(double*), 0, hipMemcpyHostToDevice);
// Run the event recording
hipEvent_t start_event, stop_event;
hipEventCreate(&start_event) ;
hipEventCreate(&stop_event) ;
hipEventRecord(start_event, 0);
hipLaunchKernelGGL(( calcKM), dim3(84), dim3(256), 0, 0, d_train_data, d_test_data);
hipDeviceSynchronize();
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipFree(d_train_data);
hipFree(d_test_data);
}
unsigned *tcount;
int *ncount;
int totalMissed;
double targetCoverage[4];
double minCoverage;
int super = 0;
void deviceInitTarget(int *h_target, int trainninst, int targetsize, int *nc, int *Nneighbor, int *offset){
ncount = Nneighbor;
int *d_target;
hipMalloc((void **)&d_target, sizeof(int) * targetsize);
hipMemcpy(d_target, h_target, sizeof(int) * targetsize, hipMemcpyHostToDevice);
hipMemcpyToSymbol(target, &d_target, sizeof(int*), 0, hipMemcpyHostToDevice);
hipMalloc((void **)&d_target, sizeof(int) * trainninst);
hipMemcpy(d_target, offset, sizeof(int) * trainninst, hipMemcpyHostToDevice);
hipMemcpyToSymbol(target_offset, &d_target, sizeof(int*), 0, hipMemcpyHostToDevice);
//hipMemcpyToSymbol(k, kk, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(nclass, nc, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(nn, Nneighbor, sizeof(int) * 4, 0, hipMemcpyHostToDevice);
}
void deviceInitLabelTrain(struct Inst *inst, unsigned ninst){
short *label = new short[ninst];
for (int i = 0; i < ninst; ++ i)
label[i] = inst[i].label;
short *d_label;
hipMalloc((void **)&d_label, sizeof(short) * ninst);
hipMemcpy(d_label, label, sizeof(short) * ninst, hipMemcpyHostToDevice);
hipMemcpyToSymbol(label_train, &d_label, sizeof(short*), 0, hipMemcpyHostToDevice);
delete[] label;
}
void deviceInitLabelTest(struct Inst *inst, unsigned ninst){
short *label = new short[ninst];
for (int i = 0; i < ninst; ++ i)
label[i] = inst[i].label;
short *d_label;
hipMalloc((void **)&d_label, sizeof(short) * ninst);
hipMemcpy(d_label, label, sizeof(short) * ninst, hipMemcpyHostToDevice);
hipMemcpyToSymbol(label_test, &d_label, sizeof(short*), 0, hipMemcpyHostToDevice);
delete[] label;
}
void deviceInitInstList(struct Inst *inst, unsigned *count, unsigned ninst, int nc, int targetsize){
tcount = count;
hipMemcpyToSymbol(typecount, count, sizeof(unsigned) * 4, 0, hipMemcpyHostToDevice);
struct Inst *gi[4];
for (int i = 0; i < 4; ++ i){
if (count[i] > 0)
gi[i] = (struct Inst *)malloc(sizeof(struct Inst) * count[i]);
}
int p[4] = {0, 0, 0, 0};
for(int i = 0; i < ninst; ++ i){
int type = inst[i].label;
gi[type][p[type]].ino = inst[i].ino;
gi[type][p[type]].label = inst[i].label;
++ p[type];
}
struct Inst *d_inst;
hipMalloc((void **)&d_inst, sizeof(struct Inst) * ninst);
unsigned start = 0;
for (int i = 0; i < 4; ++ i){
if (count[i] > 0)
hipMemcpy(d_inst + start, gi[i], sizeof(struct Inst) * count[i], hipMemcpyHostToDevice);
struct Inst *dd_inst = d_inst + start;
hipMemcpyToSymbol(type_inst, &dd_inst, sizeof(struct Inst *), i * sizeof(struct Inst *), hipMemcpyHostToDevice);
start += count[i];
}
hipMemcpyToSymbol(grouped_inst, &d_inst, sizeof(struct Inst *), 0, hipMemcpyHostToDevice);
for (int i = 0; i < 4; ++ i){
if (count[i] > 0)
free(gi[i]);
}
double *distanceTarget, *distanceMatrix1, *distanceMatrix2;
hipMalloc((void **)&distanceTarget, sizeof(double) * targetsize);
hipMemcpyToSymbol(dist_target, &distanceTarget, sizeof(double *), 0, hipMemcpyHostToDevice);
hipMalloc((void **)&distanceMatrix1, sizeof(double) * count[TN] * count[FN]);
hipMemcpyToSymbol(dist1, &distanceMatrix1, sizeof(double *), 0, hipMemcpyHostToDevice);
if (nc == 4){
hipMalloc((void **)&distanceMatrix2, sizeof(double) * count[TP] * count[FP]);
hipMemcpyToSymbol(dist2, &distanceMatrix2, sizeof(double *), 0, hipMemcpyHostToDevice);
}
}
void deviceInitMu(double m, double n[]){
double local_m = m;
hipMemcpyToSymbol(mu, &local_m, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(nu, n, sizeof(double) * 4, 0, hipMemcpyHostToDevice);
double nn[4];
hipMemcpyFromSymbol(nn, nu, sizeof(double) * 4, 0, hipMemcpyDeviceToHost);
cout << "retrieve nu: " << nn[0] << " " << nn[1] << " " << nn[2] << " " << nn[3] << endl;
}
void deviceInitO(double *o, int size){
double *d_t;
//cout << "double O: " << o[1] << endl;
hipMalloc((void **)&d_t, sizeof(double) * size);
hipMemcpy(d_t, o, sizeof(double) * size, hipMemcpyHostToDevice);
hipMemcpyToSymbol(O, &d_t, sizeof(double*), 0, hipMemcpyHostToDevice);
//cout << "d_t: " << d_t << endl;
hipMalloc((void **)&d_t, sizeof(double) * size);
hipMemcpyToSymbol(O, &d_t, sizeof(double*), sizeof(double*), hipMemcpyHostToDevice);
//cout << "d_t: " << d_t << endl;
}
void deviceInitTargetTerm(double *t, int size){
double *d_t;
hipMalloc((void **)&d_t, sizeof(double) * size);
hipMemcpy(d_t, t, sizeof(double) * size, hipMemcpyHostToDevice);
hipMemcpyToSymbol(t_target, &d_t, sizeof(double*), 0, hipMemcpyHostToDevice);
}
void deviceInitUpdateTerm(int size1, int size2){
double *d_t;
hipMalloc((void **)&d_t, sizeof(double) * size1);
hipMemcpyToSymbol(t_update, &d_t, sizeof(double*), 0, hipMemcpyHostToDevice);
hipMalloc((void **)&d_t, sizeof(double) * size2);
hipMemcpyToSymbol(t_gradient, &d_t, sizeof(double*), 0, hipMemcpyHostToDevice);
}
void deviceInitTri(int size){
double *t_o;
hipMalloc((void **)&t_o, sizeof(double) * size);
hipMemcpyToSymbol(t_triplet, &t_o, sizeof(double*), 0, hipMemcpyHostToDevice);
}
void deviceInitKnn(int n_train, int n_test, int kk){
double *d_knn;
//hipMalloc((void **)&d_knn, sizeof(double) * n_test * n_train);
hipMalloc((void **)&d_knn, sizeof(double) * n_train * n_train);
hipMemcpyToSymbol(dist_knn, &d_knn, sizeof(double*), 0, hipMemcpyHostToDevice);
int* i_knn;
//hipMalloc((void **)&i_knn, sizeof(int) * n_test * n_train);
hipMalloc((void **)&i_knn, sizeof(int) * n_train * n_train);
hipMemcpyToSymbol(ino_knn, &i_knn, sizeof(int*), 0, hipMemcpyHostToDevice);
//hipMalloc((void **)&i_knn, sizeof(int) * n_test * kk);
hipMalloc((void **)&i_knn, sizeof(int) * n_train * kk);
hipMemcpyToSymbol(neighbor_knn, &i_knn, sizeof(int*), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(nnegibor, &kk, sizeof(int), 0, hipMemcpyHostToDevice);
}
int targetUpdateNeeded(double alpha){
if (super){
super = 0;
return 1;
}
if (alpha < 1e-8 && totalMissed > 0)
//if ((alpha < 1e-8 && totalMissed > 0) || minCoverage < 0.5)
return 1;
else
return 0;
}
void kernelTest(int d, int n, int n_test, int k[], double *result, double mu, double alpha, double nu[]){
char path[1024];
getcwd(path, 1024);
double original_alpha = alpha;
double dd[20];
int h_hits[4];
deviceInitKnn(n, n_test, 40);
double f_old = DBL_MAX;
double min_iter = 0;
double global_max_acc = .0;
unsigned global_max_iter = 0;
unsigned global_max_pos = 0;
double global_max_acc_train = .0;
unsigned global_max_iter_train = 0;
unsigned global_max_pos_train = 0;
int kk = 5;
bool targetUpdated = false;
int idx = 1;
unsigned iter = 0;
while(true){
// Run the event recording
hipEvent_t start_event, stop_event;
hipEventCreate(&start_event);
hipEventCreate(&stop_event);
hipEventRecord(start_event, 0);
cout << endl << "Iter = " << iter << ", mu = " << mu << ", k = " << k[0] << "," << k[1] << "," << k[2] << "," << k[3] << ", nu = " << nu[0] << "," << nu[1] << "," << nu[2] << "," << nu[3] << endl;
idx = 1 - idx;
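// Omega is double-buffered: O[idx_o] is the transform read by the kernels this iteration and
// O[1 - idx_o] receives the updated transform written by updateO1; flipping idx each iteration
// turns last iteration's output into this iteration's input.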
hipMemcpyToSymbol(idx_o, &idx, sizeof(int), 0, hipMemcpyHostToDevice);
// update target and target term periodically
if (targetUpdateNeeded(alpha)){
hipLaunchKernelGGL(( knnUpdateDist_train), dim3(84), dim3(BSIZE), 0, 0, );
hipLaunchKernelGGL(( knnFindNeighbor_train), dim3(n), dim3(BSIZE), 0, 0, );
hipLaunchKernelGGL(( updateTarget), dim3(84), dim3(BSIZE), 0, 0, );
hipLaunchKernelGGL(( zeroTargetTerm), dim3(84), dim3(BSIZE), 0, 0, );
hipLaunchKernelGGL(( updateTargetTerm), dim3(84), dim3(BSIZE), 0, 0, );
alpha = original_alpha;
targetUpdated = true;
}
// update distances to targets(i,j) and between opposing points(i,l)
hipLaunchKernelGGL(( update2), dim3(84), dim3(256), 0, 0, );
// update t_triplet by calculating vdist of every (i, j, l)
hipLaunchKernelGGL(( zeroT_triplet), dim3(84), dim3(256), 0, 0, );
hipLaunchKernelGGL(( update3_2), dim3(84), dim3(256), 0, 0, );
// update object function value
hipLaunchKernelGGL(( calcFval), dim3(84), dim3(256), 0, 0, );
hipDeviceSynchronize();
hipMemcpyFromSymbol(&dd[9], f_val, sizeof(double), 0, hipMemcpyDeviceToHost);
cout << "f_val= " << dd[9];
if (dd[9] < f_old || targetUpdated){
targetUpdated = false;
cout << ", reduced by " << f_old - dd[9] << endl;
f_old = dd[9];
min_iter = iter;
alpha *= 1.1;
hipLaunchKernelGGL(( knnUpdateDist), dim3(84), dim3(BSIZE), 0, 0, );
hipLaunchKernelGGL(( knnFindNeighbor), dim3(n_test), dim3(BSIZE), 0, 0, );
hipLaunchKernelGGL(( knnMatching), dim3(84), dim3(BSIZE), 0, 0, );
for (int i = 0; i < 20; ++ i){
hipLaunchKernelGGL(( knnAcc), dim3(1), dim3(BSIZE), 0, 0, 2 * i + 1);
hipDeviceSynchronize();
hipMemcpyFromSymbol(h_hits, hits, sizeof(int) * 4, 0, hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(&dd[i], acc_knn, sizeof(double), 0, hipMemcpyDeviceToHost);
cout << h_hits[0] + h_hits[1] + h_hits[2] + h_hits[3] << "(" << h_hits[0] << "," << h_hits[1] << "," << h_hits[2] << "," << h_hits[3] << "), ";
//if (2 * i + 1 == kk && h_hits[0] + h_hits[1] + h_hits[2] + h_hits[3] >= 0)
//super = 1;
}
double max_acc = .0;
int max_acc_k = -1;
for (int i = 0; i < 20; ++ i){
if (dd[i] > max_acc){
max_acc = dd[i];
max_acc_k = 2 * i + 1;
}
}
if (max_acc >= global_max_acc && iter > 10){
global_max_acc = max_acc;
global_max_iter = iter;
global_max_pos = max_acc_k;
}
cout << endl << "max acc = " << max_acc << " at k = " << max_acc_k
<< ". global max = " << global_max_acc << " in iter " << global_max_iter << " at k = " << global_max_pos << endl;
hipLaunchKernelGGL(( knnUpdateDist_train), dim3(84), dim3(BSIZE), 0, 0, );
hipLaunchKernelGGL(( knnFindNeighbor_train), dim3(n), dim3(BSIZE), 0, 0, );
hipLaunchKernelGGL(( countTarget), dim3(1), dim3(BSIZE), 0, 0, );
hipDeviceSynchronize();
hipMemcpyFromSymbol(h_hits, hits, sizeof(int) * 4, 0, hipMemcpyDeviceToHost);
cout << "Targets: "
<< 1.0 * h_hits[0]/(tcount[0]*ncount[0]) << "(" << h_hits[0] << "/" << tcount[0]*ncount[0] << "), "
<< 1.0 * h_hits[1]/(tcount[1]*ncount[1]) << "(" << h_hits[1] << "/" << tcount[1]*ncount[1] << "), "
<< 1.0 * h_hits[2]/(tcount[2]*ncount[2]) << "(" << h_hits[2] << "/" << tcount[2]*ncount[2] << "), "
<< 1.0 * h_hits[3]/(tcount[3]*ncount[3]) << "(" << h_hits[3] << "/" << tcount[3]*ncount[3] << ")"<< endl ;
minCoverage = 1.0;
for (int i = 0; i < 4; ++ i){
targetCoverage[i] = 1.0 * h_hits[i] / (tcount[i]*ncount[i]);
if (minCoverage > targetCoverage[i])
minCoverage = targetCoverage[i];
}
totalMissed = 0;
for (int i = 0; i < 4; ++ i)
totalMissed += tcount[i]*ncount[i] - h_hits[i];
hipLaunchKernelGGL(( knnMatching_train), dim3(84), dim3(BSIZE), 0, 0, );
for (int i = 0; i < 20; ++ i){
hipLaunchKernelGGL(( knnAcc_train), dim3(1), dim3(BSIZE), 0, 0, 2 * i + 1);
hipDeviceSynchronize();
hipMemcpyFromSymbol(h_hits, hits, sizeof(int) * 4, 0, hipMemcpyDeviceToHost);
hipMemcpyFromSymbol(&dd[i], acc_knn, sizeof(double), 0, hipMemcpyDeviceToHost);
cout << h_hits[0] + h_hits[1] + h_hits[2] + h_hits[3] << "(" << h_hits[0] << "," << h_hits[1] << "," << h_hits[2] << "," << h_hits[3] << ") ";
}
double max_acc_train = .0;
int max_acc_k_train = -1;
for (int i = 0; i < 20; ++ i){
if (dd[i] > max_acc_train){
max_acc_train = dd[i];
max_acc_k_train = 2 * i + 1;
}
}
if (max_acc_train >= global_max_acc_train && iter>10){
global_max_acc_train = max_acc_train;
global_max_iter_train = iter;
global_max_pos_train = max_acc_k_train;
}
cout << endl << "max acc = " << max_acc_train << " at k = " << max_acc_k_train
<< ". global max = " << global_max_acc_train << " in iter " << global_max_iter_train << " at k = " << global_max_pos_train;
}
else{
cout << ", increased by " << dd[9] - f_old;
alpha /= 10;
hipLaunchKernelGGL(( copyO), dim3(84), dim3(BSIZE), 0, 0, );
hipLaunchKernelGGL(( update2), dim3(84), dim3(256), 0, 0, );
// update t_triplet by calculating vdist of every (i, j, l)
hipLaunchKernelGGL(( zeroT_triplet), dim3(84), dim3(256), 0, 0, );
hipLaunchKernelGGL(( update3_2), dim3(84), dim3(256), 0, 0, );
}
cout << endl << "min_f = " << f_old << " at iter " << min_iter << ", alpha = " << alpha << endl;
// t_update = I - 2 * alpha * (t_target + t_triplet)
hipLaunchKernelGGL(( updateUpdateTerm), dim3(84), dim3(256), 0, 0, alpha);
// update omega = omega * t_update
hipLaunchKernelGGL(( zeroO), dim3(84), dim3(256), 0, 0, );
dim3 dimGrid(d, (n - 1) / BSIZE + 1);
dim3 dimBlock(BSIZE);
hipLaunchKernelGGL(( updateO1), dim3(dimGrid), dim3(dimBlock), 0, 0, );
hipDeviceSynchronize();
float time_kernel;
hipEventRecord(stop_event, 0);
hipEventElapsedTime(&time_kernel, start_event, stop_event);
cout << "time " << time_kernel/1000 << " at " << path << endl;
++ iter;
//if (iter > 100)
if (alpha < 1e-10)
break;
}
}
|
18de8f1e3b4c5d3074a6ac68da705ecf4b53b727.cu
|
#include "mycommon.h"
#define BSIZE 256
__constant__ int nfeat;
__constant__ int ntrain;
__constant__ int ntest;
__constant__ int nclass;
__constant__ int k;
__constant__ int nnegibor;
__constant__ double mu;
__constant__ double nu[4];
__constant__ int idx_o;
__constant__ int *target;
__constant__ double *km_train;
__constant__ double *km_test;
__constant__ double *O[2];
__constant__ double *t_target;
__constant__ double *t_triplet;
__constant__ double *t_update;
__constant__ double *t_gradient;
__constant__ short *label_train;
__constant__ short *label_test;
__constant__ struct Inst *grouped_inst;
__constant__ struct Inst *type_inst[4];
__constant__ unsigned typecount[4];
__constant__ int *target_offset;
__constant__ int nn[4];
__constant__ double *dist_target;
__constant__ double *dist1;
__constant__ double *dist2;
__constant__ double *hinge_val;
__constant__ double *dist_knn;
__constant__ int *ino_knn;
__constant__ int *neighbor_knn;
__device__ double f_val;
__device__ double sub_fval[84];
__device__ double acc_knn;
__device__ int hits[4];
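// Fills km (n1 x n2) with Gaussian kernel values: km[i*n2 + j] = exp(-||d1_i - d2_j||^2 / nfeat).
// Flattened entries are distributed over all threads with a grid-stride loop.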
__device__ void kernelMatrix(double *km, double *d1, int n1, double *d2, int n2){
int ub = n1 * n2;
int stride = blockDim.x * gridDim.x;
double c_val;
int i, j;
for (int m = blockIdx.x * blockDim.x + threadIdx.x; m < ub; m += stride){
i = m / n2;
j = m % n2;
c_val = .0;
for (int n = 0; n < nfeat; ++ n)
c_val += pow(d1[n * n1 + i] - d2[n * n2 + j], 2);
km[m] = exp(-c_val / nfeat);
}
}
__global__ void calcKM(double *train, double *test){
kernelMatrix(km_train, train, ntrain, train, ntrain);
kernelMatrix(km_test, test, ntest, train, ntrain);
}
__device__ double getElement(double *m, int i, int j, int stride){
return *(m + i * stride + j);
}
__device__ void setElement(double *m, int i, int j, int stride, double val){
m[i * stride + j] = val;
}
__device__ int getElementInt(int *m, int i, int j, int stride){
return *(m + i * stride + j);
}
__device__ void setElementInt(int *m, int i, int j, int stride, int val){
m[i * stride + j] = val;
}
//__device__ int getTarget(int i, int kk){
// return target[i * k + kk];
//}
__device__ int getTargetByOffset(int ino, int kk){
return target[target_offset[ino] + kk];
}
__device__ void setTargetByOffset(int ino, int kk, int t){
target[target_offset[ino] + kk] = t;
}
__device__ int getTargetDist(int ino, int kk){
return dist_target[target_offset[ino] + kk];
}
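// Squared distance between instance i (a row of km1) and instance j (a row of km2) in the space
// projected by O[idx_o]: each of the nfeat output dimensions is a dot product of an O row with the
// kernel-column difference, reduced cooperatively in shared memory by a single thread block.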
__device__ double calcDist(int i, double *km1, int j, double *km2){
int tid = threadIdx.x;
__shared__ double diff_k[256];
__shared__ double sum[256];
__shared__ double norm[64];
if (tid < 64)
norm[tid] = .0;
int pos;
for (int m = 0; m < (ntrain - 1)/blockDim.x + 1; ++ m){
__syncthreads();
pos = m * blockDim.x + tid;
if (pos < ntrain)
diff_k[tid] = getElement(km1, i, pos, ntrain) - getElement(km2, j, pos, ntrain);
for (int d = 0; d < nfeat; ++ d){
__syncthreads();
if (pos < ntrain)
sum[tid] = getElement(O[idx_o], d, pos, ntrain) * diff_k[tid];
else
sum[tid] = .0;
int stride = blockDim.x/2;
while (stride > 0){
__syncthreads();
if (tid < stride)
sum[tid] += sum[tid + stride];
stride /= 2;
}
__syncthreads();
if (tid == 0)
norm[d] += sum[0];
}
}
if (tid < nfeat)
norm[tid] = norm[tid]*norm[tid];
__syncthreads();
double s = .0;
for (int d = 0; d < nfeat; ++ d)
s += norm[d];
return s;
}
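// Distance from every training instance to each of its target neighbors; (instance, target) pairs
// are assigned to blocks round-robin, and each block accumulates its share of the summed target
// distances into sub_fval[blockIdx.x].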
__device__ void calcTargetDist(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int i, j;
if (tid == 0)
sub_fval[bid] = .0;
int c = 0;
for (int m = 0; m < ntrain; ++ m)
for (int n = 0; n < nn[label_train[m]]; ++ n){
i = m;
j = getTargetByOffset(m, n);
if(c%gridDim.x == bid){
double val = calcDist(i, km_train, j, km_train);
if (tid == 0){
dist_target[target_offset[m] + n] = val;
sub_fval[bid] += val;
}
}
++ c;
}
}
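// Pairwise distances between two instance groups (e.g. TN vs FN): one block per (i, j) pair,
// results written into the dense height x width matrix dist.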
__device__ void updateDist(double *dist, struct Inst * inst1, int height, struct Inst * inst2, int width){
int tid = threadIdx.x;
int bid = blockIdx.x;
int i, j;
for (int m = bid; m < height * width; m += gridDim.x){
i = inst1[m / width].ino;
j = inst2[m % width].ino;
double val = calcDist(i, km_train, j, km_train);
if (tid == 0)
dist[m] = val;
}
}
__global__ void update2(){
calcTargetDist();
updateDist(dist1, type_inst[TN], typecount[TN], type_inst[FN], typecount[FN]);
if (nclass == 4)
updateDist(dist2, type_inst[TP], typecount[TP], type_inst[FP], typecount[FP]);
}
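// Piecewise-linear hinge weight: 0 when the margin is satisfied (s <= -1), 1 when it is fully
// violated (s >= 0), and 1 + s in between.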
__device__ double hinge(double s){
if (s <= -1.0)
return .0;
else if (s >= 0)
return 1.0;
else
return 1 + s;
}
__device__ void updateTri(int idx1, int idx2, int idx3, double h){
__syncthreads();
for (int p = threadIdx.x; p < ntrain; p += blockDim.x)
t_triplet[p * ntrain + idx1] += h * (getElement(km_train, idx2, p, ntrain) - getElement(km_train, idx3, p, ntrain));
}
__global__ void zeroT_triplet(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x * blockDim.x;
for (int m = blockDim.x * bid + tid; m < ntrain * ntrain; m += size)
t_triplet[m] = .0;
}
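// Accumulates the triplet (impostor) term: for every pair of opposing instances (i, l) and every
// target neighbor j of i, a violated margin (vdist > 0) adds its hinge weight h (scaled by nu)
// into t_triplet via updateTri, while block 0 sums the violation amounts into f_val.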
__global__ void update3_2(){
int bid = blockIdx.x;
int i, j, l;
double vdist, h;
if (bid == 0 && threadIdx.x == 0)
f_val = .0;
for (int m = 0; m < typecount[TN] * typecount[FN]; ++ m){
i = type_inst[TN][m / typecount[FN]].ino;
l = type_inst[FN][m % typecount[FN]].ino;
for (int kk = 0; kk < nn[TN]; ++ kk){
j = getTargetByOffset(i, kk);
vdist = 1 + dist_target[target_offset[i] + kk] - dist1[m];
if (vdist > 0 && blockIdx.x == 0 && threadIdx.x == 0)
f_val += vdist;
h = hinge(vdist);
if (h > 0){
//if (label_train[i] == TP)
h *= nu[label_train[i]];
if (i % gridDim.x == bid)
updateTri(i, l, j, h);
if (j % gridDim.x == bid)
updateTri(j, j, i, h);
if (l % gridDim.x == bid)
updateTri(l, i, l, h);
}
}
l = type_inst[TN][m / typecount[FN]].ino;
i = type_inst[FN][m % typecount[FN]].ino;
for (int kk = 0; kk < nn[FN]; ++ kk){
j = getTargetByOffset(i, kk);
vdist = 1 + dist_target[target_offset[i] + kk] - dist1[m];
if (vdist > 0 && blockIdx.x == 0 && threadIdx.x == 0)
f_val += vdist;
h = hinge(vdist);
if (h > 0){
//if (label_train[i] == TP)
h *= nu[label_train[i]];
if (i % gridDim.x == bid)
updateTri(i, l, j, h);
if (j % gridDim.x == bid)
updateTri(j, j, i, h);
if (l % gridDim.x == bid)
updateTri(l, i, l, h);
}
}
}
if (nclass == 4){
for (int m = 0; m < typecount[TP] * typecount[FP]; ++ m){
i = type_inst[TP][m / typecount[FP]].ino;
l = type_inst[FP][m % typecount[FP]].ino;
for (int kk = 0; kk < nn[TP]; ++ kk){
j = getTargetByOffset(i, kk);
vdist = 1 + dist_target[target_offset[i] + kk] - dist2[m];
if (vdist > 0 && blockIdx.x == 0 && threadIdx.x == 0)
f_val += vdist;
h = hinge(vdist);
if (h > 0){
h *= nu[label_train[i]];
if (i % gridDim.x == bid)
updateTri(i, l, j, h);
if (j % gridDim.x == bid)
updateTri(j, j, i, h);
if (l % gridDim.x == bid)
updateTri(l, i, l, h);
}
}
l = type_inst[TP][m / typecount[FP]].ino;
i = type_inst[FP][m % typecount[FP]].ino;
for (int kk = 0; kk < nn[FP]; ++ kk){
j = getTargetByOffset(i, kk);
vdist = 1 + dist_target[target_offset[i] + kk] - dist2[m];
if (vdist > 0 && blockIdx.x == 0 && threadIdx.x == 0)
f_val += vdist;
h = hinge(vdist);
if (h > 0){
h *= nu[label_train[i]];
if (i % gridDim.x == bid)
updateTri(i, l, j, h);
if (j % gridDim.x == bid)
updateTri(j, j, i, h);
if (l % gridDim.x == bid)
updateTri(l, i, l, h);
}
}
}
}
}
__global__ void calcFval(){
if (blockIdx.x == 0 && threadIdx.x == 0)
for (int i = 0; i < gridDim.x; ++ i)
f_val += sub_fval[i];
}
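// Builds the dense ntrain x ntrain update matrix t_update = I - 2 * alpha * (t_target + mu * t_triplet).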
__global__ void updateUpdateTerm(double alpha){
int size = gridDim.x * blockDim.x;
for (int m = blockIdx.x * blockDim.x + threadIdx.x; m < ntrain * ntrain; m += size){
if (m/ntrain == m%ntrain)
t_update[m] = 1 - 2 * alpha * (t_target[m] + mu * t_triplet[m]);
else
t_update[m] = - 2 * alpha * (t_target[m] + mu * t_triplet[m]);
}
}
__global__ void copyO(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x * blockDim.x;
for (int m = blockDim.x * bid + tid; m < nfeat * ntrain; m += size)
O[idx_o][m] = O[1 - idx_o][m];
}
__global__ void zeroO(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x * blockDim.x;
for (int m = blockDim.x * bid + tid; m < nfeat * ntrain; m += size)
O[1 - idx_o][m] = .0;
}
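// O[1 - idx_o] = O[idx_o] * t_update, computed as a blocked matrix product: blockIdx.x selects the
// output row, blockIdx.y a BSIZE-wide column tile, and the inner dimension is walked in BSIZE chunks.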
__global__ void updateO1(){
int tid = threadIdx.x;
int bid_row = blockIdx.x;
int bid_col = blockIdx.y;
int workingtid = min(BSIZE, ntrain - bid_col * BSIZE);
if (tid < workingtid)
O[1 - idx_o][bid_row * ntrain + bid_col * BSIZE + tid] = .0;
for (int start = 0; start < ntrain; start += BSIZE){
int len = min(BSIZE, ntrain - start);
for (int i = 0; i < len; ++ i){
if (tid < workingtid){
double val = getElement(O[idx_o], bid_row, start + i, ntrain) * getElement(t_update, i + start, bid_col * BSIZE + tid, ntrain);
O[1 - idx_o][bid_row * ntrain + bid_col * BSIZE + tid] += val;
}
}
}
}
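// Test-to-train kNN distances: one block per (test, train) pair. With 4 classes, a distance is only
// computed when the labels match or sum to 3 (same super-class); other pairs stay at DBL_MAX.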
__global__ void knnUpdateDist(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x;
for(int m = bid; m < ntest * ntrain; m += size){
int i = m / ntrain;
int j = m % ntrain;
double d = DBL_MAX;
if (nclass == 2)
d = calcDist(i, km_test, j, km_train);
else{
if (label_test[i] == label_train[j] || label_test[i] + label_train[j] == 3)
d = calcDist(i, km_test, j, km_train);
}
if (tid == 0){
ino_knn[m] = j;
dist_knn[m] = d;
}
}
}
// launched with # blocks = ntest
__global__ void knnFindNeighbor(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int len = ntrain / BSIZE;
int start = tid * len;
if (tid < ntrain % BSIZE){
start += tid;
++ len;
}
else
start += ntrain % BSIZE;
__syncthreads();
int b = min(len, nnegibor);
for (int i = 0; i < b; ++ i)
for (int j = start; j < start + len - i - 1; ++ j)
if(getElement(dist_knn, bid, j, ntrain) < getElement(dist_knn, bid, j + 1, ntrain)){
double tmp_dist = getElement(dist_knn, bid, j, ntrain);
setElement(dist_knn, bid, j, ntrain, getElement(dist_knn, bid, j + 1, ntrain));
setElement(dist_knn, bid, j + 1, ntrain, tmp_dist);
int tmp_ino = getElementInt(ino_knn, bid, j, ntrain);
setElementInt(ino_knn, bid, j, ntrain, getElementInt(ino_knn, bid, j + 1, ntrain));
setElementInt(ino_knn, bid, j + 1, ntrain, tmp_ino);
}
__syncthreads();
__shared__ double dist[BSIZE];
__shared__ int ino[BSIZE];
__shared__ int shortest[BSIZE];
int p = start + len -1;
for (int i = 0; i < nnegibor; ++ i){
if (b > 0){
dist[tid] = getElement(dist_knn, bid, p, ntrain);
ino[tid] = getElementInt(ino_knn, bid, p, ntrain);
}
else
dist[tid] = DBL_MAX;
shortest[tid] = tid;
int stride = blockDim.x/2;
while (stride > 0){
__syncthreads();
if (tid < stride){
if (dist[tid] > dist[tid + stride]){
dist[tid] = dist[tid + stride];
ino[tid] = ino[tid + stride];
shortest[tid] = shortest[tid + stride];
}
}
stride /= 2;
}
__syncthreads();
if(tid == 0)
setElementInt(neighbor_knn, bid, i, nnegibor, ino[0]);
if(tid == shortest[0]){
-- b;
-- p;
}
}
}
__global__ void knnMatching(){
int ub = ntest * nnegibor;
int stride = blockDim.x * gridDim.x;
int idx_test, idx_train;
for (int m = blockIdx.x * blockDim.x + threadIdx.x; m < ub; m += stride){
idx_test = m / nnegibor;
idx_train = neighbor_knn[m];
if (label_test[idx_test] == label_train[idx_train])
neighbor_knn[m] = 1;
else
neighbor_knn[m] = 0;
}
}
// launched with a single block
__global__ void knnAcc(int neiborhood_size){
int tid = threadIdx.x;
int stride = blockDim.x;
if (tid < 4)
hits[tid] = 0;
__shared__ int matched[BSIZE];
matched[tid] = 0;
for (int m = tid; m < ntest; m += stride){
int nsametype = 0;
for (int i = 0; i < neiborhood_size; ++ i)
nsametype += neighbor_knn[m * nnegibor + i];
if (nsametype > neiborhood_size/2){
matched[tid] += 1;
if (label_test[m] == FN || label_test[m] == FP)
atomicAdd(&hits[label_test[m]], 1);
}
else{
if (label_test[m] == TN || label_test[m] == TP)
atomicSub(&hits[label_test[m]], 1);
}
}
int stride1 = blockDim.x/2;
while (stride1 > 0){
__syncthreads();
if (tid < stride1)
matched[tid] += matched[tid + stride1];
stride1 /= 2;
}
__syncthreads();
if (tid ==0)
acc_knn = 1.0 * matched[0] / ntest;
}
__global__ void knnUpdateDist_train(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x;
for(int m = bid; m < ntrain * ntrain; m += size){
int i = m / ntrain;
int j = m % ntrain;
double d = DBL_MAX;
if (i != j)
if (nclass == 2)
d = calcDist(i, km_train, j, km_train);
else
if (label_train[i] == label_train[j] || label_train[i] + label_train[j] == 3)
d = calcDist(i, km_train, j, km_train);
if (tid == 0){
ino_knn[m] = j;
dist_knn[m] = d;
}
}
}
// launched with # blocks = ntrain
__global__ void knnFindNeighbor_train(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int len = ntrain / BSIZE;
int start = tid * len;
if (tid < ntrain % BSIZE){
start += tid;
++ len;
}
else
start += ntrain % BSIZE;
__syncthreads();
int b = min(len, nnegibor);
/* each thread bubble-sorts its own chunk (start, len) for b iterations, so the last b elements
of its chunk hold the b closest neighbors, with the closest one at the very end of the chunk. */
for (int i = 0; i < b; ++ i)
for (int j = start; j < start + len - i - 1; ++ j)
if(getElement(dist_knn, bid, j, ntrain) < getElement(dist_knn, bid, j + 1, ntrain)){
double tmp_dist = getElement(dist_knn, bid, j, ntrain);
setElement(dist_knn, bid, j, ntrain, getElement(dist_knn, bid, j + 1, ntrain));
setElement(dist_knn, bid, j + 1, ntrain, tmp_dist);
int tmp_ino = getElementInt(ino_knn, bid, j, ntrain);
setElementInt(ino_knn, bid, j, ntrain, getElementInt(ino_knn, bid, j + 1, ntrain));
setElementInt(ino_knn, bid, j + 1, ntrain, tmp_ino);
}
__syncthreads();
__shared__ double dist[BSIZE];
__shared__ int ino[BSIZE];
__shared__ int shortest[BSIZE];
/* merge step over the BSIZE sorted chunks: each iteration selects the globally closest remaining candidate and appends it to neighbor_knn. */
int p = start + len -1;
for (int i = 0; i < nnegibor; ++ i){
if (b > 0){
dist[tid] = getElement(dist_knn, bid, p, ntrain);
ino[tid] = getElementInt(ino_knn, bid, p, ntrain);
}
else
dist[tid] = DBL_MAX;
shortest[tid] = tid;
int stride = blockDim.x/2;
while (stride > 0){
__syncthreads();
if (tid < stride){
if (dist[tid] > dist[tid + stride]){
dist[tid] = dist[tid + stride];
ino[tid] = ino[tid + stride];
shortest[tid] = shortest[tid + stride];
}
}
stride /= 2;
}
__syncthreads();
if(tid == 0)
setElementInt(neighbor_knn, bid, i, nnegibor, ino[0]);
if(tid == shortest[0]){
-- b;
-- p;
}
}
}
__global__ void knnMatching_train(){
int ub = ntrain * nnegibor;
int stride = blockDim.x * gridDim.x;
int idx_train1, idx_train2;
for (int m = blockIdx.x * blockDim.x + threadIdx.x; m < ub; m += stride){
idx_train1 = m / nnegibor;
idx_train2 = neighbor_knn[m];
if (label_train[idx_train1] == label_train[idx_train2])
neighbor_knn[m] = 1;
else
neighbor_knn[m] = 0;
}
}
// launched with a single block
__global__ void knnAcc_train(int neiborhood_size){
int tid = threadIdx.x;
int stride = blockDim.x;
if (tid < 4)
hits[tid] = 0;
__shared__ int matched[BSIZE];
matched[tid] = 0;
for (int m = tid; m < ntrain; m += stride){
int nsametype = 0;
for (int i = 0; i < neiborhood_size; ++ i)
nsametype += neighbor_knn[m * nnegibor + i];
if (nsametype > neiborhood_size/2){
matched[tid] += 1;
if (label_train[m] == FN || label_train[m] == FP)
atomicAdd(&hits[label_train[m]], 1);
}
else{
if (label_train[m] == TN || label_train[m] == TP)
atomicSub(&hits[label_train[m]], 1);
}
}
int stride1 = blockDim.x/2;
while (stride1 > 0){
__syncthreads();
if (tid < stride1)
matched[tid] += matched[tid + stride1];
stride1 /= 2;
}
__syncthreads();
if (tid ==0)
acc_knn = 1.0 * matched[0] / ntrain;
}
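// Re-selects target neighbors: each training instance's target list is overwritten with its current
// nn[label] nearest compatible neighbors taken from neighbor_knn.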
__global__ void updateTarget(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x * blockDim.x;
int max_nn = max(max(nn[0], nn[1]), max(nn[2], nn[3]));
for (int m = blockDim.x * bid + tid; m < ntrain * max_nn; m += size){
int ino = m / max_nn;
int idx_neighbor = m % max_nn;
if (idx_neighbor < nn[label_train[ino]])
setTargetByOffset(ino, idx_neighbor, getElementInt(neighbor_knn, ino, idx_neighbor, nnegibor));
}
}
__global__ void zeroTargetTerm(){
int tid = threadIdx.x;
int bid = blockIdx.x;
int size = gridDim.x * blockDim.x;
for (int m = blockDim.x * bid + tid; m < ntrain * ntrain; m += size)
t_target[m] = .0;
}
__device__ void updateTar(int idx1, int idx2, double h){
__syncthreads();
for (int p = threadIdx.x; p < ntrain; p += blockDim.x)
t_target[p * ntrain + idx1] += h * (getElement(km_train, idx1, p, ntrain) - getElement(km_train, idx2, p, ntrain));
}
__global__ void updateTargetTerm(){
int i, j;
double h;
int bid = blockIdx.x;
for (i = 0; i < ntrain; ++ i){
for (int kk = 0; kk < nn[label_train[i]]; ++ kk){
j = getTargetByOffset(i, kk);
//if (label_train[i] == TP)
// h = nu;
//else
//h = 1.0;
h = nu[label_train[i]];
if (i % gridDim.x == bid)
updateTar(i, j, h);
if (j % gridDim.x == bid)
updateTar(j, i, h);
}
}
}
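// For each class, counts how many current target neighbors still appear among an instance's
// nn[label] nearest neighbors under the updated metric; the per-class totals are reduced into hits[].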
__global__ void countTarget(){
__shared__ int stay[BSIZE*4];
int tid = threadIdx.x;
for (int i = 0; i < 4; ++ i)
stay[tid + BSIZE * i] = 0;
for(int m = tid; m < ntrain; m += BSIZE){
int l = label_train[m];
for (int i = 0; i < nn[l]; ++ i){
int n = getElementInt(neighbor_knn, m, i, nnegibor);
for (int j = 0; j < nn[l]; ++ j){
int t = getTargetByOffset(m, j);
if ( n == t)
++ stay[l * BSIZE + tid];
}
}
}
for (int i = 0; i < 4; ++ i){
int stride1 = blockDim.x/2;
while (stride1 > 0){
__syncthreads();
if (tid < stride1)
stay[BSIZE * i + tid] += stay[BSIZE * i + tid + stride1];
stride1 /= 2;
}
__syncthreads();
if (tid == 0)
hits[i] = stay[BSIZE * i];
}
}
void deviceInitKernelMatrix(int *trainninst, int *testninst, int *nf, double *traindata, double *testdata){
cudaMemcpyToSymbol(ntrain, trainninst, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(ntest, testninst, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(nfeat, nf, sizeof(int), 0, cudaMemcpyHostToDevice);
double *d_train_data, *d_test_data;
cudaMalloc((void **)&d_train_data, sizeof(double) * (*trainninst) * (*nf));
cudaMalloc((void **)&d_test_data, sizeof(double) * (*testninst) * (*nf));
cudaMemcpy(d_train_data, traindata, sizeof(double) * (*trainninst) * (*nf), cudaMemcpyHostToDevice);
cudaMemcpy(d_test_data, testdata, sizeof(double) * (*testninst) * (*nf), cudaMemcpyHostToDevice);
double *d_kernel_matrix_train, *d_kernel_matrix_test;
cudaMalloc((void **)&d_kernel_matrix_train, sizeof(double) * (*trainninst) * (*trainninst));
cudaMemcpyToSymbol(km_train, &d_kernel_matrix_train, sizeof(double*), 0, cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_kernel_matrix_test, sizeof(double) * (*testninst) * (*trainninst));
cudaMemcpyToSymbol(km_test, &d_kernel_matrix_test, sizeof(double*), 0, cudaMemcpyHostToDevice);
// Run the event recording
cudaEvent_t start_event, stop_event;
cudaEventCreate(&start_event) ;
cudaEventCreate(&stop_event) ;
cudaEventRecord(start_event, 0);
calcKM<<<84, 256>>>(d_train_data, d_test_data);
cudaThreadSynchronize();
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaFree(d_train_data);
cudaFree(d_test_data);
}
unsigned *tcount;
int *ncount;
int totalMissed;
double targetCoverage[4];
double minCoverage;
int super = 0;
void deviceInitTarget(int *h_target, int trainninst, int targetsize, int *nc, int *Nneighbor, int *offset){
ncount = Nneighbor;
int *d_target;
cudaMalloc((void **)&d_target, sizeof(int) * targetsize);
cudaMemcpy(d_target, h_target, sizeof(int) * targetsize, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(target, &d_target, sizeof(int*), 0, cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_target, sizeof(int) * trainninst);
cudaMemcpy(d_target, offset, sizeof(int) * trainninst, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(target_offset, &d_target, sizeof(int*), 0, cudaMemcpyHostToDevice);
//cudaMemcpyToSymbol(k, kk, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(nclass, nc, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(nn, Nneighbor, sizeof(int) * 4, 0, cudaMemcpyHostToDevice);
}
void deviceInitLabelTrain(struct Inst *inst, unsigned ninst){
short *label = new short[ninst];
for (int i = 0; i < ninst; ++ i)
label[i] = inst[i].label;
short *d_label;
cudaMalloc((void **)&d_label, sizeof(short) * ninst);
cudaMemcpy(d_label, label, sizeof(short) * ninst, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(label_train, &d_label, sizeof(short*), 0, cudaMemcpyHostToDevice);
delete[] label;
}
void deviceInitLabelTest(struct Inst *inst, unsigned ninst){
short *label = new short[ninst];
for (int i = 0; i < ninst; ++ i)
label[i] = inst[i].label;
short *d_label;
cudaMalloc((void **)&d_label, sizeof(short) * ninst);
cudaMemcpy(d_label, label, sizeof(short) * ninst, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(label_test, &d_label, sizeof(short*), 0, cudaMemcpyHostToDevice);
delete[] label;
}
void deviceInitInstList(struct Inst *inst, unsigned *count, unsigned ninst, int nc, int targetsize){
tcount = count;
cudaMemcpyToSymbol(typecount, count, sizeof(unsigned) * 4, 0, cudaMemcpyHostToDevice);
struct Inst *gi[4];
for (int i = 0; i < 4; ++ i){
if (count[i] > 0)
gi[i] = (struct Inst *)malloc(sizeof(struct Inst) * count[i]);
}
int p[4] = {0, 0, 0, 0};
for(int i = 0; i < ninst; ++ i){
int type = inst[i].label;
gi[type][p[type]].ino = inst[i].ino;
gi[type][p[type]].label = inst[i].label;
++ p[type];
}
struct Inst *d_inst;
cudaMalloc((void **)&d_inst, sizeof(struct Inst) * ninst);
unsigned start = 0;
for (int i = 0; i < 4; ++ i){
if (count[i] > 0)
cudaMemcpy(d_inst + start, gi[i], sizeof(struct Inst) * count[i], cudaMemcpyHostToDevice);
struct Inst *dd_inst = d_inst + start;
cudaMemcpyToSymbol(type_inst, &dd_inst, sizeof(struct Inst *), i * sizeof(struct Inst *), cudaMemcpyHostToDevice);
start += count[i];
}
cudaMemcpyToSymbol(grouped_inst, &d_inst, sizeof(struct Inst *), 0, cudaMemcpyHostToDevice);
for (int i = 0; i < 4; ++ i){
if (count[i] > 0)
free(gi[i]);
}
double *distanceTarget, *distanceMatrix1, *distanceMatrix2;
cudaMalloc((void **)&distanceTarget, sizeof(double) * targetsize);
cudaMemcpyToSymbol(dist_target, &distanceTarget, sizeof(double *), 0, cudaMemcpyHostToDevice);
cudaMalloc((void **)&distanceMatrix1, sizeof(double) * count[TN] * count[FN]);
cudaMemcpyToSymbol(dist1, &distanceMatrix1, sizeof(double *), 0, cudaMemcpyHostToDevice);
if (nc == 4){
cudaMalloc((void **)&distanceMatrix2, sizeof(double) * count[TP] * count[FP]);
cudaMemcpyToSymbol(dist2, &distanceMatrix2, sizeof(double *), 0, cudaMemcpyHostToDevice);
}
}
void deviceInitMu(double m, double n[]){
double local_m = m;
cudaMemcpyToSymbol(mu, &local_m, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(nu, n, sizeof(double) * 4, 0, cudaMemcpyHostToDevice);
double nn[4];
cudaMemcpyFromSymbol(nn, nu, sizeof(double) * 4, 0, cudaMemcpyDeviceToHost);
cout << "retrieve nu: " << nn[0] << " " << nn[1] << " " << nn[2] << " " << nn[3] << endl;
}
void deviceInitO(double *o, int size){
double *d_t;
//cout << "double O: " << o[1] << endl;
cudaMalloc((void **)&d_t, sizeof(double) * size);
cudaMemcpy(d_t, o, sizeof(double) * size, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(O, &d_t, sizeof(double*), 0, cudaMemcpyHostToDevice);
//cout << "d_t: " << d_t << endl;
cudaMalloc((void **)&d_t, sizeof(double) * size);
cudaMemcpyToSymbol(O, &d_t, sizeof(double*), sizeof(double*), cudaMemcpyHostToDevice);
//cout << "d_t: " << d_t << endl;
}
void deviceInitTargetTerm(double *t, int size){
double *d_t;
cudaMalloc((void **)&d_t, sizeof(double) * size);
cudaMemcpy(d_t, t, sizeof(double) * size, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(t_target, &d_t, sizeof(double*), 0, cudaMemcpyHostToDevice);
}
void deviceInitUpdateTerm(int size1, int size2){
double *d_t;
cudaMalloc((void **)&d_t, sizeof(double) * size1);
cudaMemcpyToSymbol(t_update, &d_t, sizeof(double*), 0, cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_t, sizeof(double) * size2);
cudaMemcpyToSymbol(t_gradient, &d_t, sizeof(double*), 0, cudaMemcpyHostToDevice);
}
void deviceInitTri(int size){
double *t_o;
cudaMalloc((void **)&t_o, sizeof(double) * size);
cudaMemcpyToSymbol(t_triplet, &t_o, sizeof(double*), 0, cudaMemcpyHostToDevice);
}
void deviceInitKnn(int n_train, int n_test, int kk){
double *d_knn;
//cudaMalloc((void **)&d_knn, sizeof(double) * n_test * n_train);
cudaMalloc((void **)&d_knn, sizeof(double) * n_train * n_train);
cudaMemcpyToSymbol(dist_knn, &d_knn, sizeof(double*), 0, cudaMemcpyHostToDevice);
int* i_knn;
//cudaMalloc((void **)&i_knn, sizeof(int) * n_test * n_train);
cudaMalloc((void **)&i_knn, sizeof(int) * n_train * n_train);
cudaMemcpyToSymbol(ino_knn, &i_knn, sizeof(int*), 0, cudaMemcpyHostToDevice);
//cudaMalloc((void **)&i_knn, sizeof(int) * n_test * kk);
cudaMalloc((void **)&i_knn, sizeof(int) * n_train * kk);
cudaMemcpyToSymbol(neighbor_knn, &i_knn, sizeof(int*), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(nnegibor, &kk, sizeof(int), 0, cudaMemcpyHostToDevice);
}
int targetUpdateNeeded(double alpha){
if (super){
super = 0;
return 1;
}
if (alpha < 1e-8 && totalMissed > 0)
//if ((alpha < 1e-8 && totalMissed > 0) || minCoverage < 0.5)
return 1;
else
return 0;
}
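// Host-side training driver: gradient descent on O with an adaptive step size alpha (grown by 1.1
// after an improving step, shrunk 10x with O rolled back otherwise), periodic target re-selection,
// and kNN accuracy reporting on both the test and training sets.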
void kernelTest(int d, int n, int n_test, int k[], double *result, double mu, double alpha, double nu[]){
char path[1024];
getcwd(path, 1024);
double original_alpha = alpha;
double dd[20];
int h_hits[4];
deviceInitKnn(n, n_test, 40);
double f_old = DBL_MAX;
double min_iter = 0;
double global_max_acc = .0;
unsigned global_max_iter = 0;
unsigned global_max_pos = 0;
double global_max_acc_train = .0;
unsigned global_max_iter_train = 0;
unsigned global_max_pos_train = 0;
int kk = 5;
bool targetUpdated = false;
int idx = 1;
unsigned iter = 0;
while(true){
// Run the event recording
cudaEvent_t start_event, stop_event;
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
cudaEventRecord(start_event, 0);
cout << endl << "Iter = " << iter << ", mu = " << mu << ", k = " << k[0] << "," << k[1] << "," << k[2] << "," << k[3] << ", nu = " << nu[0] << "," << nu[1] << "," << nu[2] << "," << nu[3] << endl;
idx = 1 - idx;
cudaMemcpyToSymbol(idx_o, &idx, sizeof(int), 0, cudaMemcpyHostToDevice);
// update target and target term periodically
if (targetUpdateNeeded(alpha)){
knnUpdateDist_train<<<84, BSIZE>>>();
knnFindNeighbor_train<<<n, BSIZE>>>();
updateTarget<<<84, BSIZE>>>();
zeroTargetTerm<<<84, BSIZE>>>();
updateTargetTerm<<<84, BSIZE>>>();
alpha = original_alpha;
targetUpdated = true;
}
// update distances to targets(i,j) and between opposing points(i,l)
update2<<<84, 256>>>();
// update t_triplet by calculating vdist of every (i, j, l)
zeroT_triplet<<<84, 256>>>();
update3_2<<<84, 256>>>();
// update object function value
calcFval<<<84, 256>>>();
cudaThreadSynchronize();
cudaMemcpyFromSymbol(&dd[9], f_val, sizeof(double), 0, cudaMemcpyDeviceToHost);
cout << "f_val= " << dd[9];
if (dd[9] < f_old || targetUpdated){
targetUpdated = false;
cout << ", reduced by " << f_old - dd[9] << endl;
f_old = dd[9];
min_iter = iter;
alpha *= 1.1;
knnUpdateDist<<<84, BSIZE>>>();
knnFindNeighbor<<<n_test, BSIZE>>>();
knnMatching<<<84, BSIZE>>>();
for (int i = 0; i < 20; ++ i){
knnAcc<<<1, BSIZE>>>(2 * i + 1);
cudaThreadSynchronize();
cudaMemcpyFromSymbol(h_hits, hits, sizeof(int) * 4, 0, cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(&dd[i], acc_knn, sizeof(double), 0, cudaMemcpyDeviceToHost);
cout << h_hits[0] + h_hits[1] + h_hits[2] + h_hits[3] << "(" << h_hits[0] << "," << h_hits[1] << "," << h_hits[2] << "," << h_hits[3] << "), ";
//if (2 * i + 1 == kk && h_hits[0] + h_hits[1] + h_hits[2] + h_hits[3] >= 0)
//super = 1;
}
double max_acc = .0;
int max_acc_k = -1;
for (int i = 0; i < 20; ++ i){
if (dd[i] > max_acc){
max_acc = dd[i];
max_acc_k = 2 * i + 1;
}
}
if (max_acc >= global_max_acc && iter > 10){
global_max_acc = max_acc;
global_max_iter = iter;
global_max_pos = max_acc_k;
}
cout << endl << "max acc = " << max_acc << " at k = " << max_acc_k
<< ". global max = " << global_max_acc << " in iter " << global_max_iter << " at k = " << global_max_pos << endl;
knnUpdateDist_train<<<84, BSIZE>>>();
knnFindNeighbor_train<<<n, BSIZE>>>();
countTarget<<<1, BSIZE>>>();
cudaThreadSynchronize();
cudaMemcpyFromSymbol(h_hits, hits, sizeof(int) * 4, 0, cudaMemcpyDeviceToHost);
cout << "Targets: "
<< 1.0 * h_hits[0]/(tcount[0]*ncount[0]) << "(" << h_hits[0] << "/" << tcount[0]*ncount[0] << "), "
<< 1.0 * h_hits[1]/(tcount[1]*ncount[1]) << "(" << h_hits[1] << "/" << tcount[1]*ncount[1] << "), "
<< 1.0 * h_hits[2]/(tcount[2]*ncount[2]) << "(" << h_hits[2] << "/" << tcount[2]*ncount[2] << "), "
<< 1.0 * h_hits[3]/(tcount[3]*ncount[3]) << "(" << h_hits[3] << "/" << tcount[3]*ncount[3] << ")"<< endl ;
minCoverage = 1.0;
for (int i = 0; i < 4; ++ i){
targetCoverage[i] = 1.0 * h_hits[i] / (tcount[i]*ncount[i]);
if (minCoverage > targetCoverage[i])
minCoverage = targetCoverage[i];
}
totalMissed = 0;
for (int i = 0; i < 4; ++ i)
totalMissed += tcount[i]*ncount[i] - h_hits[i];
knnMatching_train<<<84, BSIZE>>>();
for (int i = 0; i < 20; ++ i){
knnAcc_train<<<1, BSIZE>>>(2 * i + 1);
cudaThreadSynchronize();
cudaMemcpyFromSymbol(h_hits, hits, sizeof(int) * 4, 0, cudaMemcpyDeviceToHost);
cudaMemcpyFromSymbol(&dd[i], acc_knn, sizeof(double), 0, cudaMemcpyDeviceToHost);
cout << h_hits[0] + h_hits[1] + h_hits[2] + h_hits[3] << "(" << h_hits[0] << "," << h_hits[1] << "," << h_hits[2] << "," << h_hits[3] << ") ";
}
double max_acc_train = .0;
int max_acc_k_train = -1;
for (int i = 0; i < 20; ++ i){
if (dd[i] > max_acc_train){
max_acc_train = dd[i];
max_acc_k_train = 2 * i + 1;
}
}
if (max_acc_train >= global_max_acc_train && iter>10){
global_max_acc_train = max_acc_train;
global_max_iter_train = iter;
global_max_pos_train = max_acc_k_train;
}
cout << endl << "max acc = " << max_acc_train << " at k = " << max_acc_k_train
<< ". global max = " << global_max_acc_train << " in iter " << global_max_iter_train << " at k = " << global_max_pos_train;
}
else{
cout << ", increased by " << dd[9] - f_old;
alpha /= 10;
copyO<<<84, BSIZE>>>();
update2<<<84, 256>>>();
// update t_triplet by calculating vdist of every (i, j, l)
zeroT_triplet<<<84, 256>>>();
update3_2<<<84, 256>>>();
}
cout << endl << "min_f = " << f_old << " at iter " << min_iter << ", alpha = " << alpha << endl;
// t_update = I - 2 * alpha * (t_target + t_triplet)
updateUpdateTerm<<<84, 256>>>(alpha);
// update omega = omega * t_update
zeroO<<<84, 256>>>();
dim3 dimGrid(d, (n - 1) / BSIZE + 1);
dim3 dimBlock(BSIZE);
updateO1<<<dimGrid, dimBlock>>>();
cudaThreadSynchronize();
float time_kernel;
cudaEventRecord(stop_event, 0);
cudaEventElapsedTime(&time_kernel, start_event, stop_event);
cout << "time " << time_kernel/1000 << " at " << path << endl;
++ iter;
//if (iter > 100)
if (alpha < 1e-10)
break;
}
}
|
8566b6d462d32f3d760185ba02ecd3420b44b5ae.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/count.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVStrings.h"
#include "./NVStringsImpl.h"
#include "../custring_view.cuh"
#include "../unicode/is_flags.h"
#include "../util.h"
// this will return the number of characters for each string
unsigned int NVStrings::len(int* lengths, bool todevice)
{
unsigned int count = size();
if( lengths==0 || count==0 )
return count;
auto execpol = rmm::exec_policy(0);
int* d_rtn = lengths;
if( !todevice )
d_rtn = device_alloc<int>(count,0);
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->chars_count();
else
d_rtn[idx] = -1;
});
//
//printCudaError(hipDeviceSynchronize(),"nvs-len");
size_t size = thrust::reduce(execpol->on(0), d_rtn, d_rtn+count, (size_t)0,
[]__device__(int lhs, int rhs) {
if( lhs < 0 )
lhs = 0;
if( rhs < 0 )
rhs = 0;
return lhs + rhs;
});
if( !todevice )
{ // copy result back to host
CUDA_TRY( hipMemcpyAsync(lengths,d_rtn,sizeof(int)*count,hipMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)size;
}
// this will return the number of bytes for each string
size_t NVStrings::byte_count(int* lengths, bool todevice)
{
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
int* d_rtn = lengths;
if( !lengths )
todevice = false; // makes sure we free correctly
if( !todevice )
d_rtn = device_alloc<int>(count,0);
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->size();
else
d_rtn[idx] = -1;
});
//
//printCudaError(hipDeviceSynchronize(),"nvs-bytes");
size_t size = thrust::reduce(execpol->on(0), d_rtn, d_rtn+count, (size_t)0,
[]__device__(int lhs, int rhs) {
if( lhs < 0 )
lhs = 0;
if( rhs < 0 )
rhs = 0;
return lhs + rhs;
});
if( !todevice )
{ // copy result back to host
if( lengths )
CUDA_TRY( hipMemcpyAsync(lengths,d_rtn,sizeof(int)*count,hipMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)size;
}
//
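// Per-string character-class predicates: each fills results[] with one bool per string (using the
// unicode flags table) and returns the number of strings that test true.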
unsigned int NVStrings::isalnum( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // alnum requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
brc = IS_ALPHANUM(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true );
if( !todevice )
{ // copy result back to host
CUDA_TRY( hipMemcpyAsync(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::isalpha( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // alpha requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
brc = IS_ALPHA(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( hipMemcpyAsync(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
//
unsigned int NVStrings::isdigit( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // digit requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
brc = IS_DIGIT(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( hipMemcpyAsync(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::isspace( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // space requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
brc = IS_SPACE(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( hipMemcpyAsync(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::isdecimal( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // decimal requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
brc = IS_DECIMAL(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( hipMemcpyAsync(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::isnumeric( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // numeric requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
brc = IS_NUMERIC(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( hipMemcpyAsync(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::islower( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
brc = !IS_ALPHA(flg) || IS_LOWER(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( hipMemcpyAsync(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::isupper( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
brc = !IS_ALPHA(flg) || IS_UPPER(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( hipMemcpyAsync(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::is_empty( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = true; // null is empty
if( dstr )
brc = dstr->empty(); // true only when the string has no characters
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( hipMemcpyAsync(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
|
8566b6d462d32f3d760185ba02ecd3420b44b5ae.cu
|
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/count.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVStrings.h"
#include "./NVStringsImpl.h"
#include "../custring_view.cuh"
#include "../unicode/is_flags.h"
#include "../util.h"
// this will return the number of characters for each string
unsigned int NVStrings::len(int* lengths, bool todevice)
{
unsigned int count = size();
if( lengths==0 || count==0 )
return count;
auto execpol = rmm::exec_policy(0);
int* d_rtn = lengths;
if( !todevice )
d_rtn = device_alloc<int>(count,0);
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->chars_count();
else
d_rtn[idx] = -1;
});
//
//printCudaError(cudaDeviceSynchronize(),"nvs-len");
size_t size = thrust::reduce(execpol->on(0), d_rtn, d_rtn+count, (size_t)0,
[]__device__(int lhs, int rhs) {
if( lhs < 0 )
lhs = 0;
if( rhs < 0 )
rhs = 0;
return lhs + rhs;
});
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(lengths,d_rtn,sizeof(int)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)size;
}
// this will return the number of bytes for each string
size_t NVStrings::byte_count(int* lengths, bool todevice)
{
unsigned int count = size();
if( count==0 )
return 0;
auto execpol = rmm::exec_policy(0);
int* d_rtn = lengths;
if( !lengths )
todevice = false; // makes sure we free correctly
if( !todevice )
d_rtn = device_alloc<int>(count,0);
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->size();
else
d_rtn[idx] = -1;
});
//
//printCudaError(cudaDeviceSynchronize(),"nvs-bytes");
size_t size = thrust::reduce(execpol->on(0), d_rtn, d_rtn+count, (size_t)0,
[]__device__(int lhs, int rhs) {
if( lhs < 0 )
lhs = 0;
if( rhs < 0 )
rhs = 0;
return lhs + rhs;
});
if( !todevice )
{ // copy result back to host
if( lengths )
CUDA_TRY( cudaMemcpyAsync(lengths,d_rtn,sizeof(int)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)size;
}
//
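// Per-string character-class predicates: each fills results[] with one bool per string (using the
// unicode flags table) and returns the number of strings that test true.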
unsigned int NVStrings::isalnum( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // alnum requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
brc = IS_ALPHANUM(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true );
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::isalpha( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // alpha requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
brc = IS_ALPHA(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
//
unsigned int NVStrings::isdigit( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // digit requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
brc = IS_DIGIT(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::isspace( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // space requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
brc = IS_SPACE(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::isdecimal( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // decimal requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
brc = IS_DECIMAL(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::isnumeric( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // numeric requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
brc = IS_NUMERIC(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::islower( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
brc = !IS_ALPHA(flg) || IS_LOWER(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::isupper( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
brc = !IS_ALPHA(flg) || IS_UPPER(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::is_empty( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = true; // null is empty
if( dstr )
brc = dstr->empty(); // true only when the string has no characters
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
|
58aca5c0f69615967b382b758ee83db3fac77b7b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lab3_cuda.h"
#include <bits/stdc++.h>
using namespace std;
#define TOLERANCE 0.001
#define JACOBI_UPDATE_TOLERANCE 0.001
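// Tiled matrix multiply C = A * B using 16x16 shared-memory tiles; each thread computes one C element.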
__global__ void Matrix_Multiplication_Cuda(double* A, int Am, int An, double* B, int Bm, int Bn, double* C, int Cm, int Cn) {
__shared__ double A_shared[16][16];
__shared__ double B_shared[16][16];
int m = blockIdx.y*16 + threadIdx.y;
int n = blockIdx.x*16 + threadIdx.x;
double mul_c = 0;
for (int i = 0; i < (16 + An - 1)/16; i++) {
if (i*16 + threadIdx.x < An && m < Am) {
A_shared[threadIdx.y][threadIdx.x] = A[m*An + i*16 + threadIdx.x];
}
else {
A_shared[threadIdx.y][threadIdx.x] = 0.0;
}
if (i*16 + threadIdx.y < Bm && n < Bn) {
B_shared[threadIdx.y][threadIdx.x] = B[(i*16 + threadIdx.y)*Bn + n];
}
else {
B_shared[threadIdx.y][threadIdx.x] = 0.0;
}
__syncthreads();
for (int k = 0; k < 16; ++k) { // loop index renamed from 'n' to avoid shadowing the output column index n
    mul_c += A_shared[threadIdx.y][k] * B_shared[k][threadIdx.x];
}
__syncthreads();
}
if (m < Cm && n < Cn) {
C[ ((blockIdx.y * blockDim.y + threadIdx.y)*Cn) + (blockIdx.x*blockDim.x) + threadIdx.x ] = mul_c;
}
}
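// --- Illustrative launch sketch (not part of the original source) --------------------
// A minimal host-side wrapper showing how the tiled kernel above is meant to be driven:
// one 16x16 thread block per 16x16 output tile, with the grid rounded up to cover the
// Cm x Cn result.  The wrapper name and the *_d temporaries are illustrative only.
void gpu_matmul_example(const double* A, int Am, int An,
                        const double* B, int Bm, int Bn, double* C)
{
    double *A_d, *B_d, *C_d;
    hipMalloc((void**)&A_d, sizeof(double)*Am*An);
    hipMalloc((void**)&B_d, sizeof(double)*Bm*Bn);
    hipMalloc((void**)&C_d, sizeof(double)*Am*Bn);
    hipMemcpy(A_d, A, sizeof(double)*Am*An, hipMemcpyHostToDevice);
    hipMemcpy(B_d, B, sizeof(double)*Bm*Bn, hipMemcpyHostToDevice);
    dim3 block(16, 16);
    dim3 grid((Bn + 16 - 1)/16, (Am + 16 - 1)/16); // x walks columns of C, y walks rows
    hipLaunchKernelGGL(Matrix_Multiplication_Cuda, grid, block, 0, 0,
                       A_d, Am, An, B_d, Bm, Bn, C_d, Am, Bn);
    hipMemcpy(C, C_d, sizeof(double)*Am*Bn, hipMemcpyDeviceToHost);
    hipFree(A_d); hipFree(B_d); hipFree(C_d);
}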
double* mat_transpose(double* A, int Am, int An) {
double* A_T;
A_T = (double*)malloc(__SIZEOF_POINTER__*An*Am);
for(int i = 0; i < An; i++){
for(int j = 0 ; j < Am; j++){
A_T[i*Am+j] = A[j*An+i];
}
}
return A_T;
}
void print_matrix(double* A, int Am, int An) {
for(int i = 0; i < Am; i++){
for(int j = 0 ; j < An; j++){
printf("%f ", A[i*An+j]);
}
printf("\n");
}
}
void print_vector(double* A, int An) {
printf("[");
for(int i=0; i<An-1; i++)
printf("%f,",A[i]);
printf("%f]\n",A[An-1]);
}
void print_mat(double** A, int Am, int An) {
printf("[");
for (int i=0; i<Am; i++){
if (i>0)
printf(" ");
printf("[");
for (int j=0; j<An-1; j++){
printf("%f, ",A[i][j]);
}
if (i < Am-1)
printf("%f]\n",A[i][An-1]);
}
printf("%f]]\n",A[Am-1][An-1]);
}
double* mat_mul(double* A, int Am, int An,
double* B, int Bm, int Bn){
double *C;
C = (double*)malloc(__SIZEOF_POINTER__*Am*Bn);
for (int i=0; i<Am; i++){
for (int j=0; j<Bn; j++){
C[i*Bn+j] = 0;
for (int k=0; k<An; k++){
C[i*Bn+j] += A[i*An + k] * B[k*Bn + j];
}
}
}
return C;
}
double** mult(double** A, int Am, int An,
double** B, int Bm, int Bn){
double **C;
C = (double**)malloc(__SIZEOF_POINTER__*Am);
for (int i=0; i<Am; i++)
C[i] = (double*)malloc(__SIZEOF_DOUBLE__*Bn);
for (int i=0; i<Am; i++){
for (int j=0; j<Bn; j++){
C[i][j] = 0;
for (int k=0; k<An; k++){
C[i][j] += A[i][k] * B[k][j];
}
}
}
return C;
}
double *S; //Symmetric matrix(input)
int N_jacobi;
double *e; //eigenvalues
double **E; //eigenvectors
int *ind;
bool *changed;
int state;
int maxind(int k) {
int m = k+1;
for (int i = k+2; i < N_jacobi; i++){
if (fabs(S[k*N_jacobi + i]) > fabs(S[k*N_jacobi + m])){
m = i;
}
}
return m;
}
void update(int k, double t) {
double ek_prev = e[k];
e[k] = ek_prev + t;
if (e[k] < 0) e[k] = 0;
if (changed[k] && fabs(ek_prev - e[k]) < JACOBI_UPDATE_TOLERANCE) {
changed[k] = false;
state = state - 1;
}
else if ((! changed[k]) && fabs(ek_prev - e[k]) > JACOBI_UPDATE_TOLERANCE) {
changed[k] = true;
state = state + 1;
}
}
void rotate(int k, int l, int i, int j, double c, double s,
bool eigenvectors){
double** mat1;
double** mat2;
double** mat3;
mat1 = (double**)malloc(__SIZEOF_POINTER__*2);
mat1[0] = (double*)malloc(__SIZEOF_DOUBLE__*2);
mat1[1] = (double*)malloc(__SIZEOF_DOUBLE__*2);
mat1[0][0] = c; mat1[0][1] = -s;
mat1[1][0] = s; mat1[1][1] = c;
mat2 = (double**)malloc(__SIZEOF_POINTER__*2);
mat2[0] = (double*)malloc(__SIZEOF_DOUBLE__*1);
mat2[1] = (double*)malloc(__SIZEOF_DOUBLE__*1);
if (eigenvectors){
mat2[0][0] = E[i][k];
mat2[1][0] = E[i][l];
}
else {
mat2[0][0] = S[k*N_jacobi + l];
mat2[1][0] = S[i*N_jacobi + j];
}
mat3 = mult(mat1, 2, 2, mat2, 2, 1);
if (eigenvectors){
E[i][k] = mat3[0][0];
E[i][l] = mat3[1][0];
}
else{
S[k*N_jacobi + l] = mat3[0][0];
S[i*N_jacobi + j] = mat3[1][0];
}
free(mat1[0]);
free(mat1[1]);
free(mat1);
free(mat2[0]);
free(mat2[1]);
free(mat2);
free(mat3[0]);
free(mat3[1]);
free(mat3);
}
void init_jacobi() {
E = (double**)malloc(__SIZEOF_POINTER__*N_jacobi);
for (int i=0; i<N_jacobi; i++){
E[i] = (double*)malloc(__SIZEOF_DOUBLE__*N_jacobi);
for (int j=0; j<N_jacobi; j++){
E[i][j] = 0;
}
E[i][i] = 1;
}
state = N_jacobi;
e = (double*)malloc(__SIZEOF_DOUBLE__*N_jacobi);
ind = (int*)malloc(__SIZEOF_INT__*N_jacobi);
changed = (bool*)malloc(sizeof(bool)*N_jacobi);
for (int k=0; k<N_jacobi; k++){
ind[k] = maxind(k);
e[k] = S[k*N_jacobi + k];
changed[k] = true;
}
}
void Jacobi(double* input_matrix, int n,
double** eigenvalues, double*** eigenvectors) {
N_jacobi = n;
S = input_matrix;
init_jacobi();
while(state != 0){
int m = 0;
for (int k=1; k<N_jacobi-1; k++){
if (fabs(S[k*N_jacobi + ind[k]]) > fabs(S[m*N_jacobi + ind[m]])){
m = k;
}
}
int k = m;
int l = ind[m];
double p = S[k*N_jacobi + l];
double y = (e[l] - e[k]) / 2.0;
double d = fabs(y) + sqrt(p*p + y*y);
double r = sqrt(p*p + d*d);
double c = d / r;
double s = p / r;
double t = (p*p) / d;
if (y < 0.0) { s = -s; t = -t; }
S[k*N_jacobi + l] = 0.0;
update(k, -t);
update(l, t);
for (int i=0; i<k; i++) { rotate(i, k, i, l, c, s, false); }
for (int i=k+1; i<l; i++){ rotate(k, i, i, l, c, s, false); }
for (int i=l+1; i<N_jacobi; i++) { rotate(k, i, l, i, c, s, false); }
for (int i=0; i<N_jacobi; i++){
rotate(k, l, i, i, c, s, true);
}
ind[k] = maxind(k);
ind[l] = maxind(l);
}
*eigenvalues = e;
*eigenvectors = E;
}
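// --- Illustrative usage sketch (not part of the original source) ---------------------
// Minimal example of driving the serial Jacobi eigensolver above on a small symmetric
// matrix stored in row-major order.  Jacobi() works in place on the input matrix and
// hands back internally allocated eigenvalue/eigenvector storage.
void jacobi_example()
{
    double A[4] = { 2.0, 1.0,
                    1.0, 2.0 };      // symmetric 2x2 matrix; eigenvalues are 3 and 1
    double* eigenvalues;
    double** eigenvectors;
    Jacobi(A, 2, &eigenvalues, &eigenvectors);
    print_vector(eigenvalues, 2);    // the two eigenvalues
    print_mat(eigenvectors, 2, 2);   // the corresponding 2x2 eigenvector matrix
}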
// /*
// *****************************************************
// TODO -- You must implement this function
// *****************************************************
// */
void SVD_and_PCA (int M,
int N,
double* D,
double** U,
double** SIGMA,
double** V_T,
int* SIGMAm,
int* SIGMAn,
double** D_HAT,
int *K,
int retention) {
// write your code here
double *eigenvalues, **eigenvectors;
printf("\nD = \n");
print_matrix(D,M,N);
double* D_T = mat_transpose(D,M,N);
printf("\nD_T = \n");
print_matrix(D_T,N,M);
double* D_D;
double* DT_D;
double* DTD_D;
double* prod = (double*)malloc(__SIZEOF_POINTER__*N*N);
hipMalloc((void **)&D_D, sizeof(double)*M*N);
hipMalloc((void **)&DT_D, sizeof(double)*N*M);
hipMalloc((void **)&DTD_D, sizeof(double)*N*N);
hipMemcpy(D_D, D, sizeof(double)*M*N, hipMemcpyHostToDevice);
hipMemcpy(DT_D, D_T, sizeof(double)*N*M, hipMemcpyHostToDevice);
dim3 dimGrid0((N + 16 - 1) / 16, (N + 16 - 1) / 16);
dim3 dimBlock0(16, 16);
hipLaunchKernelGGL(( Matrix_Multiplication_Cuda), dim3(dimGrid0), dim3(dimBlock0), 0, 0, DT_D,N,M, D_D,M,N, DTD_D,N,N);
hipMemcpy(prod, DTD_D, sizeof(double)*N*N, hipMemcpyDeviceToHost);
hipFree(D_D);
hipFree(DTD_D);
// double* prod = mat_mul(D_T,N,M,D,M,N);
printf("\nDT_D = \n");
print_matrix(prod,N,N);
Jacobi(prod, N, &eigenvalues, &eigenvectors);
printf("\neigenvalues:\n");
print_vector(eigenvalues, N);
printf("\neigenvectors:\n");
print_mat(eigenvectors, N, N);
vector<pair<double,int>> eigenpairs;
for(int i = 0; i < N; i++) {
eigenpairs.push_back(make_pair(eigenvalues[i],i));
}
sort(eigenpairs.begin(),eigenpairs.end());
reverse(eigenpairs.begin(),eigenpairs.end());
printf("\nsorted eigenvalues = \n");
for(int i = 0; i < N;i++) {
printf("%f ",eigenpairs[i].first);
}
printf("\n\nindices sorted according eigenvalues = \n");
for(int i = 0; i < N;i++) {
printf("%d ",eigenpairs[i].second);
}
printf("\n");
// for(int i = 0; i < N; i++) {
// for(int j = 0 ; j < N ; j++) {
// printf("%f ",eigenvectors[i][j]);
// }
// printf("\n");
// }
double sorted_eigenvectors[N][N];
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
sorted_eigenvectors[i][j] = eigenvectors[i][eigenpairs[j].second];
}
}
printf("\nsorted eigenvectors = \n");
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
printf("%f ",sorted_eigenvectors[i][j]);
}
printf("\n");
}
double t_sorted_eigenvectors[N][N];
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
t_sorted_eigenvectors[i][j] = sorted_eigenvectors[j][i];
}
}
printf("\nt_sorted eigenvectors = \n");
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
printf("%f ",t_sorted_eigenvectors[i][j]);
}
printf("\n");
}
double* inv_sigma_mat = (double*)malloc(__SIZEOF_POINTER__*N*M);
double* U_transpose = (double*)malloc(__SIZEOF_POINTER__*N*N);
for(int i=0; i<N; i++){
for(int j=0; j<M; j++){
inv_sigma_mat[i*M+j] = 0;
}
}
*SIGMA = (double*)malloc(__SIZEOF_POINTER__*N);
*U = (double*)malloc(__SIZEOF_POINTER__*N*N);
for(int k=0; k<N; k++){
int i = eigenpairs[k].second;
(*SIGMA)[i] = (double)sqrt(eigenpairs[i].first);
inv_sigma_mat[i*N+i] = 1/(double)sqrt(eigenpairs[i].first);
for(int j=0; j<N; j++){
U_transpose[i*N+j] = eigenvectors[j][k];
(*U)[j*N + i] = eigenvectors[j][k];
}
}
printf("\nU = \n");
print_matrix(*U,N,N);
// for(int i = 0; i < N; i++) {
// int p = eigenpairs[i].second;
// inv_sigma_mat[i*N+i] = 1/(double)sqrt(eigenpairs[i].first);
// }
// printf("clear");
// for(int i = 0; i < N; i++)
// {
// (*SIGMA)[i] = (double)sqrt(eigenpairs[i].first);
// }
// for(int i = 0; i < N; i++)
// {
// printf("sigmas = %f\n",(*SIGMA)[i]);
// }
double* inv_sigma_mat_D;
double* U_transpose_D;
double* V_transpose_D;
double* prod1 = (double*)malloc(__SIZEOF_POINTER__*M*N);
double* prod1_D;
hipMalloc((void **)&inv_sigma_mat_D, sizeof(double)*N*M);
hipMalloc((void **)&U_transpose_D, sizeof(double)*N*N);
hipMalloc((void **)&prod1_D, sizeof(double)*M*N);
hipMemcpy(inv_sigma_mat_D, inv_sigma_mat, sizeof(double)*N*M, hipMemcpyHostToDevice);
hipMemcpy(U_transpose_D, U_transpose, sizeof(double)*N*N, hipMemcpyHostToDevice); // U_transpose holds N*N doubles (N*M would over-read)
dim3 dimGrid1((M + 16 - 1) / 16, (N + 16 - 1) / 16);
dim3 dimBlock1(16, 16);
hipLaunchKernelGGL(( Matrix_Multiplication_Cuda), dim3(dimGrid1), dim3(dimBlock1), 0, 0, inv_sigma_mat_D,M,N, U_transpose_D,N,N, prod1_D,M,N);
hipFree(inv_sigma_mat_D);
hipFree(U_transpose_D);
hipMalloc((void**)&V_transpose_D, sizeof(double)*M*M);
dim3 dimGrid2((M + 16 - 1) / 16, (M + 16 - 1) / 16);
dim3 dimBlock2(16, 16);
*V_T = (double*)malloc(__SIZEOF_POINTER__*M*M);
hipLaunchKernelGGL(( Matrix_Multiplication_Cuda), dim3(dimGrid2), dim3(dimBlock2), 0, 0, prod1_D,M,N, DT_D,N,M, V_transpose_D,M,M);
hipMemcpy(*V_T, V_transpose_D, sizeof(double)*M*M, hipMemcpyDeviceToHost);
hipFree(DT_D); // DT_D is no longer needed; inv_sigma_mat_D and U_transpose_D were already freed above
hipFree(prod1_D);
hipFree(V_transpose_D);
free(prod);
free(prod1);
free(inv_sigma_mat);
free(U_transpose);
printf("\nSVD done!\n");
double eigensum = 0;
for(int i=0; i<N; i++) {
eigensum += eigenpairs[i].first;
}
printf("\nvariance = \n");
double variance[N];
for(int i=0; i<N; i++) {
variance[i] = (eigenpairs[i].first)/eigensum;
printf("%f ",variance[i]);
}
printf("\n");
double travelsum = 0;
int ncols = 0;
for(int i = 0; i<N; i++) {
printf("\ntravelsum = %f\n",travelsum);
if((travelsum*100) < (double)retention){
travelsum += variance[i];
ncols++;
}
else {
break;
}
}
*K = ncols;
printf("\nK = (%d,%d)\n", ncols, *K);
double* U_current = (double*)malloc(__SIZEOF_POINTER__*N*(*K));
for(int i=0; i<N; i++){
for(int j=0; j<ncols; j++){
U_current[i*(ncols) + j] = (*U)[i*N + j];
}
}
printf("\nD = \n");
print_matrix(D,M,N);
printf("\nU_current = \n");
print_matrix(U_current,N,ncols);
printf("\n\n");
*D_HAT = (double*)malloc(__SIZEOF_DOUBLE__*M*ncols);
double* D_Dest;
double* D_HAT_Dest;
double* U_Dest;
hipMalloc((void **)&D_Dest, __SIZEOF_POINTER__*M*N);
hipMalloc((void **)&U_Dest,__SIZEOF_DOUBLE__*N*(ncols));
hipMalloc((void **)&D_HAT_Dest, __SIZEOF_POINTER__*M*(ncols));
hipMemcpy(D_Dest, D, sizeof(double)*M*N, hipMemcpyHostToDevice);
hipMemcpy(U_Dest, U_current, sizeof(double)*N*ncols, hipMemcpyHostToDevice);
double* dd = (double*)malloc(__SIZEOF_POINTER__*M*N);
double* ud = (double*)malloc(__SIZEOF_POINTER__*N*ncols);
hipMemcpy(dd, D_Dest, sizeof(double)*M*N, hipMemcpyDeviceToHost);
hipMemcpy(ud, U_Dest, sizeof(double)*N*ncols, hipMemcpyDeviceToHost);
printf("\nDD = \n");
print_matrix(dd,M,N);
printf("\nU_currentD = \n");
print_matrix(ud,N,ncols);
printf("\n\n");
dim3 dimGrid3((M + 16 - 1) / 16, (M + 16 - 1) / 16);
dim3 dimBlock3(16, 16);
hipLaunchKernelGGL(( Matrix_Multiplication_Cuda), dim3(dimGrid3), dim3(dimBlock3), 0, 0, D_Dest,M,N, U_Dest,N,ncols, D_HAT_Dest,M,ncols);
hipMemcpy(*D_HAT, D_HAT_Dest, sizeof(double)*M*ncols, hipMemcpyDeviceToHost);
printf("\nD_HAT\n");
print_matrix(*D_HAT,M,ncols);
printf("\nV_T = \n");
print_matrix(*V_T,M,M);
/*****************************************************************************************************************************************************/
/* U calculation */
/*****************************************************************************************************************************************************/
// double dv_mat[M][N];
// double sum = 0;
// for (int i = 0; i < M; i++) {
// for (int j = 0; j < N; j++) {
// for (int k = 0; k < N; k++) {
// sum = sum + D[i*N+k]*sorted_eigenvectors[k][j];
// }
// dv_mat[i][j] = sum;
// sum = 0;
// }
// }
// double dvsi[M][M];
// double sum1 = 0;
// for (int i = 0; i < M; i++) {
// for (int j = 0; j < M; j++) {
// for (int k = 0; k < N; k++) {
// sum1 = sum1 + dv_mat[i][k]*inv_sigma_mat[k][j];
// // printf("\n(%d,%d,%d)\n",i,j,k);
// }
// dvsi[i][j] = sum1;
// sum1 = 0;
// }
// }
// // printf("\n\n%f\n\n",dvsi[0][0]);
// printf("\nU = \n");
// for(int i = 0; i < M; i++) {
// for(int j = 0; j < M; j++) {
// printf("%f ",dvsi[i][j]);
// }
// printf("\n");
// }
// /*****************************************************************************************************************************************************/
// /*****************************************************************************************************************************************************/
// /* Correctness Check */
// /*****************************************************************************************************************************************************/
// // usvt
// double usigma[M][N];
// double sum2 = 0;
// for (int i = 0; i < M; i++) {
// for (int j = 0; j < N; j++) {
// for (int k = 0; k < M; k++) {
// sum2 = sum2 + dvsi[i][k]*sigma_mat[k][j];
// }
// usigma[i][j] = sum2;
// sum2 = 0;
// }
// }
// double usvt[M][N];
// double sum3 = 0;
// for (int i = 0; i < M; i++) {
// for (int j = 0; j < N; j++) {
// for (int k = 0; k < N; k++) {
// sum3 = sum3 + usigma[i][k]*t_sorted_eigenvectors[k][j];
// }
// usvt[i][j] = sum3;
// sum3 = 0;
// }
// }
// printf("\ncheck_mat = \n");
// for(int i = 0; i < M; i++) {
// for(int j = 0; j < N; j++) {
// printf("%f ",usvt[i][j]);
// }
// printf("\n");
// }
// /*****************************************************************************************************************************************************/
// double eigensum = 0;
// for(int i=0; i<N; i++) {
// eigensum += eigenpairs[i].first;
// }
// printf("\nvariance = \n");
// double variance[N];
// for(int i=0; i<N; i++) {
// variance[i] = (eigenpairs[i].first)/eigensum;
// printf("%f ",variance[i]);
// }
// printf("\n");
// double travelsum = 0;
// int ncols = 0;
// for(int i = 0; i<N; i++) {
// printf("\ntravelsum = %f\n",travelsum);
// if((travelsum*100) < retention){
// travelsum += variance[i];
// ncols++;
// }
// else {
// break;
// }
// }
// printf("\nK = %d\n", ncols);
// double wmat[N][ncols];
// for(int i=0; i<N; i++) {
// for(int j=0; j<ncols; j++) {
// wmat[i][j] = sorted_eigenvectors[i][j];
// }
// }
// printf("\nW = \n");
// for(int i = 0; i < N; i++) {
// for(int j = 0; j < ncols; j++) {
// printf("%f ",wmat[i][j]);
// }
// printf("\n");
// }
// double dhat[M][ncols];
// double sum4 = 0;
// for (int i = 0; i < M; i++) {
// for (int j = 0; j < ncols; j++) {
// for (int k = 0; k < N; k++) {
// sum4 = sum4 + D[i*N+k]*wmat[k][j];
// }
// dhat[i][j] = sum4;
// sum4 = 0;
// }
// }
// printf("\nD_HAT = \n");
// for(int i = 0; i < M; i++) {
// for(int j = 0; j < ncols; j++) {
// printf("%f ",dhat[i][j]);
// }
// printf("\n");
// }
}
|
58aca5c0f69615967b382b758ee83db3fac77b7b.cu
|
#include "lab3_cuda.h"
#include <bits/stdc++.h>
using namespace std;
#define TOLERANCE 0.001
#define JACOBI_UPDATE_TOLERANCE 0.001
__global__ void Matrix_Multiplication_Cuda(double* A, int Am, int An, double* B, int Bm, int Bn, double* C, int Cm, int Cn) {
__shared__ double A_shared[16][16];
__shared__ double B_shared[16][16];
int m = blockIdx.y*16 + threadIdx.y;
int n = blockIdx.x*16 + threadIdx.x;
double mul_c = 0;
for (int i = 0; i < (16 + An - 1)/16; i++) {
if (i*16 + threadIdx.x < An && m < Am) {
A_shared[threadIdx.y][threadIdx.x] = A[m*An + i*16 + threadIdx.x];
}
else {
A_shared[threadIdx.y][threadIdx.x] = 0.0;
}
if (i*16 + threadIdx.y < Bm && n < Bn) {
B_shared[threadIdx.y][threadIdx.x] = B[(i*16 + threadIdx.y)*Bn + n];
}
else {
B_shared[threadIdx.y][threadIdx.x] = 0.0;
}
__syncthreads();
for (int k = 0; k < 16; ++k) { // loop index renamed from 'n' to avoid shadowing the output column index n
    mul_c += A_shared[threadIdx.y][k] * B_shared[k][threadIdx.x];
}
__syncthreads();
}
if (m < Cm && n < Cn) {
C[ ((blockIdx.y * blockDim.y + threadIdx.y)*Cn) + (blockIdx.x*blockDim.x) + threadIdx.x ] = mul_c;
}
}
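// --- Illustrative launch sketch (not part of the original source) --------------------
// CUDA form of a minimal host-side wrapper for the tiled kernel above: one 16x16 block
// per output tile, grid rounded up to cover the Cm x Cn result.  The wrapper name and
// the *_d temporaries are illustrative only.
void gpu_matmul_example(const double* A, int Am, int An,
                        const double* B, int Bm, int Bn, double* C)
{
    double *A_d, *B_d, *C_d;
    cudaMalloc((void**)&A_d, sizeof(double)*Am*An);
    cudaMalloc((void**)&B_d, sizeof(double)*Bm*Bn);
    cudaMalloc((void**)&C_d, sizeof(double)*Am*Bn);
    cudaMemcpy(A_d, A, sizeof(double)*Am*An, cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B, sizeof(double)*Bm*Bn, cudaMemcpyHostToDevice);
    dim3 block(16, 16);
    dim3 grid((Bn + 16 - 1)/16, (Am + 16 - 1)/16); // x walks columns of C, y walks rows
    Matrix_Multiplication_Cuda<<<grid, block>>>(A_d, Am, An, B_d, Bm, Bn, C_d, Am, Bn);
    cudaMemcpy(C, C_d, sizeof(double)*Am*Bn, cudaMemcpyDeviceToHost);
    cudaFree(A_d); cudaFree(B_d); cudaFree(C_d);
}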
double* mat_transpose(double* A, int Am, int An) {
double* A_T;
A_T = (double*)malloc(__SIZEOF_POINTER__*An*Am);
for(int i = 0; i < An; i++){
for(int j = 0 ; j < Am; j++){
A_T[i*Am+j] = A[j*An+i];
}
}
return A_T;
}
void print_matrix(double* A, int Am, int An) {
for(int i = 0; i < Am; i++){
for(int j = 0 ; j < An; j++){
printf("%f ", A[i*An+j]);
}
printf("\n");
}
}
void print_vector(double* A, int An) {
printf("[");
for(int i=0; i<An-1; i++)
printf("%f,",A[i]);
printf("%f]\n",A[An-1]);
}
void print_mat(double** A, int Am, int An) {
printf("[");
for (int i=0; i<Am; i++){
if (i>0)
printf(" ");
printf("[");
for (int j=0; j<An-1; j++){
printf("%f, ",A[i][j]);
}
if (i < Am-1)
printf("%f]\n",A[i][An-1]);
}
printf("%f]]\n",A[Am-1][An-1]);
}
double* mat_mul(double* A, int Am, int An,
double* B, int Bm, int Bn){
double *C;
C = (double*)malloc(__SIZEOF_POINTER__*Am*Bn);
for (int i=0; i<Am; i++){
for (int j=0; j<Bn; j++){
C[i*Bn+j] = 0;
for (int k=0; k<An; k++){
C[i*Bn+j] += A[i*An + k] * B[k*Bn + j];
}
}
}
return C;
}
double** mult(double** A, int Am, int An,
double** B, int Bm, int Bn){
double **C;
C = (double**)malloc(__SIZEOF_POINTER__*Am);
for (int i=0; i<Am; i++)
C[i] = (double*)malloc(__SIZEOF_DOUBLE__*Bn);
for (int i=0; i<Am; i++){
for (int j=0; j<Bn; j++){
C[i][j] = 0;
for (int k=0; k<An; k++){
C[i][j] += A[i][k] * B[k][j];
}
}
}
return C;
}
double *S; //Symmetric matrix(input)
int N_jacobi;
double *e; //eigenvalues
double **E; //eigenvectors
int *ind;
bool *changed;
int state;
int maxind(int k) {
int m = k+1;
for (int i = k+2; i < N_jacobi; i++){
if (fabs(S[k*N_jacobi + i]) > fabs(S[k*N_jacobi + m])){
m = i;
}
}
return m;
}
void update(int k, double t) {
double ek_prev = e[k];
e[k] = ek_prev + t;
if (e[k] < 0) e[k] = 0;
if (changed[k] && fabs(ek_prev - e[k]) < JACOBI_UPDATE_TOLERANCE) {
changed[k] = false;
state = state - 1;
}
else if ((! changed[k]) && fabs(ek_prev - e[k]) > JACOBI_UPDATE_TOLERANCE) {
changed[k] = true;
state = state + 1;
}
}
void rotate(int k, int l, int i, int j, double c, double s,
bool eigenvectors){
double** mat1;
double** mat2;
double** mat3;
mat1 = (double**)malloc(__SIZEOF_POINTER__*2);
mat1[0] = (double*)malloc(__SIZEOF_DOUBLE__*2);
mat1[1] = (double*)malloc(__SIZEOF_DOUBLE__*2);
mat1[0][0] = c; mat1[0][1] = -s;
mat1[1][0] = s; mat1[1][1] = c;
mat2 = (double**)malloc(__SIZEOF_POINTER__*2);
mat2[0] = (double*)malloc(__SIZEOF_DOUBLE__*1);
mat2[1] = (double*)malloc(__SIZEOF_DOUBLE__*1);
if (eigenvectors){
mat2[0][0] = E[i][k];
mat2[1][0] = E[i][l];
}
else {
mat2[0][0] = S[k*N_jacobi + l];
mat2[1][0] = S[i*N_jacobi + j];
}
mat3 = mult(mat1, 2, 2, mat2, 2, 1);
if (eigenvectors){
E[i][k] = mat3[0][0];
E[i][l] = mat3[1][0];
}
else{
S[k*N_jacobi + l] = mat3[0][0];
S[i*N_jacobi + j] = mat3[1][0];
}
free(mat1[0]);
free(mat1[1]);
free(mat1);
free(mat2[0]);
free(mat2[1]);
free(mat2);
free(mat3[0]);
free(mat3[1]);
free(mat3);
}
void init_jacobi() {
E = (double**)malloc(__SIZEOF_POINTER__*N_jacobi);
for (int i=0; i<N_jacobi; i++){
E[i] = (double*)malloc(__SIZEOF_DOUBLE__*N_jacobi);
for (int j=0; j<N_jacobi; j++){
E[i][j] = 0;
}
E[i][i] = 1;
}
state = N_jacobi;
e = (double*)malloc(__SIZEOF_DOUBLE__*N_jacobi);
ind = (int*)malloc(__SIZEOF_INT__*N_jacobi);
changed = (bool*)malloc(sizeof(bool)*N_jacobi);
for (int k=0; k<N_jacobi; k++){
ind[k] = maxind(k);
e[k] = S[k*N_jacobi + k];
changed[k] = true;
}
}
void Jacobi(double* input_matrix, int n,
double** eigenvalues, double*** eigenvectors) {
N_jacobi = n;
S = input_matrix;
init_jacobi();
while(state != 0){
int m = 0;
for (int k=1; k<N_jacobi-1; k++){
if (fabs(S[k*N_jacobi + ind[k]]) > fabs(S[m*N_jacobi + ind[m]])){
m = k;
}
}
int k = m;
int l = ind[m];
double p = S[k*N_jacobi + l];
double y = (e[l] - e[k]) / 2.0;
double d = fabs(y) + sqrt(p*p + y*y);
double r = sqrt(p*p + d*d);
double c = d / r;
double s = p / r;
double t = (p*p) / d;
if (y < 0.0) { s = -s; t = -t; }
S[k*N_jacobi + l] = 0.0;
update(k, -t);
update(l, t);
for (int i=0; i<k; i++) { rotate(i, k, i, l, c, s, false); }
for (int i=k+1; i<l; i++){ rotate(k, i, i, l, c, s, false); }
for (int i=l+1; i<N_jacobi; i++) { rotate(k, i, l, i, c, s, false); }
for (int i=0; i<N_jacobi; i++){
rotate(k, l, i, i, c, s, true);
}
ind[k] = maxind(k);
ind[l] = maxind(l);
}
*eigenvalues = e;
*eigenvectors = E;
}
// /*
// *****************************************************
// TODO -- You must implement this function
// *****************************************************
// */
void SVD_and_PCA (int M,
int N,
double* D,
double** U,
double** SIGMA,
double** V_T,
int* SIGMAm,
int* SIGMAn,
double** D_HAT,
int *K,
int retention) {
// write your code here
double *eigenvalues, **eigenvectors;
printf("\nD = \n");
print_matrix(D,M,N);
double* D_T = mat_transpose(D,M,N);
printf("\nD_T = \n");
print_matrix(D_T,N,M);
double* D_D;
double* DT_D;
double* DTD_D;
double* prod = (double*)malloc(__SIZEOF_POINTER__*N*N);
cudaMalloc((void **)&D_D, sizeof(double)*M*N);
cudaMalloc((void **)&DT_D, sizeof(double)*N*M);
cudaMalloc((void **)&DTD_D, sizeof(double)*N*N);
cudaMemcpy(D_D, D, sizeof(double)*M*N, cudaMemcpyHostToDevice);
cudaMemcpy(DT_D, D_T, sizeof(double)*N*M, cudaMemcpyHostToDevice);
dim3 dimGrid0((N + 16 - 1) / 16, (N + 16 - 1) / 16);
dim3 dimBlock0(16, 16);
Matrix_Multiplication_Cuda<<<dimGrid0, dimBlock0>>>(DT_D,N,M, D_D,M,N, DTD_D,N,N);
cudaMemcpy(prod, DTD_D, sizeof(double)*N*N, cudaMemcpyDeviceToHost);
cudaFree(D_D);
cudaFree(DTD_D);
// double* prod = mat_mul(D_T,N,M,D,M,N);
printf("\nDT_D = \n");
print_matrix(prod,N,N);
Jacobi(prod, N, &eigenvalues, &eigenvectors);
printf("\neigenvalues:\n");
print_vector(eigenvalues, N);
printf("\neigenvectors:\n");
print_mat(eigenvectors, N, N);
vector<pair<double,int>> eigenpairs;
for(int i = 0; i < N; i++) {
eigenpairs.push_back(make_pair(eigenvalues[i],i));
}
sort(eigenpairs.begin(),eigenpairs.end());
reverse(eigenpairs.begin(),eigenpairs.end());
printf("\nsorted eigenvalues = \n");
for(int i = 0; i < N;i++) {
printf("%f ",eigenpairs[i].first);
}
printf("\n\nindices sorted according eigenvalues = \n");
for(int i = 0; i < N;i++) {
printf("%d ",eigenpairs[i].second);
}
printf("\n");
// for(int i = 0; i < N; i++) {
// for(int j = 0 ; j < N ; j++) {
// printf("%f ",eigenvectors[i][j]);
// }
// printf("\n");
// }
double sorted_eigenvectors[N][N];
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
sorted_eigenvectors[i][j] = eigenvectors[i][eigenpairs[j].second];
}
}
printf("\nsorted eigenvectors = \n");
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
printf("%f ",sorted_eigenvectors[i][j]);
}
printf("\n");
}
double t_sorted_eigenvectors[N][N];
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
t_sorted_eigenvectors[i][j] = sorted_eigenvectors[j][i];
}
}
printf("\nt_sorted eigenvectors = \n");
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
printf("%f ",t_sorted_eigenvectors[i][j]);
}
printf("\n");
}
double* inv_sigma_mat = (double*)malloc(__SIZEOF_POINTER__*N*M);
double* U_transpose = (double*)malloc(__SIZEOF_POINTER__*N*N);
for(int i=0; i<N; i++){
for(int j=0; j<M; j++){
inv_sigma_mat[i*M+j] = 0;
}
}
*SIGMA = (double*)malloc(__SIZEOF_POINTER__*N);
*U = (double*)malloc(__SIZEOF_POINTER__*N*N);
for(int k=0; k<N; k++){
int i = eigenpairs[k].second;
(*SIGMA)[i] = (double)sqrt(eigenpairs[i].first);
inv_sigma_mat[i*N+i] = 1/(double)sqrt(eigenpairs[i].first);
for(int j=0; j<N; j++){
U_transpose[i*N+j] = eigenvectors[j][k];
(*U)[j*N + i] = eigenvectors[j][k];
}
}
printf("\nU = \n");
print_matrix(*U,N,N);
// for(int i = 0; i < N; i++) {
// int p = eigenpairs[i].second;
// inv_sigma_mat[i*N+i] = 1/(double)sqrt(eigenpairs[i].first);
// }
// printf("clear");
// for(int i = 0; i < N; i++)
// {
// (*SIGMA)[i] = (double)sqrt(eigenpairs[i].first);
// }
// for(int i = 0; i < N; i++)
// {
// printf("sigmas = %f\n",(*SIGMA)[i]);
// }
double* inv_sigma_mat_D;
double* U_transpose_D;
double* V_transpose_D;
double* prod1 = (double*)malloc(__SIZEOF_POINTER__*M*N);
double* prod1_D;
cudaMalloc((void **)&inv_sigma_mat_D, sizeof(double)*N*M);
cudaMalloc((void **)&U_transpose_D, sizeof(double)*N*N);
cudaMalloc((void **)&prod1_D, sizeof(double)*M*N);
cudaMemcpy(inv_sigma_mat_D, inv_sigma_mat, sizeof(double)*N*M, cudaMemcpyHostToDevice);
cudaMemcpy(U_transpose_D, U_transpose, sizeof(double)*N*N, cudaMemcpyHostToDevice); // U_transpose holds N*N doubles (N*M would over-read)
dim3 dimGrid1((M + 16 - 1) / 16, (N + 16 - 1) / 16);
dim3 dimBlock1(16, 16);
Matrix_Multiplication_Cuda<<<dimGrid1, dimBlock1>>>(inv_sigma_mat_D,M,N, U_transpose_D,N,N, prod1_D,M,N);
cudaFree(inv_sigma_mat_D);
cudaFree(U_transpose_D);
cudaMalloc((void**)&V_transpose_D, sizeof(double)*M*M);
dim3 dimGrid2((M + 16 - 1) / 16, (M + 16 - 1) / 16);
dim3 dimBlock2(16, 16);
*V_T = (double*)malloc(__SIZEOF_POINTER__*M*M);
Matrix_Multiplication_Cuda<<<dimGrid2, dimBlock2>>>(prod1_D,M,N, DT_D,N,M, V_transpose_D,M,M);
cudaMemcpy(*V_T, V_transpose_D, sizeof(double)*M*M, cudaMemcpyDeviceToHost);
cudaFree(DT_D); // DT_D is no longer needed; inv_sigma_mat_D and U_transpose_D were already freed above
cudaFree(prod1_D);
cudaFree(V_transpose_D);
free(prod);
free(prod1);
free(inv_sigma_mat);
free(U_transpose);
printf("\nSVD done!\n");
double eigensum = 0;
for(int i=0; i<N; i++) {
eigensum += eigenpairs[i].first;
}
printf("\nvariance = \n");
double variance[N];
for(int i=0; i<N; i++) {
variance[i] = (eigenpairs[i].first)/eigensum;
printf("%f ",variance[i]);
}
printf("\n");
double travelsum = 0;
int ncols = 0;
for(int i = 0; i<N; i++) {
printf("\ntravelsum = %f\n",travelsum);
if((travelsum*100) < (double)retention){
travelsum += variance[i];
ncols++;
}
else {
break;
}
}
*K = ncols;
printf("\nK = (%d,%d)\n", ncols, *K);
double* U_current = (double*)malloc(__SIZEOF_POINTER__*N*(*K));
for(int i=0; i<N; i++){
for(int j=0; j<ncols; j++){
U_current[i*(ncols) + j] = (*U)[i*N + j];
}
}
printf("\nD = \n");
print_matrix(D,M,N);
printf("\nU_current = \n");
print_matrix(U_current,N,ncols);
printf("\n\n");
*D_HAT = (double*)malloc(__SIZEOF_DOUBLE__*M*ncols);
double* D_Dest;
double* D_HAT_Dest;
double* U_Dest;
cudaMalloc((void **)&D_Dest, __SIZEOF_POINTER__*M*N);
cudaMalloc((void **)&U_Dest,__SIZEOF_DOUBLE__*N*(ncols));
cudaMalloc((void **)&D_HAT_Dest, __SIZEOF_POINTER__*M*(ncols));
cudaMemcpy(D_Dest, D, sizeof(double)*M*N, cudaMemcpyHostToDevice);
cudaMemcpy(U_Dest, U_current, sizeof(double)*N*ncols, cudaMemcpyHostToDevice);
double* dd = (double*)malloc(__SIZEOF_POINTER__*M*N);
double* ud = (double*)malloc(__SIZEOF_POINTER__*N*ncols);
cudaMemcpy(dd, D_Dest, sizeof(double)*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy(ud, U_Dest, sizeof(double)*N*ncols, cudaMemcpyDeviceToHost);
printf("\nDD = \n");
print_matrix(dd,M,N);
printf("\nU_currentD = \n");
print_matrix(ud,N,ncols);
printf("\n\n");
dim3 dimGrid3((M + 16 - 1) / 16, (M + 16 - 1) / 16);
dim3 dimBlock3(16, 16);
Matrix_Multiplication_Cuda<<<dimGrid3, dimBlock3>>>(D_Dest,M,N, U_Dest,N,ncols, D_HAT_Dest,M,ncols);
cudaMemcpy(*D_HAT, D_HAT_Dest, sizeof(double)*M*ncols, cudaMemcpyDeviceToHost);
printf("\nD_HAT\n");
print_matrix(*D_HAT,M,ncols);
printf("\nV_T = \n");
print_matrix(*V_T,M,M);
/*****************************************************************************************************************************************************/
/* U calculation */
/*****************************************************************************************************************************************************/
// double dv_mat[M][N];
// double sum = 0;
// for (int i = 0; i < M; i++) {
// for (int j = 0; j < N; j++) {
// for (int k = 0; k < N; k++) {
// sum = sum + D[i*N+k]*sorted_eigenvectors[k][j];
// }
// dv_mat[i][j] = sum;
// sum = 0;
// }
// }
// double dvsi[M][M];
// double sum1 = 0;
// for (int i = 0; i < M; i++) {
// for (int j = 0; j < M; j++) {
// for (int k = 0; k < N; k++) {
// sum1 = sum1 + dv_mat[i][k]*inv_sigma_mat[k][j];
// // printf("\n(%d,%d,%d)\n",i,j,k);
// }
// dvsi[i][j] = sum1;
// sum1 = 0;
// }
// }
// // printf("\n\n%f\n\n",dvsi[0][0]);
// printf("\nU = \n");
// for(int i = 0; i < M; i++) {
// for(int j = 0; j < M; j++) {
// printf("%f ",dvsi[i][j]);
// }
// printf("\n");
// }
// /*****************************************************************************************************************************************************/
// /*****************************************************************************************************************************************************/
// /* Correctness Check */
// /*****************************************************************************************************************************************************/
// // usvt
// double usigma[M][N];
// double sum2 = 0;
// for (int i = 0; i < M; i++) {
// for (int j = 0; j < N; j++) {
// for (int k = 0; k < M; k++) {
// sum2 = sum2 + dvsi[i][k]*sigma_mat[k][j];
// }
// usigma[i][j] = sum2;
// sum2 = 0;
// }
// }
// double usvt[M][N];
// double sum3 = 0;
// for (int i = 0; i < M; i++) {
// for (int j = 0; j < N; j++) {
// for (int k = 0; k < N; k++) {
// sum3 = sum3 + usigma[i][k]*t_sorted_eigenvectors[k][j];
// }
// usvt[i][j] = sum3;
// sum3 = 0;
// }
// }
// printf("\ncheck_mat = \n");
// for(int i = 0; i < M; i++) {
// for(int j = 0; j < N; j++) {
// printf("%f ",usvt[i][j]);
// }
// printf("\n");
// }
// /*****************************************************************************************************************************************************/
// double eigensum = 0;
// for(int i=0; i<N; i++) {
// eigensum += eigenpairs[i].first;
// }
// printf("\nvariance = \n");
// double variance[N];
// for(int i=0; i<N; i++) {
// variance[i] = (eigenpairs[i].first)/eigensum;
// printf("%f ",variance[i]);
// }
// printf("\n");
// double travelsum = 0;
// int ncols = 0;
// for(int i = 0; i<N; i++) {
// printf("\ntravelsum = %f\n",travelsum);
// if((travelsum*100) < retention){
// travelsum += variance[i];
// ncols++;
// }
// else {
// break;
// }
// }
// printf("\nK = %d\n", ncols);
// double wmat[N][ncols];
// for(int i=0; i<N; i++) {
// for(int j=0; j<ncols; j++) {
// wmat[i][j] = sorted_eigenvectors[i][j];
// }
// }
// printf("\nW = \n");
// for(int i = 0; i < N; i++) {
// for(int j = 0; j < ncols; j++) {
// printf("%f ",wmat[i][j]);
// }
// printf("\n");
// }
// double dhat[M][ncols];
// double sum4 = 0;
// for (int i = 0; i < M; i++) {
// for (int j = 0; j < ncols; j++) {
// for (int k = 0; k < N; k++) {
// sum4 = sum4 + D[i*N+k]*wmat[k][j];
// }
// dhat[i][j] = sum4;
// sum4 = 0;
// }
// }
// printf("\nD_HAT = \n");
// for(int i = 0; i < M; i++) {
// for(int j = 0; j < ncols; j++) {
// printf("%f ",dhat[i][j]);
// }
// printf("\n");
// }
}
|
90d1c4ca063a240c4d880605c7551c4b9bf0c3b7.hip
|
// !!! This is a file automatically generated by hipify!!!
// runSim.cu
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
// Executes the A1 operator optimized
__global__ void A1_kernel(double* r, double* v, double dt) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
r[id] += v[id] * dt;
}
// Executes the A2 operator
__global__ void A2_kernel(double *r, double *v, double *m, double dt, double *varr, double *status, int numParticles) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x + 1;
double invdist;
double dirvec[3];
if (id < numParticles) {
dirvec[0] = r[0] - r[3*id];
dirvec[1] = r[1] - r[3*id+1];
dirvec[2] = r[2] - r[3*id+2];
// dt times the inverse cube of the distance between particle 0 and i
invdist = dt * rnorm3d(dirvec[0], dirvec[1], dirvec[2])*\
rnorm3d(dirvec[0], dirvec[1], dirvec[2])*\
rnorm3d(dirvec[0], dirvec[1], dirvec[2]);
if (status[id] == 0) {
v[3*id] += 0;
v[3*id+1] += 0;
v[3*id+2] += 0;
varr[id] = 0;
varr[numParticles+id] = 0;
varr[2*numParticles+id] = 0;
}
else {
// Update velocities of particles 1 through N-1
v[3*id] += m[0] * invdist * dirvec[0];
v[3*id+1] += m[0] * invdist * dirvec[1];
v[3*id+2] += m[0] * invdist * dirvec[2];
varr[id] = -m[id] * invdist * dirvec[0];
varr[numParticles+id] = -m[id] * invdist * dirvec[1];
varr[2*numParticles+id] = -m[id] * invdist * dirvec[2];
}
varr[0] = v[0];
varr[numParticles] = v[1];
varr[2*numParticles] = v[2];
}
}
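// --- Note (not part of the original source) -------------------------------------------
// In vector form, the kick A2 applies to each satellitesimal i >= 1 is
//     v_i += dt * m_0 * (r_0 - r_i) / |r_0 - r_i|^3
// (the triple rnorm3d product above is simply 1/|r_0 - r_i|^3, scaled by dt).  The
// equal-and-opposite reactions -dt * m_i * (r_0 - r_i) / |r_0 - r_i|^3 are staged per
// component in varr, with varr[0], varr[numParticles] and varr[2*numParticles] seeded
// with particle 0's current velocity, so a subsequent reduction over varr yields
// particle 0's updated velocity.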
// Execute the B operator when only embryo and other particles interact
__global__ void B_kernel(double *r, double *v, double *m, double *varr, double dt, int numParticles, double *status, double eps) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x + 2;
double dirvec[3];
double invdist;
if (id < numParticles) {
dirvec[0] = r[3] - r[3*id];
dirvec[1] = r[3+1] - r[3*id+1];
dirvec[2] = r[3+2] - r[3*id+2];
invdist = status[id] * dt * rsqrt((dirvec[0]*dirvec[0] + dirvec[1]*dirvec[1] + dirvec[2]*dirvec[2] + eps*eps)*\
(dirvec[0]*dirvec[0] + dirvec[1]*dirvec[1] + dirvec[2]*dirvec[2] + eps*eps)*\
(dirvec[0]*dirvec[0] + dirvec[1]*dirvec[1] + dirvec[2]*dirvec[2] + eps*eps));
// update id'th satelitesimal
v[3*id] += m[1] * invdist * dirvec[0];
v[3*id+1] += m[1] * invdist * dirvec[1];
v[3*id+2] += m[1] * invdist * dirvec[2];
// update embryo
// Store forces on embryo for reduction
varr[0] = v[3];
varr[numParticles-1] = 0;
varr[numParticles] = v[4];
varr[2*numParticles-1] = 0;
varr[2*numParticles] = v[5];
varr[3*numParticles-1] = 0;
varr[id-1] = -m[id] * invdist * dirvec[0];
varr[numParticles+id-1] = -m[id] * invdist * dirvec[1];
varr[2*numParticles+id-1] = -m[id] * invdist * dirvec[2];
}
}
__global__ void mergeEject(double *r, double *status, int numParticles, double rH) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x + 2;
double dist;
if (id < numParticles) {
dist = norm3d(r[0]-r[3*id], r[1]-r[3*id+1], r[2]-r[3*id+2]);
if (dist < 0.03*rH && status[id] != 0)
status[id] = 2;
else if (dist > rH && status[id] != 0)
status[id] = 3; // so that momentum conservation doesn't include ejected particles
// will be set to 0 in the consMomentum function
}
}
__global__ void consMomentum(double *v, double *m, double *status, int numParticles, double *rSatellites) {
for (int id = 2; id < numParticles; id++) {
if (status[id] == 2) {
status[id] = 0;
// use conservation of momentum to update central velocity
v[0] = 1./(m[0] + m[id]) * (m[0]*v[0] + m[id]*v[3*id]);
v[1] = 1./(m[0] + m[id]) * (m[0]*v[1] + m[id]*v[3*id+1]);
v[2] = 1./(m[0] + m[id]) * (m[0]*v[2] + m[id]*v[3*id+2]);
// conservation of mass
m[0] += m[id];
}
else if (status[id] == 4) {
status[id] = 0;
rSatellites[0] = cbrt((m[1]+m[2])/m[2])*rSatellites[1];
// use conservation of momentum to update velocity
v[3] = 1./(m[1] + m[id]) * (m[1]*v[3] + m[id]*v[3*id]);
v[4] = 1./(m[1] + m[id]) * (m[1]*v[4] + m[id]*v[3*id+1]);
v[5] = 1./(m[1] + m[id]) * (m[1]*v[5] + m[id]*v[3*id+2]);
// conservation of mass
m[1] += m[id];
}
else if (status[id] == 3)
status[id] = 0;
else
continue;
}
}
__global__ void statusUpdate(double *r, double *v, double *m, double *status, int numParticles) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
m[id/3] *= status[id/3];
r[id] *= status[id/3];
v[id] *= status[id/3];
}
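// --- Note (not part of the original source) -------------------------------------------
// The status array acts as a small per-particle state machine shared by these kernels:
//   1 = active particle (forces in B_kernel are scaled by status, so live particles carry 1),
//   2 = flagged by mergeEject for merging into the central planet (dist < 0.03*rH),
//   3 = flagged by mergeEject as ejected (dist > rH),
//   4 = flagged by collision (further down) for merging into the embryo,
//   0 = removed.
// consMomentum resolves the 2/3/4 flags (conserving momentum and mass) and resets them
// to 0; statusUpdate then multiplies m, r and v by status so removed particles drop out
// of all subsequent force evaluations.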
// Function to find
// cross product of two vector array.
__device__ void crossProduct(double *vect_A, double *vect_B, double *cross_P) {
cross_P[0] = vect_A[1] * vect_B[2] - vect_A[2] * vect_B[1];
cross_P[1] = vect_A[2] * vect_B[0] - vect_A[0] * vect_B[2];
cross_P[2] = vect_A[0] * vect_B[1] - vect_A[1] * vect_B[0];
}
__global__ void collision(double* r, double* v, double* status, double* rSatellites, int numParticles, double dt) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x + 2;
double rTemp[3];
double vTemp[3];
double crossP[3];
double vecA[3];
double vecB[3];
double t;
double dist = 1.0e300; // large sentinel: no merge is flagged if t falls outside the segment (the endpoint check below is commented out)
double d1;
double d2;
if (id < numParticles) {
// go to rest frame of embryo
vTemp[0] = v[3*id] - v[3];
vTemp[1] = v[3*id+1] - v[4];
vTemp[2] = v[3*id+2] - v[5];
// evolve satelitesimal
rTemp[0] = r[3*id] + vTemp[0] * dt/4.0;
rTemp[1] = r[3*id+1] + vTemp[1] * dt/4.0;
rTemp[2] = r[3*id+2] + vTemp[2] * dt/4.0;
// the equation ((r-r[1]) * (rTemp-r)) / |rTemp-r|^2 where r[1] is the embryo's
// position in its rest frame, r is the satelitesimal's original position and rTemp is the
// satelitesimal's updated position in the rest frame. * indicates a dot product in this case
// this is the time that minimizes the distance function from a line segment to a point
t = -1*((r[3*id]-r[3]) *(rTemp[0]-r[3*id]) +\
(r[3*id+1]-r[4]) *(rTemp[1]-r[3*id+1]) +\
(r[3*id+2]-r[5]) *(rTemp[2]-r[3*id+2])) /\
((rTemp[0]-r[3*id]) *(rTemp[0]-r[3*id]) +\
(rTemp[1]-r[3*id+1])*(rTemp[1]-r[3*id+1]) +\
(rTemp[2]-r[3*id+2])*(rTemp[2]-r[3*id+2]));
if (t > 0 && t < 1) { // note: '0 < t < 1' would parse as '(0 < t) < 1', not a range test
// the equation |(r[1]-r) x (r[1]-rTemp)|/|rTemp-r| where r[1] is the embryo's position
// in its rest frame, r is the satelitesimal's original position and rTemp is the
// satelitesimal's updated position in the rest frame
// if t is in this range, then the point in within line segment
vecA[0] = r[3]-r[3*id], vecA[1] = r[4]-r[3*id+1], vecA[2] = r[5]-r[3*id+2];
vecB[0] = r[3]-rTemp[0], vecB[1] = r[4]-rTemp[1], vecB[2] = r[5]-rTemp[2];
crossProduct(vecA, vecB, crossP);
dist = norm3d(crossP[0],crossP[1],crossP[2])*rnorm3d(rTemp[0]-r[3*id], rTemp[1]-r[3*id+1], rTemp[2]-r[3*id+2]);
}
/*else if (t > 1 || t < 0) {
// if t is not in the range, it does not lie within the line segment
// the equation |r-r[1]|
d1 = norm3d(r[3*id]-r[3], r[3*id+1]-r[4], r[3*id+2]-r[5]);
// the equation |rTemp-r[1]|
d2 = norm3d(rTemp[0]-r[3], rTemp[1]-r[4], rTemp[2]-r[5]);
dist = fmin(d1, d2);
}*/
if (dist < rSatellites[0] + rSatellites[1])
status[id] = 4;
}
}
// Find distance
__global__ void calcDist(double *r, double *dist) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
dist[id] = norm3d(r[3*id], r[3*id+1], r[3*id+2]);
}
// Find eccentricity of all particles
__global__ void calcEccentricity(double *r, double *v, double *m, double *ecc, int numParticles) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x + 1;
double L[3]; // angular momentum
double eccTemp[3]; // hold components of eccentricity vector
double mu; // standard gravitational parameter
double invdist; // inverse distance between particle and central planet
if (id < numParticles) {
mu = m[0] + m[id];
invdist = rnorm3d(r[3*id]-r[0], r[3*id+1]-r[1], r[3*id+2]-r[2]);
L[0] = (r[3*id+1]-r[1])*v[3*id+2] - (r[3*id+2]-r[2])*v[3*id+1];
L[1] = (r[3*id+2]-r[2])*v[3*id] - (r[3*id]-r[0])*v[3*id+2];
L[2] = (r[3*id]-r[0])*v[3*id+1] - (r[3*id+1]-r[1])*v[3*id];
eccTemp[0] = (1./mu) * (v[3*id+1]*L[2] - v[3*id+2]*L[1]) - (r[3*id]-r[0]) * invdist;
eccTemp[1] = (1./mu) * (v[3*id+2]*L[0] - v[3*id]*L[2]) - (r[3*id+1]-r[1]) * invdist;
eccTemp[2] = (1./mu) * (v[3*id]*L[1] - v[3*id+1]*L[0]) - (r[3*id+2]-r[2]) * invdist;
ecc[id] = norm3d(eccTemp[0], eccTemp[1], eccTemp[2]); // real eccentricity
}
}
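// --- Note (not part of the original source) -------------------------------------------
// calcEccentricity evaluates the standard osculating eccentricity vector of particle i
// relative to the central body (particle 0), in units where G = 1:
//     L = (r_i - r_0) x v_i
//     e = (v_i x L) / mu - (r_i - r_0) / |r_i - r_0|,   mu = m_0 + m_i
// and stores the magnitude |e| in ecc[i]; |e| < 1 corresponds to a bound (elliptic)
// orbit around the planet.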
// Reduce last warp (unrolled) in reduction for A2 operator
template <unsigned int blockSize>
__device__ void warpReduce(volatile double* sdata, int tid) {
// All statements evaluated at compile time
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
// Reduction kernel for A2 operator for particle 0
template <unsigned int blockSize>
__global__ void reduce(double *g_idata, double *g_odata, unsigned int n) {
extern __shared__ double sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid] = 0;
while (i < n) {
sdata[tid] += g_idata[i] + g_idata[i+blockSize];
i += gridSize;
}
__syncthreads();
if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
if (tid < 32) warpReduce<blockSize>(sdata, tid);
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
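// --- Note (not part of the original source) -------------------------------------------
// The templated reduction above uses the classic pattern in which each thread first sums
// two inputs (g_idata[i] and g_idata[i+blockSize]) before the shared-memory tree, so a
// launch of gridDim.x blocks of blockSize threads consumes 2*blockSize*gridDim.x values
// per pass and needs blockSize doubles of dynamic shared memory per block.  An
// illustrative single-block launch that sums one numParticles-long component of varr
// into one velocity slot (hip form of the commented CUDA launches elsewhere in this
// file, assuming numParticles is a multiple of 1024) might look like:
//     hipLaunchKernelGGL(HIP_KERNEL_NAME(reduce<512>), dim3(1), dim3(512),
//                        512*sizeof(double), 0, varr_d, &v_d[0], numParticles);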
/*__global__ void reduce(double *v, double *varr, int numParticles, int s) {
v[s] = thrust::reduce(thrust::device, &varr[0], &varr[numParticles]);
v[1+s] = thrust::reduce(thrust::device, &varr[numParticles], &varr[2*numParticles]);
v[2+s] = thrust::reduce(thrust::device, &varr[2*numParticles], &varr[3*numParticles]);
}*/
// Function to find
// cross product of two vector array.
void crossProduct2(double *vect_A, double *vect_B, double *cross_P) {
cross_P[0] = vect_A[1] * vect_B[2] - vect_A[2] * vect_B[1];
cross_P[1] = vect_A[2] * vect_B[0] - vect_A[0] * vect_B[2];
cross_P[2] = vect_A[0] * vect_B[1] - vect_A[1] * vect_B[0];
}
// used to calculate the total linear momentum of the system
void linMomentum(double* v, double* m, int numParticles, double *P) {
*P = 0; // magnitude of the total linear momentum
double plin[3] = {0.0, 0.0, 0.0}; // linear momentum accumulator (must start at zero)
for (int i = 0; i < numParticles; i++) {
plin[0] += m[i]*v[3*i], plin[1] += m[i]*v[3*i+1], plin[2] += m[i]*v[3*i+2];
*P = sqrt(pow(plin[0], 2) + pow(plin[1], 2) + pow(plin[2], 2));
}
}
void totalMass(double *m, int numParticles, double* M) {
*M = 0;
for (int i = 0; i < numParticles; i++)
*M += m[i];
}
// used to calculate the total angular momentum of the system
void angMomentum(double* r, double* v, double* m, int numParticles, double *L) {
*L = 0;
double Ltemp[3] = {0.0, 0.0, 0.0}; // accumulated angular-momentum components (must start at zero)
double crossP[3]; // store cross product result
double dirvec[3]; // distance from planet
double p[3]; // linear momentum
for (int i = 1; i < numParticles; i++) {
dirvec[0] = -r[0]+r[3*i], dirvec[1] = -r[1]+r[3*i+1], dirvec[2] = -r[2]+r[3*i+2];
p[0] = m[i]*v[3*i], p[1] = m[i]*v[3*i+1], p[2] = m[i]*v[3*i+2];
crossProduct2(dirvec, p, crossP);
Ltemp[0] += crossP[0], Ltemp[1] += crossP[1], Ltemp[2] += crossP[2];
}
*L = sqrt(pow(Ltemp[0], 2) + pow(Ltemp[1], 2) + pow(Ltemp[2], 2));
}
double energynew(double* r, double* v, double* m, int numParticles, double eps) {
double T = 0; // kinetic energy
double U = 0; // potential energy
// to hold the vector that points between particle i and particle j
double* dirvec = (double*)malloc(3 * sizeof(double));
for (int i = 0; i < numParticles; i++) {
T += 0.5 * m[i] * (pow(v[3*i], 2) + pow(v[3*i+1], 2) + pow(v[3*i+2], 2));
if (i > 0) {
for (int k = 0; k < 3; k++)
dirvec[k] = r[k] - r[3*i+k];
U -= m[0] * m[i] / sqrt(pow(dirvec[0], 2) + pow(dirvec[1], 2) + pow(dirvec[2], 2));
}
if (i > 1) {
for (int k = 0; k < 3; k++)
dirvec[k] = r[3+k] - r[3*i+k];
U -= m[1] * m[i] / sqrt(pow(dirvec[0], 2) + pow(dirvec[1], 2) + pow(dirvec[2], 2) + eps*eps);
}
}
free(dirvec);
return T + U;
}
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
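// --- Illustrative usage sketch (not part of the original source) ----------------------
// checkCuda is meant to wrap individual runtime calls: in debug builds it asserts on a
// non-hipSuccess result, in release builds it is a pass-through.  For example
// (buf_d is illustrative; N_bytes and r_h as in runSim below):
//
//   double* buf_d;
//   checkCuda( hipMalloc((void**)&buf_d, N_bytes) );
//   checkCuda( hipMemcpy(buf_d, r_h, N_bytes, hipMemcpyHostToDevice) );
//   checkCuda( hipGetLastError() );   // e.g. right after a kernel launch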
// Perform the simulation
extern "C" {
void runSim(double *r_h, double *v_h, double *m_h, double dt, int numParticles, int n, double eps, int numSteps, double *ecc_h, double *status_h, double *rSatellites_h, double *dist_h) {
// Declare useful variables
size_t i, j;
const unsigned int warpSize = 32;
size_t N = 3 * numParticles;
size_t N_bytes = N * sizeof(double);
double rH = 5.37e10/8.8605e9; // scaled
double L; double P; double M; double K;
double L0; double P0; double M0; double K0;
double semMjrAxis;
// Make sure the number of particles is a multiple of the warp size (32)
// so the reductions below operate on full warps
if (numParticles % (warpSize) != 0) {
printf("Error: The number of particles must be a multiple of the warp size (32).\n");
return;
}
// Allocate arrays on device
double *r_d, *v_d, *m_d, *ecc_d, *varr_d, *rSatellites_d, *status_d, *vTemp_d, *dist_d;
hipMalloc((void**) &r_d, N_bytes);
hipMalloc((void**) &v_d, N_bytes);
hipMalloc((void**) &m_d, N_bytes/3);
hipMalloc((void**) &varr_d, N_bytes);
hipMalloc((void**) &status_d, N_bytes/3);
hipMalloc((void**) &ecc_d, N_bytes/3);
hipMalloc((void**) &rSatellites_d, 2*sizeof(double));
hipMalloc((void**) &vTemp_d, numParticles/512*sizeof(double));
hipMalloc((void**) &dist_d, N_bytes/3);
// Copy arrays from host to device
hipMemcpy(r_d, r_h, N_bytes, hipMemcpyHostToDevice);
hipMemcpy(v_d, v_h, N_bytes, hipMemcpyHostToDevice);
hipMemcpy(m_d, m_h, N_bytes/3, hipMemcpyHostToDevice);
hipMemcpy(status_d, status_h, N_bytes/3, hipMemcpyHostToDevice);
hipMemcpy(rSatellites_d, rSatellites_h, 2*sizeof(double), hipMemcpyHostToDevice);
//for (i = 0; i < numSteps; i++) {
// One time step
/*for (j = 0; j < n; j++) {
collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A2_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[0], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[1], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2], numParticles);
collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
}
B_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, varr_d, dt, numParticles, status_d, eps);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[3], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[4], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[5], numParticles);
for (j = 0; j < n; j++) {
collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A2_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[0], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[1], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2], numParticles);
collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
}*/
//}
/*for (i = 0; i < numParticles; i++)
printf("%f\n", status_h[i]);
angMomentum(r_h, v_h, m_h, numParticles, &L0);
linMomentum(v_h, m_h, numParticles, &P0);
totalMass(m_h, numParticles, &M0);
K0 = energynew(r_h, v_h, m_h, numParticles, eps);*/
/*calcEccentricity<<<numParticles/64, 64>>>(r_d, v_d, m_d, ecc_d, numParticles);
hipMemcpy(ecc_h, ecc_d, N_bytes/3, hipMemcpyDeviceToHost);
calcDist<<<numParticles/64, 64>>>(r_d, dist_d);
hipMemcpy(dist_h, dist_d, N_bytes/3, hipMemcpyDeviceToHost);*/
/*for (i = 0; i < numSteps; i++) {
// One time step
for (j = 0; j < n; j++) {
collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A2_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[0], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[1], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2], numParticles);
collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
}
B_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, varr_d, dt, numParticles, status_d, eps);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[3], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[4], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[5], numParticles);
for (j = 0; j < n; j++) {
collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A2_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[0], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[1], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2], numParticles);
collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
}
//hipMemcpy(r_h, r_d, N_bytes, hipMemcpyDeviceToHost);
//hipMemcpy(v_h, v_d, N_bytes, hipMemcpyDeviceToHost);
//hipMemcpy(m_h, m_d, N_bytes/3, hipMemcpyDeviceToHost);
//hipMemcpy(status_h, status_d, N_bytes/3, hipMemcpyDeviceToHost);
//hipMemcpy(rSatellites_h, rSatellites_d, 2*sizeof(double), hipMemcpyDeviceToHost);
//hipMemcpy(dist_h, dist_d, N_bytes/3, hipMemcpyDeviceToHost);
//angMomentum(r_h, v_h, m_h, numParticles, &L);
//linMomentum(v_h, m_h, numParticles, &P);
//totalMass(m_h, numParticles, &M);
//K = energynew(r_h, v_h, m_h, numParticles, eps);
//semMjrAxis = (m_h[0]+m_h[1])*sqrt(r_h[0]*r_h[0]+r_h[1]*r_h[1]+r_h[2]*r_h[2])/(2*(m_h[0]+m_h[1])-sqrt((r_h[0]-r_h[3])*(r_h[0]-r_h[3])+(r_h[1]-r_h[4])*(r_h[1]-r_h[4])+\
// (r_h[2]-r_h[5])*(r_h[2]-r_h[5]))*sqrt(v_h[3]*v_h[3]+v_h[4]*v_h[4]+v_h[5]*v_h[5])*sqrt(v_h[3]*v_h[3]+v_h[4]*v_h[4]+v_h[5]*v_h[5]));
//printf("%.15lf %.15lf %.15lf %.15lf %.15lf %.15lf\n", abs((L-L0)/L0), abs((P-P0)/P0), abs((M-M0)/M0), abs((K-K0)/K0), ecc_h[1], semMjrAxis);
}*/
hipLaunchKernelGGL(( calcEccentricity), dim3(numParticles/64), dim3(64), 0, 0, r_d, v_d, m_d, ecc_d, numParticles);
hipLaunchKernelGGL(( calcDist), dim3(numParticles/64), dim3(64), 0, 0, r_d, dist_d);
hipMemcpy(dist_h, dist_d, N_bytes/3, hipMemcpyDeviceToHost);
hipMemcpy(ecc_h, ecc_d, N_bytes/3, hipMemcpyDeviceToHost);
/*for (i = 0; i < numSteps; i++) {
// One time step
for (j = 0; j < n; j++) {
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
A2_kernel<<<numParticles/512, 512>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles);
reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d, vTemp_d, numParticles);
//reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[0], numParticles/512);
reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d+numParticles, vTemp_d, numParticles);
//reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[1], numParticles/512);
reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d+2*numParticles, vTemp_d, numParticles);
//reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[2], numParticles/512);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
}
B_kernel<<<numParticles/512, 512>>>(r_d, v_d, m_d, varr_d, dt, numParticles, status_d, eps);
reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d, vTemp_d, numParticles);
//reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[3], numParticles/512);
reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d+numParticles, vTemp_d, numParticles);
//reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[4], numParticles/512);
reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d+2*numParticles, vTemp_d, numParticles);
//reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[5], numParticles/512);
for (j = 0; j < n; j++) {
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
A2_kernel<<<numParticles/512, 512>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles);
reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d, vTemp_d, numParticles);
//reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[0], numParticles/512);
reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d+numParticles, vTemp_d, numParticles);
//reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[1], numParticles/512);
reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d+2*numParticles, vTemp_d, numParticles);
//reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[2], numParticles/512);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
}
}*/
/*for (i = 0; i < numSteps; i++) {
// One time step
for (j = 0; j < n; j++) {
collision<<<1, numParticles>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 1, rSatellites_d);
statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<1, N>>>(r_d, v_d, dt/(4*n));
mergeEject<<<1, numParticles>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 0, rSatellites_d);
statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles);
A2_kernel<<<1, numParticles>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles);
reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[0], numParticles);
reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[1], numParticles);
reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2], numParticles);
collision<<<1, numParticles>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 1, rSatellites_d);
statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<1, N>>>(r_d, v_d, dt/(4*n));
mergeEject<<<1, numParticles>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 0, rSatellites_d);
statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles);
}
B_kernel<<<1, numParticles>>>(r_d, v_d, m_d, varr_d, dt, numParticles, status_d, eps);
reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[3], numParticles);
reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[4], numParticles);
reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[5], numParticles);
for (j = 0; j < n; j++) {
collision<<<1, numParticles>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 1, rSatellites_d);
statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<1, N>>>(r_d, v_d, dt/(4*n));
mergeEject<<<1, numParticles>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 0, rSatellites_d);
statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles);
A2_kernel<<<1, numParticles>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles);
reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[0], numParticles);
reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[1], numParticles);
reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2], numParticles);
collision<<<1, numParticles>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 1, rSatellites_d);
statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<1, N>>>(r_d, v_d, dt/(4*n));
mergeEject<<<1, numParticles>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 0, rSatellites_d);
statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles);
}
}*/
// Copy arrays from device to host
/*hipMemcpy(r_h, r_d, N_bytes, hipMemcpyDeviceToHost);
hipMemcpy(v_h, v_d, N_bytes, hipMemcpyDeviceToHost);
hipMemcpy(m_h, m_d, N_bytes/3, hipMemcpyDeviceToHost);
hipMemcpy(status_h, status_d, N_bytes/3, hipMemcpyDeviceToHost);
hipMemcpy(rSatellites_h, rSatellites_d, 2*sizeof(double), hipMemcpyDeviceToHost);
int h = 0;
printf("Embryo radius = %.16lf\n", rSatellites_h[0]);
for (int kk = 0; kk < numParticles; kk++) {
if (status_h[kk] == 0) {
printf("Index: %d\n", kk);
printf("New Position\n");
printf("%.16lf %.16lf %.16lf\n", r_h[3*kk], r_h[3*kk+1], r_h[3*kk+2]);
printf("New Velocity\n");
printf("%.16lf %.16lf %.16lf\n", v_h[3*kk], v_h[3*kk+1], v_h[3*kk+2]);
h += 1;
}
}
printf("%d\n", h);
printf("New Mass Planet\n");
printf("%.16lf\n", m_h[0]);
printf("New Velocity Planet\n");
printf("%.16lf %.16lf %.16lf\n", v_h[0], v_h[1], v_h[2]);
printf("New Mass Embryo\n");
printf("%.16lf\n", m_h[1]);
printf("New Velocity Embryo\n");
printf("%.16lf %.16lf %.16lf\n", v_h[3], v_h[4], v_h[5]);
printf("After %d time step(s):\n", numSteps);
printf("r\n");
for (i = 0; i < 9; i += 3)
printf("%.16lf %.16lf %.16lf\n", r_h[i], r_h[i+1], r_h[i+2]);
printf("...\n");
for (i = 3*numParticles - 9; i < 3*numParticles; i += 3)
printf("%.16lf %.16lf %.16lf\n", r_h[i], r_h[i+1], r_h[i+2]);
printf("\n");
printf("v\n");
for (i = 0; i < 9; i += 3)
printf("%.16lf %.16lf %.16lf\n", v_h[i], v_h[i+1], v_h[i+2]);
printf("\n");
printf("...\n");
for (i = 3*numParticles - 9; i < 3*numParticles; i += 3)
printf("%.16lf %.16lf %.16lf\n", v_h[i], v_h[i+1], v_h[i+2]);*/
// Free allocated memory on host and device
hipFree(r_d);
hipFree(v_d);
hipFree(m_d);
hipFree(varr_d);
hipFree(status_d);
hipFree(ecc_d);
hipFree(dist_d);
hipFree(rSatellites_d);
}
}
|
90d1c4ca063a240c4d880605c7551c4b9bf0c3b7.cu
|
// runSim.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
#include <assert.h> // assert() is used in checkCuda below (debug builds)
// Executes the A1 operator optimized
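// (A1 is the position "drift" sub-step of the operator splitting: one thread per coordinate,
// each coordinate advances by its velocity times the supplied sub-timestep.)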
__global__ void A1_kernel(double* r, double* v, double dt) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
r[id] += v[id] * dt;
}
// Executes the A2 operator
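// (A2 is the "kick" from the central body, particle 0: each thread kicks one of particles
// 1..numParticles-1 by dt*m[0]/d^3 along the separation vector. The equal-and-opposite reaction
// forces on particle 0 are not applied here; instead they are written into varr, three blocks of
// numParticles entries, one block per velocity component, with slot 0 seeded by particle 0's current
// velocity component, so that a later sum-reduction over each block yields particle 0's updated
// velocity. Inactive particles, status == 0, contribute zeros.)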
__global__ void A2_kernel(double *r, double *v, double *m, double dt, double *varr, double *status, int numParticles) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x + 1;
double invdist;
double dirvec[3];
if (id < numParticles) {
dirvec[0] = r[0] - r[3*id];
dirvec[1] = r[1] - r[3*id+1];
dirvec[2] = r[2] - r[3*id+2];
// dt times the inverse cube of the distance between particle 0 and particle id
invdist = dt * rnorm3d(dirvec[0], dirvec[1], dirvec[2])*\
rnorm3d(dirvec[0], dirvec[1], dirvec[2])*\
rnorm3d(dirvec[0], dirvec[1], dirvec[2]);
if (status[id] == 0) {
v[3*id] += 0;
v[3*id+1] += 0;
v[3*id+2] += 0;
varr[id] = 0;
varr[numParticles+id] = 0;
varr[2*numParticles+id] = 0;
}
else {
// Update velocities of particles 1 through N-1
v[3*id] += m[0] * invdist * dirvec[0];
v[3*id+1] += m[0] * invdist * dirvec[1];
v[3*id+2] += m[0] * invdist * dirvec[2];
varr[id] = -m[id] * invdist * dirvec[0];
varr[numParticles+id] = -m[id] * invdist * dirvec[1];
varr[2*numParticles+id] = -m[id] * invdist * dirvec[2];
}
varr[0] = v[0];
varr[numParticles] = v[1];
varr[2*numParticles] = v[2];
}
}
// Execute the B operator when only embryo and other particles interact
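// (Thread indices are offset by 2 so the central planet, index 0, and the embryo, index 1, are skipped.
// eps is a Plummer-style softening length added to the embryo-satelitesimal separation, and the
// status[id] factor makes inactive particles contribute nothing. As in A2_kernel, the reaction forces
// on the embryo are staged in varr for a later reduction: slot 0 of each component block holds the
// embryo's current velocity component, slot id-1 holds the force from particle id, and the last slot,
// numParticles-1, is explicitly zeroed because no thread writes a force there.)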
__global__ void B_kernel(double *r, double *v, double *m, double *varr, double dt, int numParticles, double *status, double eps) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x + 2;
double dirvec[3];
double invdist;
if (id < numParticles) {
dirvec[0] = r[3] - r[3*id];
dirvec[1] = r[3+1] - r[3*id+1];
dirvec[2] = r[3+2] - r[3*id+2];
invdist = status[id] * dt * rsqrt((dirvec[0]*dirvec[0] + dirvec[1]*dirvec[1] + dirvec[2]*dirvec[2] + eps*eps)*\
(dirvec[0]*dirvec[0] + dirvec[1]*dirvec[1] + dirvec[2]*dirvec[2] + eps*eps)*\
(dirvec[0]*dirvec[0] + dirvec[1]*dirvec[1] + dirvec[2]*dirvec[2] + eps*eps));
// update id'th satelitesimal
v[3*id] += m[1] * invdist * dirvec[0];
v[3*id+1] += m[1] * invdist * dirvec[1];
v[3*id+2] += m[1] * invdist * dirvec[2];
// update embryo
// Store forces on embryo for reduction
varr[0] = v[3];
varr[numParticles-1] = 0;
varr[numParticles] = v[4];
varr[2*numParticles-1] = 0;
varr[2*numParticles] = v[5];
varr[3*numParticles-1] = 0;
varr[id-1] = -m[id] * invdist * dirvec[0];
varr[numParticles+id-1] = -m[id] * invdist * dirvec[1];
varr[2*numParticles+id-1] = -m[id] * invdist * dirvec[2];
}
}
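// mergeEject flags satelitesimals by distance from the central planet: closer than 0.03*rH they are
// marked for merger with the planet (status 2); beyond the Hill radius rH they are marked as ejected
// (status 3). The actual bookkeeping happens later in consMomentum/statusUpdate.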
__global__ void mergeEject(double *r, double *status, int numParticles, double rH) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x + 2;
double dist;
if (id < numParticles) {
dist = norm3d(r[0]-r[3*id], r[1]-r[3*id+1], r[2]-r[3*id+2]);
if (dist < 0.03*rH && status[id] != 0)
status[id] = 2;
else if (dist > rH && status[id] != 0)
status[id] = 3; // so that momentum conservation doesn't include ejected particles
// will be set to 0 in the consMomentum function
}
}
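// Status codes, as read from the kernels above and below: nonzero (normally 1) = active,
// 0 = inactive/removed, 2 = to be merged with the central planet, 3 = ejected, 4 = to be merged with
// the embryo. consMomentum resolves codes 2/3/4 serially; it is meant to run in a single thread
// (it is launched as <<<1, 1>>> in the commented-out drivers), so the planet/embryo updates are race-free.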
__global__ void consMomentum(double *v, double *m, double *status, int numParticles, double *rSatellites) {
for (int id = 2; id < numParticles; id++) {
if (status[id] == 2) {
status[id] = 0;
// use conservation of momentum to update central velocity
v[0] = 1./(m[0] + m[id]) * (m[0]*v[0] + m[id]*v[3*id]);
v[1] = 1./(m[0] + m[id]) * (m[0]*v[1] + m[id]*v[3*id+1]);
v[2] = 1./(m[0] + m[id]) * (m[0]*v[2] + m[id]*v[3*id+2]);
// conservation of mass
m[0] += m[id];
}
else if (status[id] == 4) {
status[id] = 0;
rSatellites[0] = cbrt((m[1]+m[2])/m[2])*rSatellites[1];
// use conservation of momentum to update velocity
v[3] = 1./(m[1] + m[id]) * (m[1]*v[3] + m[id]*v[3*id]);
v[4] = 1./(m[1] + m[id]) * (m[1]*v[4] + m[id]*v[3*id+1]);
v[5] = 1./(m[1] + m[id]) * (m[1]*v[5] + m[id]*v[3*id+2]);
// conservation of mass
m[1] += m[id];
}
else if (status[id] == 3)
status[id] = 0;
else
continue;
}
}
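// statusUpdate masks out removed particles: each thread handles one coordinate (id), id/3 is the
// particle index, and multiplying by status zeroes m, r and v for status == 0 particles while leaving
// active (status == 1) particles unchanged. The mass multiply is repeated by the particle's three
// coordinate threads, which is harmless for status values 0 and 1.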
__global__ void statusUpdate(double *r, double *v, double *m, double *status, int numParticles) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
m[id/3] *= status[id/3];
r[id] *= status[id/3];
v[id] *= status[id/3];
}
// Function to find
// cross product of two vector array.
__device__ void crossProduct(double *vect_A, double *vect_B, double *cross_P) {
cross_P[0] = vect_A[1] * vect_B[2] - vect_A[2] * vect_B[1];
cross_P[1] = vect_A[2] * vect_B[0] - vect_A[0] * vect_B[2];
cross_P[2] = vect_A[0] * vect_B[1] - vect_A[1] * vect_B[0];
}
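// collision works in the embryo's rest frame: each satelitesimal is advanced by dt/4, t parametrizes
// the point of closest approach of the embryo to the segment between the old and new positions, and if
// t lies in (0,1) the point-to-segment distance is computed from the cross-product formula below. A
// collision with the embryo is flagged (status 4) when that distance is below the sum of the radii
// stored in rSatellites.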
__global__ void collision(double* r, double* v, double* status, double* rSatellites, int numParticles, double dt) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x + 2;
double rTemp[3];
double vTemp[3];
double crossP[3];
double vecA[3];
double vecB[3];
double t;
	double dist = 1e300; // huge sentinel so no collision is flagged unless the closest-approach branch below sets it (the t outside [0,1] branch is commented out)
double d1;
double d2;
if (id < numParticles) {
// go to rest frame of embryo
vTemp[0] = v[3*id] - v[3];
vTemp[1] = v[3*id+1] - v[4];
vTemp[2] = v[3*id+2] - v[5];
// evolve satelitesimal
rTemp[0] = r[3*id] + vTemp[0] * dt/4.0;
rTemp[1] = r[3*id+1] + vTemp[1] * dt/4.0;
rTemp[2] = r[3*id+2] + vTemp[2] * dt/4.0;
// the equation ((r-r[1]) * (rTemp-r)) / |rTemp-r|^2 where r[1] is the embryo's
// position in its rest frame, r is the satelitesimal's original position and rTemp is the
// satelitesimal's updated position in the rest frame. * indicates a dot product in this case
// this is the time that minimizes the distance function from a line segment to a point
t = -1*((r[3*id]-r[3]) *(rTemp[0]-r[3*id]) +\
(r[3*id+1]-r[4]) *(rTemp[1]-r[3*id+1]) +\
(r[3*id+2]-r[5]) *(rTemp[2]-r[3*id+2])) /\
((rTemp[0]-r[3*id]) *(rTemp[0]-r[3*id]) +\
(rTemp[1]-r[3*id+1])*(rTemp[1]-r[3*id+1]) +\
(rTemp[2]-r[3*id+2])*(rTemp[2]-r[3*id+2]));
	if (t > 0 && t < 1) {
// the equation |(r[1]-r) x (r[1]-rTemp)|/|rTemp-r| where r[1] is the embryo's position
// in its rest frame, r is the satelitesimal's original position and rTemp is the
// satelitesimal's updated position in the rest frame
// if t is in this range, then the point in within line segment
vecA[0] = r[3]-r[3*id], vecA[1] = r[4]-r[3*id+1], vecA[2] = r[5]-r[3*id+2];
vecB[0] = r[3]-rTemp[0], vecB[1] = r[4]-rTemp[1], vecB[2] = r[5]-rTemp[2];
crossProduct(vecA, vecB, crossP);
dist = norm3d(crossP[0],crossP[1],crossP[2])*rnorm3d(rTemp[0]-r[3*id], rTemp[1]-r[3*id+1], rTemp[2]-r[3*id+2]);
}
/*else if (t > 1 || t < 0) {
// if t is not in the range, it does not lie within the line segment
// the equation |r-r[1]|
d1 = norm3d(r[3*id]-r[3], r[3*id+1]-r[4], r[3*id+2]-r[5]);
// the equation |rTemp-r[1]|
d2 = norm3d(rTemp[0]-r[3], rTemp[1]-r[4], rTemp[2]-r[5]);
dist = fmin(d1, d2);
}*/
if (dist < rSatellites[0] + rSatellites[1])
status[id] = 4;
}
}
// Find distance
__global__ void calcDist(double *r, double *dist) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
dist[id] = norm3d(r[3*id], r[3*id+1], r[3*id+2]);
}
// Find eccentricity of all particles
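// (This computes the eccentricity vector e = (v x L)/mu - (r - r0)/|r - r0| with mu = m[0] + m[id],
// apparently in units where G = 1, and stores its magnitude; |e| is the orbital eccentricity of a
// bound two-body orbit about the central planet.)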
__global__ void calcEccentricity(double *r, double *v, double *m, double *ecc, int numParticles) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x + 1;
double L[3]; // angular momentum
double eccTemp[3]; // hold components of eccentricity vector
double mu; // standard gravitational parameter
double invdist; // inverse distance between particle and central planet
if (id < numParticles) {
mu = m[0] + m[id];
invdist = rnorm3d(r[3*id]-r[0], r[3*id+1]-r[1], r[3*id+2]-r[2]);
L[0] = (r[3*id+1]-r[1])*v[3*id+2] - (r[3*id+2]-r[2])*v[3*id+1];
L[1] = (r[3*id+2]-r[2])*v[3*id] - (r[3*id]-r[0])*v[3*id+2];
L[2] = (r[3*id]-r[0])*v[3*id+1] - (r[3*id+1]-r[1])*v[3*id];
eccTemp[0] = (1./mu) * (v[3*id+1]*L[2] - v[3*id+2]*L[1]) - (r[3*id]-r[0]) * invdist;
eccTemp[1] = (1./mu) * (v[3*id+2]*L[0] - v[3*id]*L[2]) - (r[3*id+1]-r[1]) * invdist;
eccTemp[2] = (1./mu) * (v[3*id]*L[1] - v[3*id+1]*L[0]) - (r[3*id+2]-r[2]) * invdist;
ecc[id] = norm3d(eccTemp[0], eccTemp[1], eccTemp[2]); // real eccentricity
}
}
// Reduce last warp (unrolled) in reduction for A2 operator
template <unsigned int blockSize>
__device__ void warpReduce(volatile double* sdata, int tid) {
// All statements evaluated at compile time
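	// Note: this unrolled warp reduction relies on the volatile shared-memory pointer plus implicit
	// warp-synchronous execution, which is only safe on pre-Volta GPUs; on Volta and newer a
	// __syncwarp() between steps (or a cooperative-groups reduction) would be required.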
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
// Reduction kernel for A2 operator for particle 0
template <unsigned int blockSize>
__global__ void reduce(double *g_idata, double *g_odata, unsigned int n) {
extern __shared__ double sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid] = 0;
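	// Grid-stride loop with "first add during load": each thread sums the pair g_idata[i] and
	// g_idata[i+blockSize], so one pass covers 2*blockSize*gridDim.x elements. Note this assumes n is a
	// multiple of 2*blockSize; otherwise g_idata[i+blockSize] can read past the end of the input.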
while (i < n) {
sdata[tid] += g_idata[i] + g_idata[i+blockSize];
i += gridSize;
}
__syncthreads();
if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
if (tid < 32) warpReduce<blockSize>(sdata, tid);
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/*__global__ void reduce(double *v, double *varr, int numParticles, int s) {
v[s] = thrust::reduce(thrust::device, &varr[0], &varr[numParticles]);
v[1+s] = thrust::reduce(thrust::device, &varr[numParticles], &varr[2*numParticles]);
v[2+s] = thrust::reduce(thrust::device, &varr[2*numParticles], &varr[3*numParticles]);
}*/
// Function to find
// cross product of two vector array.
void crossProduct2(double *vect_A, double *vect_B, double *cross_P) {
cross_P[0] = vect_A[1] * vect_B[2] - vect_A[2] * vect_B[1];
cross_P[1] = vect_A[2] * vect_B[0] - vect_A[0] * vect_B[2];
cross_P[2] = vect_A[0] * vect_B[1] - vect_A[1] * vect_B[0];
}
// used to calculate the total linear momentum of the system
void linMomentum(double* v, double* m, int numParticles, double *P) {
	*P = 0; // magnitude of the total linear momentum
	double plin[3] = {0, 0, 0}; // linear momentum accumulator
	for (int i = 0; i < numParticles; i++) {
		plin[0] += m[i]*v[3*i], plin[1] += m[i]*v[3*i+1], plin[2] += m[i]*v[3*i+2];
	}
	*P = sqrt(pow(plin[0], 2) + pow(plin[1], 2) + pow(plin[2], 2));
}
void totalMass(double *m, int numParticles, double* M) {
*M = 0;
for (int i = 0; i < numParticles; i++)
*M += m[i];
}
// used to calculate the total angular momentum of the system
void angMomentum(double* r, double* v, double* m, int numParticles, double *L) {
*L = 0;
	double Ltemp[3] = {0, 0, 0};
double crossP[3]; // store cross product result
double dirvec[3]; // distance from planet
double p[3]; // linear momentum
for (int i = 1; i < numParticles; i++) {
dirvec[0] = -r[0]+r[3*i], dirvec[1] = -r[1]+r[3*i+1], dirvec[2] = -r[2]+r[3*i+2];
p[0] = m[i]*v[3*i], p[1] = m[i]*v[3*i+1], p[2] = m[i]*v[3*i+2];
crossProduct2(dirvec, p, crossP);
Ltemp[0] += crossP[0], Ltemp[1] += crossP[1], Ltemp[2] += crossP[2];
}
*L = sqrt(pow(Ltemp[0], 2) + pow(Ltemp[1], 2) + pow(Ltemp[2], 2));
}
double energynew(double* r, double* v, double* m, int numParticles, double eps) {
double T = 0; // kinetic energy
double U = 0; // potential energy
// to hold the vector that points between particle i and particle j
double* dirvec = (double*)malloc(3 * sizeof(double));
for (int i = 0; i < numParticles; i++) {
T += 0.5 * m[i] * (pow(v[3*i], 2) + pow(v[3*i+1], 2) + pow(v[3*i+2], 2));
if (i > 0) {
for (int k = 0; k < 3; k++)
dirvec[k] = r[k] - r[3*i+k];
U -= m[0] * m[i] / sqrt(pow(dirvec[0], 2) + pow(dirvec[1], 2) + pow(dirvec[2], 2));
}
if (i > 1) {
for (int k = 0; k < 3; k++)
dirvec[k] = r[3+k] - r[3*i+k];
U -= m[1] * m[i] / sqrt(pow(dirvec[0], 2) + pow(dirvec[1], 2) + pow(dirvec[2], 2) + eps*eps);
}
}
free(dirvec);
return T + U;
}
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
// Perform the simulation
extern "C" {
void runSim(double *r_h, double *v_h, double *m_h, double dt, int numParticles, int n, double eps, int numSteps, double *ecc_h, double *status_h, double *rSatellites_h, double *dist_h) {
// Declare useful variables
size_t i, j;
const unsigned int warpSize = 32;
size_t N = 3 * numParticles;
size_t N_bytes = N * sizeof(double);
double rH = 5.37e10/8.8605e9; // scaled
double L; double P; double M; double K;
double L0; double P0; double M0; double K0;
double semMjrAxis;
	// Make sure the number of particles is a multiple of twice the warp size (2*32),
	// which the 64-thread kernel launches and the reductions rely on
	if (numParticles % (2*warpSize) != 0) {
		printf("Error: The number of particles must be a multiple of twice the warp size (64).\n");
		return;
	}
// Allocate arrays on device
double *r_d, *v_d, *m_d, *ecc_d, *varr_d, *rSatellites_d, *status_d, *vTemp_d, *dist_d;
cudaMalloc((void**) &r_d, N_bytes);
cudaMalloc((void**) &v_d, N_bytes);
cudaMalloc((void**) &m_d, N_bytes/3);
cudaMalloc((void**) &varr_d, N_bytes);
cudaMalloc((void**) &status_d, N_bytes/3);
cudaMalloc((void**) &ecc_d, N_bytes/3);
cudaMalloc((void**) &rSatellites_d, 2*sizeof(double));
cudaMalloc((void**) &vTemp_d, numParticles/512*sizeof(double));
cudaMalloc((void**) &dist_d, N_bytes/3);
// Copy arrays from host to device
cudaMemcpy(r_d, r_h, N_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(v_d, v_h, N_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(m_d, m_h, N_bytes/3, cudaMemcpyHostToDevice);
cudaMemcpy(status_d, status_h, N_bytes/3, cudaMemcpyHostToDevice);
cudaMemcpy(rSatellites_d, rSatellites_h, 2*sizeof(double), cudaMemcpyHostToDevice);
//for (i = 0; i < numSteps; i++) {
// One time step
/*for (j = 0; j < n; j++) {
collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A2_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[0], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[1], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2], numParticles);
collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
}
B_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, varr_d, dt, numParticles, status_d, eps);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[3], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[4], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[5], numParticles);
for (j = 0; j < n; j++) {
collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A2_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[0], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[1], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2], numParticles);
collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
}*/
//}
/*for (i = 0; i < numParticles; i++)
printf("%f\n", status_h[i]);
angMomentum(r_h, v_h, m_h, numParticles, &L0);
linMomentum(v_h, m_h, numParticles, &P0);
totalMass(m_h, numParticles, &M0);
K0 = energynew(r_h, v_h, m_h, numParticles, eps);*/
/*calcEccentricity<<<numParticles/64, 64>>>(r_d, v_d, m_d, ecc_d, numParticles);
cudaMemcpy(ecc_h, ecc_d, N_bytes/3, cudaMemcpyDeviceToHost);
calcDist<<<numParticles/64, 64>>>(r_d, dist_d);
cudaMemcpy(dist_h, dist_d, N_bytes/3, cudaMemcpyDeviceToHost);*/
/*for (i = 0; i < numSteps; i++) {
// One time step
for (j = 0; j < n; j++) {
collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A2_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[0], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[1], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2], numParticles);
collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
}
B_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, varr_d, dt, numParticles, status_d, eps);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[3], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[4], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[5], numParticles);
for (j = 0; j < n; j++) {
collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A2_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[0], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[1], numParticles);
reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2], numParticles);
collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d);
statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles);
}
//cudaMemcpy(r_h, r_d, N_bytes, cudaMemcpyDeviceToHost);
//cudaMemcpy(v_h, v_d, N_bytes, cudaMemcpyDeviceToHost);
//cudaMemcpy(m_h, m_d, N_bytes/3, cudaMemcpyDeviceToHost);
//cudaMemcpy(status_h, status_d, N_bytes/3, cudaMemcpyDeviceToHost);
//cudaMemcpy(rSatellites_h, rSatellites_d, 2*sizeof(double), cudaMemcpyDeviceToHost);
//cudaMemcpy(dist_h, dist_d, N_bytes/3, cudaMemcpyDeviceToHost);
//angMomentum(r_h, v_h, m_h, numParticles, &L);
//linMomentum(v_h, m_h, numParticles, &P);
//totalMass(m_h, numParticles, &M);
//K = energynew(r_h, v_h, m_h, numParticles, eps);
//semMjrAxis = (m_h[0]+m_h[1])*sqrt(r_h[0]*r_h[0]+r_h[1]*r_h[1]+r_h[2]*r_h[2])/(2*(m_h[0]+m_h[1])-sqrt((r_h[0]-r_h[3])*(r_h[0]-r_h[3])+(r_h[1]-r_h[4])*(r_h[1]-r_h[4])+\
// (r_h[2]-r_h[5])*(r_h[2]-r_h[5]))*sqrt(v_h[3]*v_h[3]+v_h[4]*v_h[4]+v_h[5]*v_h[5])*sqrt(v_h[3]*v_h[3]+v_h[4]*v_h[4]+v_h[5]*v_h[5]));
//printf("%.15lf %.15lf %.15lf %.15lf %.15lf %.15lf\n", abs((L-L0)/L0), abs((P-P0)/P0), abs((M-M0)/M0), abs((K-K0)/K0), ecc_h[1], semMjrAxis);
}*/
calcEccentricity<<<numParticles/64, 64>>>(r_d, v_d, m_d, ecc_d, numParticles);
calcDist<<<numParticles/64, 64>>>(r_d, dist_d);
cudaMemcpy(dist_h, dist_d, N_bytes/3, cudaMemcpyDeviceToHost);
cudaMemcpy(ecc_h, ecc_d, N_bytes/3, cudaMemcpyDeviceToHost);
/*for (i = 0; i < numSteps; i++) {
// One time step
for (j = 0; j < n; j++) {
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
A2_kernel<<<numParticles/512, 512>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles);
reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d, vTemp_d, numParticles);
//reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[0], numParticles/512);
reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d+numParticles, vTemp_d, numParticles);
//reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[1], numParticles/512);
reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d+2*numParticles, vTemp_d, numParticles);
//reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[2], numParticles/512);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
}
B_kernel<<<numParticles/512, 512>>>(r_d, v_d, m_d, varr_d, dt, numParticles, status_d, eps);
reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d, vTemp_d, numParticles);
//reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[3], numParticles/512);
reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d+numParticles, vTemp_d, numParticles);
//reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[4], numParticles/512);
reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d+2*numParticles, vTemp_d, numParticles);
//reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[5], numParticles/512);
for (j = 0; j < n; j++) {
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
A2_kernel<<<numParticles/512, 512>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles);
reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d, vTemp_d, numParticles);
//reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[0], numParticles/512);
reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d+numParticles, vTemp_d, numParticles);
//reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[1], numParticles/512);
reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d+2*numParticles, vTemp_d, numParticles);
//reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[2], numParticles/512);
A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n));
}
}*/
/*for (i = 0; i < numSteps; i++) {
// One time step
for (j = 0; j < n; j++) {
collision<<<1, numParticles>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 1, rSatellites_d);
statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<1, N>>>(r_d, v_d, dt/(4*n));
mergeEject<<<1, numParticles>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 0, rSatellites_d);
statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles);
A2_kernel<<<1, numParticles>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles);
reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[0], numParticles);
reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[1], numParticles);
reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2], numParticles);
collision<<<1, numParticles>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 1, rSatellites_d);
statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<1, N>>>(r_d, v_d, dt/(4*n));
mergeEject<<<1, numParticles>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 0, rSatellites_d);
statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles);
}
B_kernel<<<1, numParticles>>>(r_d, v_d, m_d, varr_d, dt, numParticles, status_d, eps);
reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[3], numParticles);
reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[4], numParticles);
reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[5], numParticles);
for (j = 0; j < n; j++) {
collision<<<1, numParticles>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 1, rSatellites_d);
statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<1, N>>>(r_d, v_d, dt/(4*n));
mergeEject<<<1, numParticles>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 0, rSatellites_d);
statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles);
A2_kernel<<<1, numParticles>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles);
reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[0], numParticles);
reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[1], numParticles);
reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2], numParticles);
collision<<<1, numParticles>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 1, rSatellites_d);
statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles);
A1_kernel<<<1, N>>>(r_d, v_d, dt/(4*n));
mergeEject<<<1, numParticles>>>(r_d, status_d, numParticles, rH);
consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 0, rSatellites_d);
statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles);
}
}*/
// Copy arrays from device to host
/*cudaMemcpy(r_h, r_d, N_bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(v_h, v_d, N_bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(m_h, m_d, N_bytes/3, cudaMemcpyDeviceToHost);
cudaMemcpy(status_h, status_d, N_bytes/3, cudaMemcpyDeviceToHost);
cudaMemcpy(rSatellites_h, rSatellites_d, 2*sizeof(double), cudaMemcpyDeviceToHost);
int h = 0;
printf("Embryo radius = %.16lf\n", rSatellites_h[0]);
for (int kk = 0; kk < numParticles; kk++) {
if (status_h[kk] == 0) {
printf("Index: %d\n", kk);
printf("New Position\n");
printf("%.16lf %.16lf %.16lf\n", r_h[3*kk], r_h[3*kk+1], r_h[3*kk+2]);
printf("New Velocity\n");
printf("%.16lf %.16lf %.16lf\n", v_h[3*kk], v_h[3*kk+1], v_h[3*kk+2]);
h += 1;
}
}
printf("%d\n", h);
printf("New Mass Planet\n");
printf("%.16lf\n", m_h[0]);
printf("New Velocity Planet\n");
printf("%.16lf %.16lf %.16lf\n", v_h[0], v_h[1], v_h[2]);
printf("New Mass Embryo\n");
printf("%.16lf\n", m_h[1]);
printf("New Velocity Embryo\n");
printf("%.16lf %.16lf %.16lf\n", v_h[3], v_h[4], v_h[5]);
printf("After %d time step(s):\n", numSteps);
printf("r\n");
for (i = 0; i < 9; i += 3)
printf("%.16lf %.16lf %.16lf\n", r_h[i], r_h[i+1], r_h[i+2]);
printf("...\n");
for (i = 3*numParticles - 9; i < 3*numParticles; i += 3)
printf("%.16lf %.16lf %.16lf\n", r_h[i], r_h[i+1], r_h[i+2]);
printf("\n");
printf("v\n");
for (i = 0; i < 9; i += 3)
printf("%.16lf %.16lf %.16lf\n", v_h[i], v_h[i+1], v_h[i+2]);
printf("\n");
printf("...\n");
for (i = 3*numParticles - 9; i < 3*numParticles; i += 3)
printf("%.16lf %.16lf %.16lf\n", v_h[i], v_h[i+1], v_h[i+2]);*/
// Free allocated memory on host and device
cudaFree(r_d);
cudaFree(v_d);
cudaFree(m_d);
cudaFree(varr_d);
cudaFree(status_d);
cudaFree(ecc_d);
cudaFree(dist_d);
cudaFree(rSatellites_d);
}
}
|
db3f6eabb41f0cce700fe7defee4603b3563d2b7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled. P100.
///////////using more than 8gb.
//typedef unsigned char byte;
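// Overview: the code below builds a pointer-chasing chain through a large hipMallocManaged (unified
// memory) buffer whose preferred location is the CPU, then walks the chain from a single GPU thread
// while timing every dependent load with %clock64; the recorded per-access latencies expose cache, TLB
// and page-migration behaviour as the referenced footprint grows.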
void shuffle(long long int *array, long long int n)
{
if (n > 1){
long long int i;
for (i = 0; i < n - 1; i++){
long long int j = i + rand() / (RAND_MAX / (n - i) + 1);
long long int t = array[j];
array[j] = array[i];
array[i] = t;
}
}
}
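// init_cpu_data builds the pointer-chasing linked list: A[i] holds the element index of the next node,
// one stride away, and a second independent chain starts at offset 7; the "normal", "reversed" and
// "random" orderings are selected by the hard-coded if(1)/if(0) switches below.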
void init_cpu_data(unsigned *A, unsigned size, unsigned stride, unsigned mod, long long int iterations){
if(1){////////////normal
for (unsigned i = 0; i < size - stride; i = i + stride){
A[i]=(i + stride);
}
for (unsigned i = 7; i < size - stride; i = i + stride){
A[i]=(i + stride);
}
A[size - stride]=0;
A[size - stride + 7]=7;
}
if(0){////////////reversed
for (unsigned i = 0; i <= size - stride; i = i + stride){
A[i]=(i - stride);
}
for (unsigned i = 7; i <= size - stride + 7; i = i + stride){
A[i]=(i - stride);
}
A[0]=size - stride;
A[7]=size - stride + 7;
}
if(0){////////////random
long long int *rand_sequence;
rand_sequence = (long long int*)malloc(sizeof(long long int) * iterations);
//////random sequence offset 0
for(long long int i = 0; i < iterations; i++){
rand_sequence[i] = i;
}
//srand (time(NULL));
srand(1);
shuffle(rand_sequence, iterations);
long long int previous_rand_num;
long long int rand_num = rand_sequence[0] * stride;
for(long long int i = 1; i < iterations; i++){
previous_rand_num = rand_num;
rand_num = rand_sequence[i] * stride;
A[previous_rand_num]=(unsigned)rand_num;
}
A[rand_num]=(unsigned)(rand_sequence[0] * stride);////back to beginning
//////random sequence offset 7
//for(int i = 0; i < iterations; i++){
// rand_sequence[i] = i;
//}
//srand (time(NULL));
//shuffle(rand_sequence, iterations);
rand_num = rand_sequence[0] * stride + 7;
for(long long int i = 1; i < iterations; i++){
previous_rand_num = rand_num;
rand_num = rand_sequence[i] * stride + 7;
A[previous_rand_num]=(unsigned)rand_num;
}
A[rand_num]=(unsigned)(rand_sequence[0] * stride + 7);////back to beginning
}
/*
///////manually set the nodes
A[32]=104333344;
A[104333344]=200802336;
A[200802336]=353370144;
A[353370144]=372244512;
A[372244512]=110100512;
A[110100512]=182452256;
A[182452256]=333971488;
A[333971488]=225443872;
A[225443872]=155189280;
A[155189280]=104333344;
*/
}
__device__ void P_chasing0(int mark, unsigned *A, int iterations, int *B, int *C, unsigned *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
for (int it = 0; it < iterations; it++){
j = A[j];
}
B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, unsigned *A, long long int iterations, unsigned *B, unsigned *C, long long int *D, unsigned starting_index, float clock_rate, unsigned data_stride){
unsigned j = starting_index;/////make them in the same page, and miss near in cache lines
//unsigned start_time = 0;//////clock
//unsigned end_time = 0;//////clock
//start_time = clock64();//////clock
for (long long int it = 0; it < iterations; it++){
j = A[j];
}
//end_time=clock64();//////clock
//unsigned total_time = end_time - start_time;//////clock
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! (
B[0] = j;
//B[1] = (int) total_time;
}
//////////min page size 4kb = 4096b = 32 * 128.
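// P_chasing2 times each dependent load individually: inline PTX reads %clock64 immediately before a
// dependent ld.global of A[j], the loaded index is stored to shared memory (which also serializes the
// timing), and %clock64 is read again afterwards. Up to 1024*4 = 4096 index/latency pairs are kept in
// shared memory and copied out to global memory at the end.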
__device__ void P_chasing2(int mark, unsigned *A, unsigned iterations, unsigned *B, unsigned *C, long long int *D, unsigned starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
__shared__ unsigned s_index[1024 * 4];
//__shared__ int s_index[1];
unsigned j = starting_index;/////make them in the same page, and miss near in cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//long long int total_time = end_time - start_time;//////clock
/*
for (int it = 0; it < iterations; it++){
start_time = clock64();//////clock
j = A[j];
//s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
*/
asm(".reg .u64 t1;\n\t"
".reg .u64 t2;\n\t");
for (long long int it = 0; it < iterations; it++){
/*
asm("mul.wide.u32 t1, %3, %5;\n\t"
"add.u64 t2, t1, %4;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
*/
asm("mul.wide.u32 t1, %2, %4;\n\t"
"add.u64 t2, t1, %3;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %1, [t2];\n\t"
: "=l"(start_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
s_index[it] = j;////what if without this? ///Then it is not accurate and cannot get the access time at all, due to the ILP. (another way is to use average time, but inevitably containing other instructions:setp, add).
asm volatile ("mov.u64 %0, %clock64;": "=l"(end_time));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
/*//////////////this will cause a problem, support only up to 1073741824, see the cvts.
asm(".reg .u32 t1;\n\t"
".reg .u64 t2;\n\t"
".reg .u32 t3;\n\t"
".reg .u32 t4;\n\t"
".reg .u64 t5;\n\t"
".reg .u32 t6;\n\t"
".reg .u64 t7;\n\t"
"cvta.to.shared.u64 t5, %0;\n\t"
"cvt.u32.u64 t6, t5;\n\t"
:: "l"(s_index));////////////////////////////////////cvta.to.global.u64 %rd4, %rd25; needed??
for (unsigned it = 0; it < iterations; it++){//////////it here is limited by the size of the shared memory
asm("shl.b32 t1, %3, 2;\n\t"
"cvt.u64.u32 t7, t1;\n\t"
"add.u64 t2, t7, %4;\n\t"
"shl.b32 t3, %6, 2;\n\t"
"add.u32 t4, t3, t6;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"st.shared.u32 [t4], %2;\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "l"(s_index), "r"(it));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
*/
//printf("#####################%d\n", A[1073741824]);
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
B[0] = j;
for (unsigned it = 0; it < iterations; it++){
C[it] = s_index[it];
D[it] = s_tvalue[it];
}
}
__global__ void tlb_latency_test(unsigned *A, long long int iterations, unsigned *B, unsigned *C, long long int *D, float clock_rate, unsigned mod, int data_stride){
long long int reduced_iter = iterations;
if(reduced_iter > 4096){
reduced_iter = 4096;
}else if(reduced_iter < 16){
//reduced_iter = 16;
}
///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
//P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
P_chasing2(0, A, reduced_iter, B, C, D, 0, clock_rate, data_stride);////////partially print the data
__syncthreads();
}
int main(int argc, char **argv)
{
printf("\n");
// set device
hipDeviceProp_t device_prop;
//int dev_id = findCudaDevice(argc, (const char **) argv);
int dev_id = 0;
checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id));
float clock_rate = (float) peak_clk;
//printf("clock_rate_out_kernel:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This samples requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == hipComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out;
checkCudaErrors(hipMalloc(&GPU_data_out, sizeof(unsigned) * 2));
FILE * pFile;
pFile = fopen ("output.txt","w");
unsigned counter = 0;
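	// The loops below sweep the referenced footprint (mod) upward at a fixed chase stride so that
	// successive runs cross cache, TLB-reach and page-migration thresholds; the two later single-pass
	// loops repeat the measurement at two fixed, larger footprints.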
for(unsigned data_stride = 2 * 256 * 1024; data_stride <= 2 * 256 * 1024; data_stride = data_stride * 2){/////////32mb stride
//data_stride = data_stride + 32;///offset a cache line, trying to cause L2 miss but tlb hit.
//printf("###################data_stride%d#########################\n", data_stride);
//for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m = 12288 cache lines, L1 16k = 128 cache lines.
for(unsigned mod2 = 2 * 256 * 1024; mod2 <= 1073741824; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
unsigned mod = mod2;
if(mod > 2684354560){
mod = 2684354560;
}
//unsigned data_size = 2684354560;///when size gets larger than 32MB(8388608), an additional latency is added. Is it prefetching? cpu cache or tlb? (cache)
unsigned data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
unsigned *CPU_data_in;
checkCudaErrors(hipMallocManaged(&CPU_data_in, sizeof(unsigned) * data_size));/////////////using unified memory
checkCudaErrors(hipMemAdvise(CPU_data_in, sizeof(int) * data_size, hipMemAdviseSetPreferredLocation, hipCpuDeviceId));///////////////using hint
init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations);
long long int reduced_iter = iterations;
if(reduced_iter > 4096){
reduced_iter = 4096;
}else if(reduced_iter < 16){
//reduced_iter = 16;
}
unsigned *CPU_data_out_index;
CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//unsigned *GPU_data_in;
//checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(unsigned) * data_size));
//hipMemcpy(GPU_data_in, CPU_data_in, sizeof(unsigned) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, hipMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%u##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%u %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
//checkCudaErrors(hipFree(GPU_data_in));
checkCudaErrors(hipFree(CPU_data_in));
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(unsigned mod2 = 1; mod2 <= 1; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
unsigned mod = 2147483648;
if(mod > 3221225472){
mod = 3221225472;
}
//unsigned data_size = 2684354560;
unsigned data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
unsigned *CPU_data_in;
checkCudaErrors(hipMallocManaged(&CPU_data_in, sizeof(unsigned) * data_size));/////////////using unified memory
checkCudaErrors(hipMemAdvise(CPU_data_in, sizeof(int) * data_size, hipMemAdviseSetPreferredLocation, hipCpuDeviceId));///////////////using hint
init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations);
long long int reduced_iter = iterations;
if(reduced_iter > 4096){
reduced_iter = 4096;
}else if(reduced_iter < 16){
//reduced_iter = 16;
}
unsigned *CPU_data_out_index;
CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//unsigned *GPU_data_in;
//checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(unsigned) * data_size));
//hipMemcpy(GPU_data_in, CPU_data_in, sizeof(unsigned) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, hipMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%u##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%u %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
//checkCudaErrors(hipFree(GPU_data_in));
checkCudaErrors(hipFree(CPU_data_in));
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(unsigned mod2 = 1; mod2 <= 1; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
unsigned mod = 2684354560;
if(mod > 2684354560){
mod = 2684354560;
}
//unsigned data_size = 2684354560;
unsigned data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
unsigned *CPU_data_in;
checkCudaErrors(hipMallocManaged(&CPU_data_in, sizeof(unsigned) * data_size));/////////////using unified memory
checkCudaErrors(hipMemAdvise(CPU_data_in, sizeof(int) * data_size, hipMemAdviseSetPreferredLocation, hipCpuDeviceId));///////////////using hint
init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations);
long long int reduced_iter = iterations;
if(reduced_iter > 4096){
reduced_iter = 4096;
}else if(reduced_iter < 16){
//reduced_iter = 16;
}
unsigned *CPU_data_out_index;
CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//unsigned *GPU_data_in;
//checkCudaErrors(hipMalloc(&GPU_data_in, sizeof(unsigned) * data_size));
//hipMemcpy(GPU_data_in, CPU_data_in, sizeof(unsigned) * data_size, hipMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out_index;
checkCudaErrors(hipMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(hipMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
hipLaunchKernelGGL(( tlb_latency_test), dim3(1), dim3(1), 0, 0, CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
hipDeviceSynchronize();
hipMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, hipMemcpyDeviceToHost);
hipMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, hipMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%d#########################\n", data_stride);
fprintf (pFile, "###############Mod%u##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%u %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(hipFree(GPU_data_out_index));
checkCudaErrors(hipFree(GPU_data_out_time));
//checkCudaErrors(hipFree(GPU_data_in));
checkCudaErrors(hipFree(CPU_data_in));
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
//printf("############################################\n\n");
}
checkCudaErrors(hipFree(GPU_data_out));
//free(CPU_data_out);
fclose (pFile);
exit(EXIT_SUCCESS);
}
|
db3f6eabb41f0cce700fe7defee4603b3563d2b7.cu
|
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <stdlib.h>
// utilities
#include <helper_cuda.h>
#include <time.h>
///////////per request timing. L1 enabled. P100.
///////////using more than 8gb.
//typedef unsigned char byte;
void shuffle(long long int *array, long long int n)
{
if (n > 1){
long long int i;
for (i = 0; i < n - 1; i++){
long long int j = i + rand() / (RAND_MAX / (n - i) + 1);
long long int t = array[j];
array[j] = array[i];
array[i] = t;
}
}
}
void init_cpu_data(unsigned *A, unsigned size, unsigned stride, unsigned mod, long long int iterations){
if(1){////////////normal
for (unsigned i = 0; i < size - stride; i = i + stride){
A[i]=(i + stride);
}
for (unsigned i = 7; i < size - stride; i = i + stride){
A[i]=(i + stride);
}
A[size - stride]=0;
A[size - stride + 7]=7;
}
if(0){////////////reversed
for (unsigned i = 0; i <= size - stride; i = i + stride){
A[i]=(i - stride);
}
for (unsigned i = 7; i <= size - stride + 7; i = i + stride){
A[i]=(i - stride);
}
A[0]=size - stride;
A[7]=size - stride + 7;
}
if(0){////////////random
long long int *rand_sequence;
rand_sequence = (long long int*)malloc(sizeof(long long int) * iterations);
//////random sequence offset 0
for(long long int i = 0; i < iterations; i++){
rand_sequence[i] = i;
}
//srand (time(NULL));
srand(1);
shuffle(rand_sequence, iterations);
long long int previous_rand_num;
long long int rand_num = rand_sequence[0] * stride;
for(long long int i = 1; i < iterations; i++){
previous_rand_num = rand_num;
rand_num = rand_sequence[i] * stride;
A[previous_rand_num]=(unsigned)rand_num;
}
A[rand_num]=(unsigned)(rand_sequence[0] * stride);////back to beginning
//////random sequence offset 7
//for(int i = 0; i < iterations; i++){
// rand_sequence[i] = i;
//}
//srand (time(NULL));
//shuffle(rand_sequence, iterations);
rand_num = rand_sequence[0] * stride + 7;
for(long long int i = 1; i < iterations; i++){
previous_rand_num = rand_num;
rand_num = rand_sequence[i] * stride + 7;
A[previous_rand_num]=(unsigned)rand_num;
}
A[rand_num]=(unsigned)(rand_sequence[0] * stride + 7);////back to beginning
}
/*
///////manually set the nodes
A[32]=104333344;
A[104333344]=200802336;
A[200802336]=353370144;
A[353370144]=372244512;
A[372244512]=110100512;
A[110100512]=182452256;
A[182452256]=333971488;
A[333971488]=225443872;
A[225443872]=155189280;
A[155189280]=104333344;
*/
}
__device__ void P_chasing0(int mark, unsigned *A, int iterations, int *B, int *C, unsigned *D, int starting_index, float clock_rate, int data_stride){
int j = starting_index;/////make them in the same page, and miss near in cache lines
for (int it = 0; it < iterations; it++){
j = A[j];
}
B[0] = j;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing1(int mark, unsigned *A, long long int iterations, unsigned *B, unsigned *C, long long int *D, unsigned starting_index, float clock_rate, unsigned data_stride){
unsigned j = starting_index;/////make them in the same page, and miss near in cache lines
//unsigned start_time = 0;//////clock
//unsigned end_time = 0;//////clock
//start_time = clock64();//////clock
for (long long int it = 0; it < iterations; it++){
j = A[j];
}
//end_time=clock64();//////clock
//unsigned total_time = end_time - start_time;//////clock
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency //////////the print will flush the L1?! (
B[0] = j;
//B[1] = (int) total_time;
}
//////////min page size 4kb = 4096b = 32 * 128.
__device__ void P_chasing2(int mark, unsigned *A, unsigned iterations, unsigned *B, unsigned *C, long long int *D, unsigned starting_index, float clock_rate, int data_stride){//////what is the effect of warmup outside vs inside?
//////shared memory: 0xc000 max (49152 Bytes = 48KB)
__shared__ long long int s_tvalue[1024 * 4];/////must be enough to contain the number of iterations.
__shared__ unsigned s_index[1024 * 4];
//__shared__ int s_index[1];
unsigned j = starting_index;/////make them in the same page, and miss near in cache lines
//int j = B[0];
long long int start_time = 0;//////clock
long long int end_time = 0;//////clock
long long int time_interval = 0;//////clock
//long long int total_time = end_time - start_time;//////clock
/*
for (int it = 0; it < iterations; it++){
start_time = clock64();//////clock
j = A[j];
//s_index[it] = j;
end_time=clock64();//////clock
s_tvalue[it] = end_time - start_time;
}
*/
asm(".reg .u64 t1;\n\t"
".reg .u64 t2;\n\t");
for (long long int it = 0; it < iterations; it++){
/*
asm("mul.wide.u32 t1, %3, %5;\n\t"
"add.u64 t2, t1, %4;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
*/
asm("mul.wide.u32 t1, %2, %4;\n\t"
"add.u64 t2, t1, %3;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %1, [t2];\n\t"
: "=l"(start_time), "=r"(j) : "r"(j), "l"(A), "r"(4));
s_index[it] = j;////without this dependent store the timing is not accurate: ILP would let the next load overlap the clock reads. (The other way is to use the average time, but that inevitably contains other instructions: setp, add.)
asm volatile ("mov.u64 %0, %clock64;": "=l"(end_time));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
/*//////////////this will cause a problem, support only up to 1073741824, see the cvts.
asm(".reg .u32 t1;\n\t"
".reg .u64 t2;\n\t"
".reg .u32 t3;\n\t"
".reg .u32 t4;\n\t"
".reg .u64 t5;\n\t"
".reg .u32 t6;\n\t"
".reg .u64 t7;\n\t"
"cvta.to.shared.u64 t5, %0;\n\t"
"cvt.u32.u64 t6, t5;\n\t"
:: "l"(s_index));////////////////////////////////////cvta.to.global.u64 %rd4, %rd25; needed??
for (unsigned it = 0; it < iterations; it++){//////////it here is limited by the size of the shared memory
asm("shl.b32 t1, %3, 2;\n\t"
"cvt.u64.u32 t7, t1;\n\t"
"add.u64 t2, t7, %4;\n\t"
"shl.b32 t3, %6, 2;\n\t"
"add.u32 t4, t3, t6;\n\t"
"mov.u64 %0, %clock64;\n\t"
"ld.global.u32 %2, [t2];\n\t"
"st.shared.u32 [t4], %2;\n\t"
"mov.u64 %1, %clock64;"
: "=l"(start_time), "=l"(end_time), "=r"(j) : "r"(j), "l"(A), "l"(s_index), "r"(it));
time_interval = end_time - start_time;
//if(it >= 4 * 1024){
s_tvalue[it] = time_interval;
//}
}
*/
//printf("#####################%d\n", A[1073741824]);
//printf("inside%d:%fms\n", mark, (total_time / (float)clock_rate) / ((float)iterations));//////clock, average latency
B[0] = j;
for (unsigned it = 0; it < iterations; it++){
C[it] = s_index[it];
D[it] = s_tvalue[it];
}
}
__global__ void tlb_latency_test(unsigned *A, long long int iterations, unsigned *B, unsigned *C, long long int *D, float clock_rate, unsigned mod, int data_stride){
long long int reduced_iter = iterations;
if(reduced_iter > 4096){
reduced_iter = 4096;
}else if(reduced_iter < 16){
//reduced_iter = 16;
}
///////////kepler L2 has 48 * 1024 = 49152 cache lines. But we only have 1024 * 4 slots in shared memory.
//P_chasing1(0, A, iterations + 0, B, C, D, 0, clock_rate, data_stride);////////saturate the L2
P_chasing2(0, A, reduced_iter, B, C, D, 0, clock_rate, data_stride);////////partially print the data
__syncthreads();
}
int main(int argc, char **argv)
{
printf("\n");
// set device
cudaDeviceProp device_prop;
//int dev_id = findCudaDevice(argc, (const char **) argv);
int dev_id = 0;
checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id));
int peak_clk = 1;//kHz
checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id));
float clock_rate = (float) peak_clk;
//printf("clock_rate_out_kernel:%f\n", clock_rate);
if (!device_prop.managedMemory) {
// This sample requires being run on a device that supports Unified Memory
fprintf(stderr, "Unified Memory not supported on this device\n");
exit(EXIT_WAIVED);
}
if (device_prop.computeMode == cudaComputeModeProhibited)
{
// This sample requires being run with a default or process exclusive mode
fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n");
exit(EXIT_WAIVED);
}
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out;
checkCudaErrors(cudaMalloc(&GPU_data_out, sizeof(unsigned) * 2));
FILE * pFile;
pFile = fopen ("output.txt","w");
unsigned counter = 0;
for(unsigned data_stride = 2 * 256 * 1024; data_stride <= 2 * 256 * 1024; data_stride = data_stride * 2){/////////2MB stride (2 * 256 * 1024 unsigned elements)
//data_stride = data_stride + 32;///offset a cache line, trying to cause L2 miss but tlb hit.
//printf("###################data_stride%d#########################\n", data_stride);
//for(int mod = 1024 * 256 * 2; mod > 0; mod = mod - 32 * 1024){/////kepler L2 1.5m = 12288 cache lines, L1 16k = 128 cache lines.
for(unsigned mod2 = 2 * 256 * 1024; mod2 <= 1073741824; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
unsigned mod = mod2;
if(mod > 2684354560){
mod = 2684354560;
}
//unsigned data_size = 2684354560;///when size gets larger than 32MB(8388608), an additional latency is added. Is it prefetching? cpu cache or tlb? (cache)
unsigned data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
unsigned *CPU_data_in;
//CPU_data_in = (unsigned*)malloc(sizeof(unsigned) * data_size);/////not needed: the pointer is replaced by the managed allocation below
checkCudaErrors(cudaMallocManaged(&CPU_data_in, sizeof(int) * data_size));/////////////using unified memory
checkCudaErrors(cudaMemAdvise(CPU_data_in, sizeof(int) * data_size, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId));///////////////using hint
init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations);
long long int reduced_iter = iterations;
if(reduced_iter > 4096){
reduced_iter = 4096;
}else if(reduced_iter < 16){
//reduced_iter = 16;
}
unsigned *CPU_data_out_index;
CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//unsigned *GPU_data_in;
//checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(unsigned) * data_size));
//cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(unsigned) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
tlb_latency_test<<<1, 1>>>(CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, cudaMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%u#########################\n", data_stride);
fprintf (pFile, "###############Mod%u##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%u %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
//checkCudaErrors(cudaFree(GPU_data_in));
checkCudaErrors(cudaFree(CPU_data_in));
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(unsigned mod2 = 1; mod2 <= 1; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
unsigned mod = 2147483648;
if(mod > 3221225472){
mod = 3221225472;
}
//unsigned data_size = 2684354560;
unsigned data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
unsigned *CPU_data_in;
//CPU_data_in = (unsigned*)malloc(sizeof(unsigned) * data_size);/////not needed: the pointer is replaced by the managed allocation below
checkCudaErrors(cudaMallocManaged(&CPU_data_in, sizeof(int) * data_size));/////////////using unified memory
checkCudaErrors(cudaMemAdvise(CPU_data_in, sizeof(int) * data_size, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId));///////////////using hint
init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations);
long long int reduced_iter = iterations;
if(reduced_iter > 4096){
reduced_iter = 4096;
}else if(reduced_iter < 16){
//reduced_iter = 16;
}
unsigned *CPU_data_out_index;
CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//unsigned *GPU_data_in;
//checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(unsigned) * data_size));
//cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(unsigned) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
tlb_latency_test<<<1, 1>>>(CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, cudaMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%u#########################\n", data_stride);
fprintf (pFile, "###############Mod%u##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%u %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
//checkCudaErrors(cudaFree(GPU_data_in));
checkCudaErrors(cudaFree(CPU_data_in));
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
for(unsigned mod2 = 1; mod2 <= 1; mod2 = mod2 * 2){////268435456 = 1gb, 536870912 = 2gb, 1073741824 = 4gb, 2147483648 = 8gb, 4294967296 = 16gb.
counter++;
///////////////////////////////////////////////////////////////////CPU data begin
//int data_size = 2 * 256 * 1024 * 32;/////size = iteration * stride = 32 2mb pages.
unsigned mod = 2684354560;
if(mod > 2684354560){
mod = 2684354560;
}
//unsigned data_size = 2684354560;
unsigned data_size = mod;
if(data_size < 4194304){//////////data size at least 16mb to prevent L2 prefetch
data_size = 4194304;
}
//int iterations = data_size / data_stride;
//int iterations = 1024 * 256 * 8;
long long int iterations = mod / data_stride;////32 * 32 * 4 / 32 * 2 = 256
unsigned *CPU_data_in;
//CPU_data_in = (unsigned*)malloc(sizeof(unsigned) * data_size);/////not needed: the pointer is replaced by the managed allocation below
checkCudaErrors(cudaMallocManaged(&CPU_data_in, sizeof(int) * data_size));/////////////using unified memory
checkCudaErrors(cudaMemAdvise(CPU_data_in, sizeof(int) * data_size, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId));///////////////using hint
init_cpu_data(CPU_data_in, data_size, data_stride, mod, iterations);
long long int reduced_iter = iterations;
if(reduced_iter > 4096){
reduced_iter = 4096;
}else if(reduced_iter < 16){
//reduced_iter = 16;
}
unsigned *CPU_data_out_index;
CPU_data_out_index = (unsigned*)malloc(sizeof(unsigned) * reduced_iter);
long long int *CPU_data_out_time;
CPU_data_out_time = (long long int*)malloc(sizeof(long long int) * reduced_iter);
///////////////////////////////////////////////////////////////////CPU data end
///////////////////////////////////////////////////////////////////GPU data in
//unsigned *GPU_data_in;
//checkCudaErrors(cudaMalloc(&GPU_data_in, sizeof(unsigned) * data_size));
//cudaMemcpy(GPU_data_in, CPU_data_in, sizeof(unsigned) * data_size, cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////GPU data out
unsigned *GPU_data_out_index;
checkCudaErrors(cudaMalloc(&GPU_data_out_index, sizeof(unsigned) * reduced_iter));
long long int *GPU_data_out_time;
checkCudaErrors(cudaMalloc(&GPU_data_out_time, sizeof(long long int) * reduced_iter));
tlb_latency_test<<<1, 1>>>(CPU_data_in, iterations, GPU_data_out, GPU_data_out_index, GPU_data_out_time, clock_rate, mod, data_stride);///////////////kernel is here
cudaDeviceSynchronize();
cudaMemcpy(CPU_data_out_index, GPU_data_out_index, sizeof(unsigned) * reduced_iter, cudaMemcpyDeviceToHost);
cudaMemcpy(CPU_data_out_time, GPU_data_out_time, sizeof(long long int) * reduced_iter, cudaMemcpyDeviceToHost);
fprintf(pFile, "###################data_stride%u#########################\n", data_stride);
fprintf (pFile, "###############Mod%u##############%lld\n", mod, iterations);
for (long long int it = 0; it < reduced_iter; it++){
fprintf (pFile, "%u %fms %lldcycles\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate, CPU_data_out_time[it]);
//fprintf (pFile, "%d %fms\n", it, CPU_data_out_time[it] / (float)clock_rate);
//printf ("%d %fms\n", CPU_data_out_index[it], CPU_data_out_time[it] / (float)clock_rate);
}
checkCudaErrors(cudaFree(GPU_data_out_index));
checkCudaErrors(cudaFree(GPU_data_out_time));
//checkCudaErrors(cudaFree(GPU_data_in));
checkCudaErrors(cudaFree(CPU_data_in));
//free(CPU_data_in);
free(CPU_data_out_index);
free(CPU_data_out_time);
}
//printf("############################################\n\n");
}
checkCudaErrors(cudaFree(GPU_data_out));
//free(CPU_data_out);
fclose (pFile);
exit(EXIT_SUCCESS);
}
|
ef96bfdf963903a686bfc6fd492c4cd6a4435de2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// TanH neuron activation function layer.
// Adapted from ReLU layer code written by Yangqing Jia
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace crfasrnn_caffe {
template <typename Dtype>
__global__ void TanHForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = tanh(in[index]);
}
}
template <typename Dtype>
void TanHLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( TanHForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void TanHBackward(const int n, const Dtype* in_diff,
const Dtype* out_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
Dtype tanhx = out_data[index];
out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);
}
}
template <typename Dtype>
void TanHLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( TanHBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(TanHLayer);
} // namespace crfasrnn_caffe
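// A minimal illustration (not part of the original layer) of the launch-syntax mapping that
// hipify applies throughout this file pair; "myKernel" and its arguments are hypothetical:
//   CUDA: myKernel<<<gridDim, blockDim, sharedMemBytes, stream>>>(arg0, arg1);
//   HIP:  hipLaunchKernelGGL(myKernel, gridDim, blockDim, sharedMemBytes, stream, arg0, arg1);
// With sharedMemBytes = 0 and the default stream this reduces to the
// hipLaunchKernelGGL(..., 0, 0, ...) calls seen in Forward_gpu and Backward_gpu above.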
|
ef96bfdf963903a686bfc6fd492c4cd6a4435de2.cu
|
// TanH neuron activation function layer.
// Adapted from ReLU layer code written by Yangqing Jia
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
namespace crfasrnn_caffe {
template <typename Dtype>
__global__ void TanHForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = tanh(in[index]);
}
}
template <typename Dtype>
void TanHLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
TanHForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void TanHBackward(const int n, const Dtype* in_diff,
const Dtype* out_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
Dtype tanhx = out_data[index];
out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);
}
}
template <typename Dtype>
void TanHLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
TanHBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(TanHLayer);
} // namespace crfasrnn_caffe
|
0b3653055367589c89b8d9a1e066e6044fe079fd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <gpu_planning_tracepoints/tracepoints.hpp>
#include <string>
#include "cuda_util.hpp"
#include "map.hpp"
namespace gpu_planning {
__host__ __device__ Cell::Cell() : value{0.f}, id{0} {}
__host__ __device__ Cell::Cell(float value, uint8_t id)
: value{value}, id{id} {}
__host__ __device__ Map::Map() : data_{nullptr}, resolution_{0} {}
__host__ __device__ Map::Map(Array2d<Cell>* data, size_t resolution)
: data_{data}, resolution_{resolution} {}
__host__ __device__ float Map::width() const {
return static_cast<float>(data_->width()) / resolution_;
}
__host__ __device__ float Map::height() const {
return static_cast<float>(data_->height()) / resolution_;
}
__host__ __device__ size_t Map::resolution() const { return resolution_; }
__host__ __device__ Array2d<Cell>* Map::data() const { return data_; }
__host__ __device__ Position<size_t> Map::to_index(
const Position<float>& position) const {
return Position<size_t>(position.x * resolution_, position.y * resolution_);
}
__host__ __device__ Pose<size_t> Map::to_index(const Pose<float>& pose) const {
return Pose<size_t>(to_index(pose.position), pose.orientation);
}
__host__ __device__ Position<float> Map::from_index(
const Position<size_t>& index) const {
return Position<float>(static_cast<float>(index.x) / resolution_,
static_cast<float>(index.y) / resolution_);
}
__host__ __device__ Pose<float> Map::from_index(
const Pose<size_t>& index) const {
return Pose<float>(from_index(index.position), index.orientation);
}
__host__ __device__ const Cell& Map::get(const Position<float>& position) {
return data_->at(to_index(position));
}
HostMap::HostMap() : map_storage_{}, map_array_{}, log_{nullptr} {}
HostMap::HostMap(size_t cell_width, size_t cell_height, size_t resolution,
Logger* log)
: map_storage_{cell_width * cell_height},
map_array_{map_storage_.data(), cell_width, cell_height,
cell_width * sizeof(Cell)},
log_{log} {
data_ = &map_array_;
resolution_ = resolution;
}
DeviceMap::DeviceMap() : map_{nullptr}, data_{}, resolution_{}, log_{nullptr} {}
DeviceMap::DeviceMap(size_t cell_width, size_t cell_height, size_t resolution,
Logger* log)
: map_{nullptr},
data_{cell_width, cell_height},
resolution_{resolution},
log_{log} {
CHECK_CUDA(hipMalloc(&map_, sizeof(Map)), "Could not allocate device map");
Map map(data_.device_handle(), resolution_);
CHECK_CUDA(hipMemcpy(map_, &map, sizeof(Map), hipMemcpyHostToDevice),
"Could not memcpy device map to device");
data_.memset(0);
tracepoint(gpu_planning, map_creation, width(), height(), resolution_);
}
DeviceMap::DeviceMap(DeviceMap&& other) noexcept
: map_{other.map_},
data_{std::move(other.data_)},
resolution_{other.resolution_},
log_{other.log_} {
other.map_ = nullptr;
}
DeviceMap& DeviceMap::operator=(DeviceMap&& other) noexcept {
if (this != &other) {
SAFE_CUDA_FREE(map_, "Could not free device map");
map_ = other.map_;
data_ = std::move(other.data_);
resolution_ = other.resolution_;
log_ = other.log_;
other.map_ = nullptr;
}
return *this;
}
DeviceMap::~DeviceMap() { SAFE_CUDA_FREE(map_, "Could not free device map"); }
float DeviceMap::width() const { return (float)data_.width() / resolution_; }
float DeviceMap::height() const { return (float)data_.height() / resolution_; }
size_t DeviceMap::index_width() const { return data_.width(); }
size_t DeviceMap::index_height() const { return data_.height(); }
size_t DeviceMap::resolution() const { return resolution_; }
Map* DeviceMap::device_map() const { return map_; }
Position<size_t> DeviceMap::to_index(const Position<float>& position) const {
return Position<size_t>(position.x * resolution_, position.y * resolution_);
}
Pose<size_t> DeviceMap::to_index(const Pose<float>& pose) const {
return Pose<size_t>(to_index(pose.position), pose.orientation);
}
Position<float> DeviceMap::from_index(const Position<size_t>& index) const {
return Position<float>(static_cast<float>(index.x) / resolution_,
static_cast<float>(index.y) / resolution_);
}
Pose<float> DeviceMap::from_index(const Pose<size_t>& index) const {
return Pose<float>(from_index(index.position), index.orientation);
}
HostMap DeviceMap::load_to_host() {
HostMap result(index_width(), index_height(), resolution_, log_);
data_.memcpy_get(*result.data());
return result;
}
__global__ void device_consolidate_data(Array2d<Cell>* map,
Array2d<float>* dest) {
const size_t x_fact = map->width() / dest->width();
const size_t y_fact = map->height() / dest->height();
for (size_t j = threadIdx.y; j < dest->height(); j += blockDim.y) {
for (size_t i = threadIdx.x; i < dest->width(); i += blockDim.x) {
float sum = 0.f;
for (size_t cy = 0; cy < y_fact; ++cy) {
for (size_t cx = 0; cx < x_fact; ++cx) {
const size_t x = i * x_fact + cx;
const size_t y = j * y_fact + cy;
sum += map->at(x, y).value;
}
}
dest->at(i, j) = sum / (x_fact * y_fact);
}
}
}
void DeviceMap::get_data(float* dest, size_t max_width, size_t max_height,
size_t* result_width, size_t* result_height) {
const size_t map_width = data_.width();
const size_t map_height = data_.height();
const size_t x_fact = map_width / (max_width + 1) + 1;
const size_t y_fact = map_height / (max_height + 1) + 1;
const size_t fact = max(x_fact, y_fact);
const size_t sub_width = map_width / fact;
const size_t sub_height = map_height / fact;
DeviceArray2d<float> sub(sub_width, sub_height);
hipLaunchKernelGGL(( device_consolidate_data), dim3(1), dim3(dim3(32, 32)), 0, 0, data_.device_handle(),
sub.device_handle());
Array2d<float> dest_array(dest, sub_width, sub_height,
sub_width * sizeof(float));
sub.memcpy_get(dest_array);
*result_width = sub_width;
*result_height = sub_height;
}
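// Illustration only (sizes are hypothetical): for a 1000 x 640 cell map consolidated into at
// most 100 x 100 output cells, x_fact = 1000 / 101 + 1 = 10 and y_fact = 640 / 101 + 1 = 7,
// so fact = 10 and the result is a 100 x 64 grid, each output value being the average of a
// 10 x 10 block of map cells computed by device_consolidate_data above.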
} // namespace gpu_planning
|
0b3653055367589c89b8d9a1e066e6044fe079fd.cu
|
#include <algorithm>
#include <gpu_planning_tracepoints/tracepoints.hpp>
#include <string>
#include "cuda_util.hpp"
#include "map.hpp"
namespace gpu_planning {
__host__ __device__ Cell::Cell() : value{0.f}, id{0} {}
__host__ __device__ Cell::Cell(float value, uint8_t id)
: value{value}, id{id} {}
__host__ __device__ Map::Map() : data_{nullptr}, resolution_{0} {}
__host__ __device__ Map::Map(Array2d<Cell>* data, size_t resolution)
: data_{data}, resolution_{resolution} {}
__host__ __device__ float Map::width() const {
return static_cast<float>(data_->width()) / resolution_;
}
__host__ __device__ float Map::height() const {
return static_cast<float>(data_->height()) / resolution_;
}
__host__ __device__ size_t Map::resolution() const { return resolution_; }
__host__ __device__ Array2d<Cell>* Map::data() const { return data_; }
__host__ __device__ Position<size_t> Map::to_index(
const Position<float>& position) const {
return Position<size_t>(position.x * resolution_, position.y * resolution_);
}
__host__ __device__ Pose<size_t> Map::to_index(const Pose<float>& pose) const {
return Pose<size_t>(to_index(pose.position), pose.orientation);
}
__host__ __device__ Position<float> Map::from_index(
const Position<size_t>& index) const {
return Position<float>(static_cast<float>(index.x) / resolution_,
static_cast<float>(index.y) / resolution_);
}
__host__ __device__ Pose<float> Map::from_index(
const Pose<size_t>& index) const {
return Pose<float>(from_index(index.position), index.orientation);
}
__host__ __device__ const Cell& Map::get(const Position<float>& position) {
return data_->at(to_index(position));
}
HostMap::HostMap() : map_storage_{}, map_array_{}, log_{nullptr} {}
HostMap::HostMap(size_t cell_width, size_t cell_height, size_t resolution,
Logger* log)
: map_storage_{cell_width * cell_height},
map_array_{map_storage_.data(), cell_width, cell_height,
cell_width * sizeof(Cell)},
log_{log} {
data_ = &map_array_;
resolution_ = resolution;
}
DeviceMap::DeviceMap() : map_{nullptr}, data_{}, resolution_{}, log_{nullptr} {}
DeviceMap::DeviceMap(size_t cell_width, size_t cell_height, size_t resolution,
Logger* log)
: map_{nullptr},
data_{cell_width, cell_height},
resolution_{resolution},
log_{log} {
CHECK_CUDA(cudaMalloc(&map_, sizeof(Map)), "Could not allocate device map");
Map map(data_.device_handle(), resolution_);
CHECK_CUDA(cudaMemcpy(map_, &map, sizeof(Map), cudaMemcpyHostToDevice),
"Could not memcpy device map to device");
data_.memset(0);
tracepoint(gpu_planning, map_creation, width(), height(), resolution_);
}
DeviceMap::DeviceMap(DeviceMap&& other) noexcept
: map_{other.map_},
data_{std::move(other.data_)},
resolution_{other.resolution_},
log_{other.log_} {
other.map_ = nullptr;
}
DeviceMap& DeviceMap::operator=(DeviceMap&& other) noexcept {
if (this != &other) {
SAFE_CUDA_FREE(map_, "Could not free device map");
map_ = other.map_;
data_ = std::move(other.data_);
resolution_ = other.resolution_;
log_ = other.log_;
other.map_ = nullptr;
}
return *this;
}
DeviceMap::~DeviceMap() { SAFE_CUDA_FREE(map_, "Could not free device map"); }
float DeviceMap::width() const { return (float)data_.width() / resolution_; }
float DeviceMap::height() const { return (float)data_.height() / resolution_; }
size_t DeviceMap::index_width() const { return data_.width(); }
size_t DeviceMap::index_height() const { return data_.height(); }
size_t DeviceMap::resolution() const { return resolution_; }
Map* DeviceMap::device_map() const { return map_; }
Position<size_t> DeviceMap::to_index(const Position<float>& position) const {
return Position<size_t>(position.x * resolution_, position.y * resolution_);
}
Pose<size_t> DeviceMap::to_index(const Pose<float>& pose) const {
return Pose<size_t>(to_index(pose.position), pose.orientation);
}
Position<float> DeviceMap::from_index(const Position<size_t>& index) const {
return Position<float>(static_cast<float>(index.x) / resolution_,
static_cast<float>(index.y) / resolution_);
}
Pose<float> DeviceMap::from_index(const Pose<size_t>& index) const {
return Pose<float>(from_index(index.position), index.orientation);
}
HostMap DeviceMap::load_to_host() {
HostMap result(index_width(), index_height(), resolution_, log_);
data_.memcpy_get(*result.data());
return result;
}
__global__ void device_consolidate_data(Array2d<Cell>* map,
Array2d<float>* dest) {
const size_t x_fact = map->width() / dest->width();
const size_t y_fact = map->height() / dest->height();
for (size_t j = threadIdx.y; j < dest->height(); j += blockDim.y) {
for (size_t i = threadIdx.x; i < dest->width(); i += blockDim.x) {
float sum = 0.f;
for (size_t cy = 0; cy < y_fact; ++cy) {
for (size_t cx = 0; cx < x_fact; ++cx) {
const size_t x = i * x_fact + cx;
const size_t y = j * y_fact + cy;
sum += map->at(x, y).value;
}
}
dest->at(i, j) = sum / (x_fact * y_fact);
}
}
}
void DeviceMap::get_data(float* dest, size_t max_width, size_t max_height,
size_t* result_width, size_t* result_height) {
const size_t map_width = data_.width();
const size_t map_height = data_.height();
const size_t x_fact = map_width / (max_width + 1) + 1;
const size_t y_fact = map_height / (max_height + 1) + 1;
const size_t fact = max(x_fact, y_fact);
const size_t sub_width = map_width / fact;
const size_t sub_height = map_height / fact;
DeviceArray2d<float> sub(sub_width, sub_height);
device_consolidate_data<<<1, dim3(32, 32)>>>(data_.device_handle(),
sub.device_handle());
Array2d<float> dest_array(dest, sub_width, sub_height,
sub_width * sizeof(float));
sub.memcpy_get(dest_array);
*result_width = sub_width;
*result_height = sub_height;
}
} // namespace gpu_planning
|
ce3d32e655309cf6b1e75cbbf22a21ba4a277ba5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <primitiv/config.h>
#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {
__device__ half pown_fw_element_dev(half x, std::int32_t k) {
// NOTE(odashi):
// std::abs(-0x80000000) is UB under 2's complement systems.
// However, this value should be also evaluated as 0x80000000 by directly
// casting to std::uint32_t.
const std::int32_t min_k = -0x80000000;
std::uint32_t remain = (k == min_k) ? min_k : ::abs(k);
float ret = 1.f;
float factor = ::__half2float(x);
// Performs the exponentiation-by-squaring method.
while (remain) {
if (remain & 1) ret *= factor;
factor *= factor;
remain >>= 1;
}
return ::__float2half(k >= 0 ? ret : 1.f / ret);
}
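// Illustration only: for k = 13 (binary 1101) the loop above multiplies ret by factor^1,
// then factor^4, then factor^8 (skipping the zero bit), so ret = x^13 after only three
// multiplications into ret instead of the twelve a naive product would need.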
__global__ void pown_fw_dev(
const half *px, std::int32_t k, std::uint32_t size, half *py) {
const std::uint32_t i = IDX;
if (i < size) py[i] = pown_fw_element_dev(px[i], k);
}
__global__ void pown_bw_dev(
const half *px, const half *py, const half *pgy, std::int32_t k,
std::uint32_t size, half *pgx) {
static_cast<void>(px);
static_cast<void>(py);
const std::uint32_t i = IDX;
if (i < size) INPLACE_ADD(pgx + i, k * GY_VAL * Y_VAL / X_VAL);
}
} // namespace
namespace primitiv {
namespace devices {
void CUDA16::pown_fw_impl(const Tensor &x, std::int32_t k, Tensor &y) {
const std::uint32_t size = x.shape().size();
const std::uint32_t num_blocks = GRID_SIZE(size,dim1_x_);
CUDA_CALL(::hipSetDevice(dev_id_));
hipLaunchKernelGGL(( ::pown_fw_dev), dim3(num_blocks), dim3(dim1_x_), 0, 0,
CDATA(half, x), k, size, MDATA(half, y));
}
void CUDA16::pown_bw_impl(
const Tensor &x, const Tensor &y, const Tensor &gy, std::int32_t k,
Tensor &gx) {
const std::uint32_t size = x.shape().size();
const std::uint32_t num_blocks = GRID_SIZE(size, dim1_x_);
CUDA_CALL(::hipSetDevice(dev_id_));
hipLaunchKernelGGL(( ::pown_bw_dev), dim3(num_blocks), dim3(dim1_x_), 0, 0,
CDATA(half, x), CDATA(half, y), CDATA(half, gy), k, size,
MDATA(half, gx));
}
} // namespace devices
} // namespace primitiv
|
ce3d32e655309cf6b1e75cbbf22a21ba4a277ba5.cu
|
#include <primitiv/config.h>
#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {
__device__ half pown_fw_element_dev(half x, std::int32_t k) {
// NOTE(odashi):
// std::abs(-0x80000000) is UB under 2's complement systems.
// However, this value should be also evaluated as 0x80000000 by directly
// casting to std::uint32_t.
const std::int32_t min_k = -0x80000000;
std::uint32_t remain = (k == min_k) ? min_k : ::abs(k);
float ret = 1.f;
float factor = ::__half2float(x);
// Performs the exponentiation-by-squaring method.
while (remain) {
if (remain & 1) ret *= factor;
factor *= factor;
remain >>= 1;
}
return ::__float2half(k >= 0 ? ret : 1.f / ret);
}
__global__ void pown_fw_dev(
const half *px, std::int32_t k, std::uint32_t size, half *py) {
const std::uint32_t i = IDX;
if (i < size) py[i] = pown_fw_element_dev(px[i], k);
}
__global__ void pown_bw_dev(
const half *px, const half *py, const half *pgy, std::int32_t k,
std::uint32_t size, half *pgx) {
static_cast<void>(px);
static_cast<void>(py);
const std::uint32_t i = IDX;
if (i < size) INPLACE_ADD(pgx + i, k * GY_VAL * Y_VAL / X_VAL);
}
} // namespace
namespace primitiv {
namespace devices {
void CUDA16::pown_fw_impl(const Tensor &x, std::int32_t k, Tensor &y) {
const std::uint32_t size = x.shape().size();
const std::uint32_t num_blocks = GRID_SIZE(size,dim1_x_);
CUDA_CALL(::cudaSetDevice(dev_id_));
::pown_fw_dev<<<num_blocks, dim1_x_>>>(
CDATA(half, x), k, size, MDATA(half, y));
}
void CUDA16::pown_bw_impl(
const Tensor &x, const Tensor &y, const Tensor &gy, std::int32_t k,
Tensor &gx) {
const std::uint32_t size = x.shape().size();
const std::uint32_t num_blocks = GRID_SIZE(size, dim1_x_);
CUDA_CALL(::cudaSetDevice(dev_id_));
::pown_bw_dev<<<num_blocks, dim1_x_>>>(
CDATA(half, x), CDATA(half, y), CDATA(half, gy), k, size,
MDATA(half, gx));
}
} // namespace devices
} // namespace primitiv
|
8ce68e545d14cb23950a093299e1f8243c9580b0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "roi_align_impl.cuh"
#include "util.cuh"
#include "runtime/device/gpu/cuda_common.h"
inline __device__ int roi_cast_int(float x) { return __float2int_rd(x); }
inline __device__ int roi_cast_int(half x) { return __half2int_rd(x); }
template <typename T>
__device__ void bilinear_interpolate(const int height, const int width, T y, T x, int *x_low, int *y_low, int *x_high,
int *y_high, T *w1, T *w2, T *w3, T *w4) {
// return 0 if out of map boundary
if (y < static_cast<T>(-1.0) || y > static_cast<T>(height) || x < static_cast<T>(-1.0) || x > static_cast<T>(width)) {
*w1 = *w2 = *w3 = *w4 = 0;
*x_low = *x_high = *y_low = *y_high = -1;
return;
}
// the lower bound is clamped to zero
y = y <= static_cast<T>(.0) ? static_cast<T>(.0) : y;
x = x <= static_cast<T>(.0) ? static_cast<T>(.0) : x;
// top left point
*y_low = roi_cast_int(y);
*x_low = roi_cast_int(x);
// bottom right point
if (*y_low >= height - 1) {
*y_high = *y_low = height - 1;
y = static_cast<T>(*y_low);
} else {
*y_high = *y_low + 1;
}
if (*x_low >= width - 1) {
*x_high = *x_low = width - 1;
x = static_cast<T>(*x_low);
} else {
*x_high = *x_low + 1;
}
// distance to nearest points
T lx, ly, hx, hy;
ly = y - static_cast<T>(*y_low), lx = x - static_cast<T>(*x_low);
hy = static_cast<T>(1.) - ly, hx = static_cast<T>(1.) - lx;
// each weight is the product of the distances to the opposite corner:
// the closer the sample point is to a corner, the larger that corner's weight.
*w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx;
return;
}
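// Illustration only: for a sample point (y, x) = (2.3, 3.7) the corners are y_low = 2,
// y_high = 3, x_low = 3, x_high = 4, with ly = 0.3, lx = 0.7, hy = 0.7, hx = 0.3,
// giving w1 = 0.21, w2 = 0.49, w3 = 0.09, w4 = 0.21; the weights sum to 1 and the
// nearest corner (2, 4) receives the largest weight w2.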
template <typename T>
__device__ void bin_box(int thread_idx, const T *roi_boxes, int roi_cols, const T spatial_scale, const int sample_num,
int roi_end_mode, const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, int *offset, int *n, int *c, int *ph, int *pw,
int *roi_bin_grid_h, int *roi_bin_grid_w, T *bin_size_h, T *bin_size_w, T *roi_start_h,
T *roi_start_w) {
// (n, c, ph, pw) is the base param of pooled map
*pw = thread_idx % pooled_width;
*ph = (thread_idx / pooled_width) % pooled_height;
*c = (thread_idx / pooled_width / pooled_height) % channels;
*n = thread_idx / pooled_width / pooled_height / channels;
// Roi has
// 1. 4 points, or
// 2. indicator + 4 points (1 + 4)
const T *roi_box = roi_boxes + (*n) * roi_cols;
int roi_batch_ind = 0;
if (roi_cols == 5) {
roi_batch_ind = roi_box[0];
roi_box++;
}
// Scale and shift ROI
T roi_offset = roi_end_mode == 1 ? static_cast<T>(0.5) : static_cast<T>(.0);
*roi_start_w = roi_box[0] * spatial_scale - roi_offset;
*roi_start_h = roi_box[1] * spatial_scale - roi_offset;
T roi_end_w = roi_box[2] * spatial_scale - roi_offset;
T roi_end_h = roi_box[3] * spatial_scale - roi_offset;
// New ROI height/width
T roi_width = roi_end_w - (*roi_start_w);
T roi_height = roi_end_h - (*roi_start_h);
// ratio of roi / pooled
*bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
*bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
*offset = (roi_batch_ind * channels + (*c)) * height * width;
// grid (int) by Sample ratio if defined, otherwise by pooled H/W
*roi_bin_grid_h = (sample_num > 0) ? sample_num : roi_cast_int(roi_height / static_cast<T>(pooled_height));
*roi_bin_grid_w = (sample_num > 0) ? sample_num : roi_cast_int(roi_width / static_cast<T>(pooled_width));
return;
}
template <typename T>
__global__ void ROIAlignKernel(size_t size, const T *input, const T *roi_boxes, int roi_cols, T *out_data,
const T spatial_scale, const int sample_num, int roi_end_mode, const int channels,
const int height, const int width, const int pooled_height, const int pooled_width) {
for (int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; thread_idx < size;
thread_idx += blockDim.x * gridDim.x) {
int n = thread_idx / pooled_width / pooled_height / channels;
const T *roi_box = roi_boxes + n * roi_cols;
if (roi_box[0] < static_cast<T>(0.001) && roi_box[1] < static_cast<T>(0.001) &&
roi_box[2] < static_cast<T>(0.001) && roi_box[3] < static_cast<T>(0.001)) {
continue;
}
int offset, c, ph, pw, roi_bin_grid_h, roi_bin_grid_w;
T bin_size_h, bin_size_w, roi_start_h, roi_start_w;
bin_box(thread_idx, roi_boxes, roi_cols, spatial_scale, sample_num, roi_end_mode, channels, height, width,
pooled_height, pooled_width, &offset, &n, &c, &ph, &pw, &roi_bin_grid_h, &roi_bin_grid_w, &bin_size_h,
&bin_size_w, &roi_start_h, &roi_start_w);
// (n, c, ph, pw) is the base param of pooled map
const T count_points_in_grid_cell = roi_bin_grid_h * roi_bin_grid_w;
T accumulate_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
// Shift half point RIGHT for y / x, while previous scaled roi shift half point LEFT
const T y = roi_start_h + static_cast<T>(ph) * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + static_cast<T>(pw) * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
// bilinear interpolate by shifted y / x
// calculate bilinear interpolation
int x_low = 0, y_low = 0, x_high = 0, y_high = 0;
T w1, w2, w3, w4;
bilinear_interpolate(height, width, y, x, &x_low, &y_low, &x_high, &y_high, &w1, &w2, &w3, &w4);
if (x_low != -1 || x_high != -1 || y_low != -1 || y_high != -1) {
T v1 = input[offset + y_low * width + x_low];
T v2 = input[offset + y_low * width + x_high];
T v3 = input[offset + y_high * width + x_low];
T v4 = input[offset + y_high * width + x_high];
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
accumulate_val += val;
}
}
}
accumulate_val /= count_points_in_grid_cell;
out_data[thread_idx] = accumulate_val;
}
}
template <typename T>
void ROIAlign(const T *x, const T *roi_boxes, int roi_rows, int roi_cols, T *out_data, const T spatial_scale,
const int sample_num, int roi_end_mode, const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, hipStream_t cuda_stream) {
size_t size = roi_rows * channels * pooled_height * pooled_width;
hipLaunchKernelGGL(( ROIAlignKernel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, x, roi_boxes, roi_cols, out_data,
spatial_scale, sample_num, roi_end_mode, channels,
height, width, pooled_height, pooled_width);
return;
}
template void ROIAlign<float>(const float *x, const float *roi_boxes, int roi_rows, int roi_cols, float *out_data,
const float spatial_scale, const int sample_num, int roi_end_mode, const int channels,
const int height, const int width, const int pooled_height, const int pooled_width,
hipStream_t cuda_stream);
template void ROIAlign<half>(const half *x, const half *roi_boxes, int roi_rows, int roi_cols, half *out_data,
const half spatial_scale, const int sample_num, int roi_end_mode, const int channels,
const int height, const int width, const int pooled_height, const int pooled_width,
hipStream_t cuda_stream);
template <typename T>
__global__ void ROIAlignGradKernel(size_t size, const T *dy, const T *roi_boxes, int roi_cols, T *dx,
const T spatial_scale, const int sample_num, int roi_end_mode, const int channels,
const int height, const int width, const int pooled_height, const int pooled_width) {
for (int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; thread_idx < size;
thread_idx += blockDim.x * gridDim.x) {
int n = thread_idx / pooled_width / pooled_height / channels;
const T *roi_box = roi_boxes + n * roi_cols;
if (roi_box[0] < static_cast<T>(0.001) && roi_box[1] < static_cast<T>(0.001) &&
roi_box[2] < static_cast<T>(0.001) && roi_box[3] < static_cast<T>(0.001)) {
continue;
}
int offset, c, ph, pw, roi_bin_grid_h, roi_bin_grid_w;
T bin_size_h, bin_size_w, roi_start_h, roi_start_w;
bin_box(thread_idx, roi_boxes, roi_cols, spatial_scale, sample_num, roi_end_mode, channels, height, width,
pooled_height, pooled_width, &offset, &n, &c, &ph, &pw, &roi_bin_grid_h, &roi_bin_grid_w, &bin_size_h,
&bin_size_w, &roi_start_h, &roi_start_w);
// (n, c, ph, pw) is the base param of pooled map
const T count_points_in_grid_cell = roi_bin_grid_h * roi_bin_grid_w;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T *offset_top_diff = dy + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
// Shift half point RIGHT for y / x, while previous scaled roi shift half point LEFT
const T y = roi_start_h + static_cast<T>(ph) * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + static_cast<T>(pw) * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
// bilinear interpolate by shifted y / x
// calculate bilinear interpolation
int x_low = 0, y_low = 0, x_high = 0, y_high = 0;
T w1, w2, w3, w4;
bilinear_interpolate(height, width, y, x, &x_low, &y_low, &x_high, &y_high, &w1, &w2, &w3, &w4);
if (x_low != -1 || x_high != -1 || y_low != -1 || y_high != -1) {
T g1 = top_diff_this_bin * w1 / count_points_in_grid_cell;
T g2 = top_diff_this_bin * w2 / count_points_in_grid_cell;
T g3 = top_diff_this_bin * w3 / count_points_in_grid_cell;
T g4 = top_diff_this_bin * w4 / count_points_in_grid_cell;
T *dx_1 = dx + offset + y_low * width + x_low;
T *dx_2 = dx + offset + y_low * width + x_high;
T *dx_3 = dx + offset + y_high * width + x_low;
T *dx_4 = dx + offset + y_high * width + x_high;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
MsAtomicAdd(dx_1, g1);
MsAtomicAdd(dx_2, g2);
MsAtomicAdd(dx_3, g3);
MsAtomicAdd(dx_4, g4);
}
}
}
}
}
}
template <typename T>
void ROIAlignGrad(const T *dy, const T *roi_boxes, int roi_rows, int roi_cols, T *dx, const T spatial_scale,
const int sample_num, int roi_end_mode, const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, hipStream_t cuda_stream) {
size_t size = roi_rows * channels * pooled_height * pooled_width;
hipLaunchKernelGGL(( ROIAlignGradKernel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream,
size, dy, roi_boxes, roi_cols, dx, spatial_scale, sample_num, roi_end_mode, channels, height, width, pooled_height,
pooled_width);
return;
}
template void ROIAlignGrad<float>(const float *dy, const float *roi_boxes, int roi_rows, int roi_cols, float *dx,
const float spatial_scale, const int sample_num, int roi_end_mode, const int channels,
const int height, const int width, const int pooled_height, const int pooled_width,
hipStream_t cuda_stream);
template void ROIAlignGrad<half>(const half *dy, const half *roi_boxes, int roi_rows, int roi_cols, half *dx,
const half spatial_scale, const int sample_num, int roi_end_mode, const int channels,
const int height, const int width, const int pooled_height, const int pooled_width,
hipStream_t cuda_stream);
|
8ce68e545d14cb23950a093299e1f8243c9580b0.cu
|
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "roi_align_impl.cuh"
#include "util.cuh"
#include "runtime/device/gpu/cuda_common.h"
inline __device__ int roi_cast_int(float x) { return __float2int_rd(x); }
inline __device__ int roi_cast_int(half x) { return __half2int_rd(x); }
template <typename T>
__device__ void bilinear_interpolate(const int height, const int width, T y, T x, int *x_low, int *y_low, int *x_high,
int *y_high, T *w1, T *w2, T *w3, T *w4) {
// return 0 if out of map boundary
if (y < static_cast<T>(-1.0) || y > static_cast<T>(height) || x < static_cast<T>(-1.0) || x > static_cast<T>(width)) {
*w1 = *w2 = *w3 = *w4 = 0;
*x_low = *x_high = *y_low = *y_high = -1;
return;
}
// the lower bound is clamped to zero
y = y <= static_cast<T>(.0) ? static_cast<T>(.0) : y;
x = x <= static_cast<T>(.0) ? static_cast<T>(.0) : x;
// top left point
*y_low = roi_cast_int(y);
*x_low = roi_cast_int(x);
// bottom right point
if (*y_low >= height - 1) {
*y_high = *y_low = height - 1;
y = static_cast<T>(*y_low);
} else {
*y_high = *y_low + 1;
}
if (*x_low >= width - 1) {
*x_high = *x_low = width - 1;
x = static_cast<T>(*x_low);
} else {
*x_high = *x_low + 1;
}
// distance to nearest points
T lx, ly, hx, hy;
ly = y - static_cast<T>(*y_low), lx = x - static_cast<T>(*x_low);
hy = static_cast<T>(1.) - ly, hx = static_cast<T>(1.) - lx;
// each weight is the product of the distances to the opposite corner:
// the closer the sample point is to a corner, the larger that corner's weight.
*w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx;
return;
}
template <typename T>
__device__ void bin_box(int thread_idx, const T *roi_boxes, int roi_cols, const T spatial_scale, const int sample_num,
int roi_end_mode, const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, int *offset, int *n, int *c, int *ph, int *pw,
int *roi_bin_grid_h, int *roi_bin_grid_w, T *bin_size_h, T *bin_size_w, T *roi_start_h,
T *roi_start_w) {
// (n, c, ph, pw) is the base param of pooled map
*pw = thread_idx % pooled_width;
*ph = (thread_idx / pooled_width) % pooled_height;
*c = (thread_idx / pooled_width / pooled_height) % channels;
*n = thread_idx / pooled_width / pooled_height / channels;
// Roi has
// 1. 4 points, or
// 2. indicator + 4 points (1 + 4)
const T *roi_box = roi_boxes + (*n) * roi_cols;
int roi_batch_ind = 0;
if (roi_cols == 5) {
roi_batch_ind = roi_box[0];
roi_box++;
}
// Scale and shift ROI
T roi_offset = roi_end_mode == 1 ? static_cast<T>(0.5) : static_cast<T>(.0);
*roi_start_w = roi_box[0] * spatial_scale - roi_offset;
*roi_start_h = roi_box[1] * spatial_scale - roi_offset;
T roi_end_w = roi_box[2] * spatial_scale - roi_offset;
T roi_end_h = roi_box[3] * spatial_scale - roi_offset;
// New ROI height/width
T roi_width = roi_end_w - (*roi_start_w);
T roi_height = roi_end_h - (*roi_start_h);
// ratio of roi / pooled
*bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
*bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
*offset = (roi_batch_ind * channels + (*c)) * height * width;
// grid (int) by Sample ratio if defined, otherwise by pooled H/W
*roi_bin_grid_h = (sample_num > 0) ? sample_num : roi_cast_int(roi_height / static_cast<T>(pooled_height));
*roi_bin_grid_w = (sample_num > 0) ? sample_num : roi_cast_int(roi_width / static_cast<T>(pooled_width));
return;
}
template <typename T>
__global__ void ROIAlignKernel(size_t size, const T *input, const T *roi_boxes, int roi_cols, T *out_data,
const T spatial_scale, const int sample_num, int roi_end_mode, const int channels,
const int height, const int width, const int pooled_height, const int pooled_width) {
for (int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; thread_idx < size;
thread_idx += blockDim.x * gridDim.x) {
int n = thread_idx / pooled_width / pooled_height / channels;
const T *roi_box = roi_boxes + n * roi_cols;
if (roi_box[0] < static_cast<T>(0.001) && roi_box[1] < static_cast<T>(0.001) &&
roi_box[2] < static_cast<T>(0.001) && roi_box[3] < static_cast<T>(0.001)) {
continue;
}
int offset, c, ph, pw, roi_bin_grid_h, roi_bin_grid_w;
T bin_size_h, bin_size_w, roi_start_h, roi_start_w;
bin_box(thread_idx, roi_boxes, roi_cols, spatial_scale, sample_num, roi_end_mode, channels, height, width,
pooled_height, pooled_width, &offset, &n, &c, &ph, &pw, &roi_bin_grid_h, &roi_bin_grid_w, &bin_size_h,
&bin_size_w, &roi_start_h, &roi_start_w);
// (n, c, ph, pw) is the base param of pooled map
const T count_points_in_grid_cell = roi_bin_grid_h * roi_bin_grid_w;
T accumulate_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
// Shift half point RIGHT for y / x, while previous scaled roi shift half point LEFT
const T y = roi_start_h + static_cast<T>(ph) * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + static_cast<T>(pw) * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
// bilinear interpolate by shifted y / x
// calculate bilinear interpolation
int x_low = 0, y_low = 0, x_high = 0, y_high = 0;
T w1, w2, w3, w4;
bilinear_interpolate(height, width, y, x, &x_low, &y_low, &x_high, &y_high, &w1, &w2, &w3, &w4);
if (x_low != -1 || x_high != -1 || y_low != -1 || y_high != -1) {
T v1 = input[offset + y_low * width + x_low];
T v2 = input[offset + y_low * width + x_high];
T v3 = input[offset + y_high * width + x_low];
T v4 = input[offset + y_high * width + x_high];
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
accumulate_val += val;
}
}
}
accumulate_val /= count_points_in_grid_cell;
out_data[thread_idx] = accumulate_val;
}
}
template <typename T>
void ROIAlign(const T *x, const T *roi_boxes, int roi_rows, int roi_cols, T *out_data, const T spatial_scale,
const int sample_num, int roi_end_mode, const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, cudaStream_t cuda_stream) {
size_t size = roi_rows * channels * pooled_height * pooled_width;
ROIAlignKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, x, roi_boxes, roi_cols, out_data,
spatial_scale, sample_num, roi_end_mode, channels,
height, width, pooled_height, pooled_width);
return;
}
template void ROIAlign<float>(const float *x, const float *roi_boxes, int roi_rows, int roi_cols, float *out_data,
const float spatial_scale, const int sample_num, int roi_end_mode, const int channels,
const int height, const int width, const int pooled_height, const int pooled_width,
cudaStream_t cuda_stream);
template void ROIAlign<half>(const half *x, const half *roi_boxes, int roi_rows, int roi_cols, half *out_data,
const half spatial_scale, const int sample_num, int roi_end_mode, const int channels,
const int height, const int width, const int pooled_height, const int pooled_width,
cudaStream_t cuda_stream);
template <typename T>
__global__ void ROIAlignGradKernel(size_t size, const T *dy, const T *roi_boxes, int roi_cols, T *dx,
const T spatial_scale, const int sample_num, int roi_end_mode, const int channels,
const int height, const int width, const int pooled_height, const int pooled_width) {
for (int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; thread_idx < size;
thread_idx += blockDim.x * gridDim.x) {
int n = thread_idx / pooled_width / pooled_height / channels;
const T *roi_box = roi_boxes + n * roi_cols;
if (roi_box[0] < static_cast<T>(0.001) && roi_box[1] < static_cast<T>(0.001) &&
roi_box[2] < static_cast<T>(0.001) && roi_box[3] < static_cast<T>(0.001)) {
continue;
}
int offset, c, ph, pw, roi_bin_grid_h, roi_bin_grid_w;
T bin_size_h, bin_size_w, roi_start_h, roi_start_w;
bin_box(thread_idx, roi_boxes, roi_cols, spatial_scale, sample_num, roi_end_mode, channels, height, width,
pooled_height, pooled_width, &offset, &n, &c, &ph, &pw, &roi_bin_grid_h, &roi_bin_grid_w, &bin_size_h,
&bin_size_w, &roi_start_h, &roi_start_w);
// (n, c, ph, pw) is the base param of pooled map
const T count_points_in_grid_cell = roi_bin_grid_h * roi_bin_grid_w;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T *offset_top_diff = dy + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
// Shift half point RIGHT for y / x, while previous scaled roi shift half point LEFT
const T y = roi_start_h + static_cast<T>(ph) * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + static_cast<T>(pw) * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
// bilinear interpolate by shifted y / x
// calculate bilinear interpolation
int x_low = 0, y_low = 0, x_high = 0, y_high = 0;
T w1, w2, w3, w4;
bilinear_interpolate(height, width, y, x, &x_low, &y_low, &x_high, &y_high, &w1, &w2, &w3, &w4);
if (x_low != -1 || x_high != -1 || y_low != -1 || y_high != -1) {
T g1 = top_diff_this_bin * w1 / count_points_in_grid_cell;
T g2 = top_diff_this_bin * w2 / count_points_in_grid_cell;
T g3 = top_diff_this_bin * w3 / count_points_in_grid_cell;
T g4 = top_diff_this_bin * w4 / count_points_in_grid_cell;
T *dx_1 = dx + offset + y_low * width + x_low;
T *dx_2 = dx + offset + y_low * width + x_high;
T *dx_3 = dx + offset + y_high * width + x_low;
T *dx_4 = dx + offset + y_high * width + x_high;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
MsAtomicAdd(dx_1, g1);
MsAtomicAdd(dx_2, g2);
MsAtomicAdd(dx_3, g3);
MsAtomicAdd(dx_4, g4);
}
}
}
}
}
}
template <typename T>
void ROIAlignGrad(const T *dy, const T *roi_boxes, int roi_rows, int roi_cols, T *dx, const T spatial_scale,
const int sample_num, int roi_end_mode, const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, cudaStream_t cuda_stream) {
size_t size = roi_rows * channels * pooled_height * pooled_width;
ROIAlignGradKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(
size, dy, roi_boxes, roi_cols, dx, spatial_scale, sample_num, roi_end_mode, channels, height, width, pooled_height,
pooled_width);
return;
}
template void ROIAlignGrad<float>(const float *dy, const float *roi_boxes, int roi_rows, int roi_cols, float *dx,
const float spatial_scale, const int sample_num, int roi_end_mode, const int channels,
const int height, const int width, const int pooled_height, const int pooled_width,
cudaStream_t cuda_stream);
template void ROIAlignGrad<half>(const half *dy, const half *roi_boxes, int roi_rows, int roi_cols, half *dx,
const half spatial_scale, const int sample_num, int roi_end_mode, const int channels,
const int height, const int width, const int pooled_height, const int pooled_width,
cudaStream_t cuda_stream);
|
1996c241366782b9bdd22137cd56b08630e14e77.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zgeadd2.cu, normal z -> s, Mon Jun 25 18:24:11 2018
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to slaset.
*/
__global__
void sgeadd2_full(
int m, int n,
float alpha,
const float *dA, int ldda,
float beta,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = alpha*dA[j*ldda] + beta*dB[j*lddb];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = alpha*dA[j*ldda] + beta*dB[j*lddb];
}
}
}
}
/***************************************************************************//**
Purpose
-------
ZGEADD2 adds two matrices, dB = alpha*dA + beta*dB.
@see ZGEADD for dB = alpha*dA + dB, lacking beta.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
alpha REAL
The scalar alpha.
@param[in]
dA REAL array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
beta REAL
The scalar beta.
@param[in,out]
dB REAL array, dimension (LDDB,N)
The m by n matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd
*******************************************************************************/
extern "C" void
magmablas_sgeadd2(
magma_int_t m, magma_int_t n,
float alpha,
magmaFloat_const_ptr dA, magma_int_t ldda,
float beta,
magmaFloat_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 )
return;
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
hipLaunchKernelGGL(( sgeadd2_full), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, dA, ldda, beta, dB, lddb );
}
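/*
    Editor's sketch (illustrative only, not part of the original MAGMA source): a plain CPU
    reference of the same update, dB = alpha*dA + beta*dB, useful for checking the kernel on
    small matrices. It assumes column-major host arrays hA and hB with leading dimensions
    lda and ldb, matching the layout the device code uses.
*/
extern "C" void
sgeadd2_cpu_reference(
    magma_int_t m, magma_int_t n,
    float alpha, const float *hA, magma_int_t lda,
    float beta, float *hB, magma_int_t ldb )
{
    for( magma_int_t j = 0; j < n; ++j ) {
        for( magma_int_t i = 0; i < m; ++i ) {
            hB[i + j*ldb] = alpha*hA[i + j*lda] + beta*hB[i + j*ldb];
        }
    }
}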
|
1996c241366782b9bdd22137cd56b08630e14e77.cu
|
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zgeadd2.cu, normal z -> s, Mon Jun 25 18:24:11 2018
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to slaset.
*/
__global__
void sgeadd2_full(
int m, int n,
float alpha,
const float *dA, int ldda,
float beta,
float *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = alpha*dA[j*ldda] + beta*dB[j*lddb];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = alpha*dA[j*ldda] + beta*dB[j*lddb];
}
}
}
}
/***************************************************************************//**
Purpose
-------
ZGEADD2 adds two matrices, dB = alpha*dA + beta*dB.
@see ZGEADD for dB = alpha*dA + dB, lacking beta.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
alpha REAL
The scalar alpha.
@param[in]
dA REAL array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
beta REAL
The scalar beta.
@param[in,out]
dB REAL array, dimension (LDDB,N)
The m by n matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd
*******************************************************************************/
extern "C" void
magmablas_sgeadd2(
magma_int_t m, magma_int_t n,
float alpha,
magmaFloat_const_ptr dA, magma_int_t ldda,
float beta,
magmaFloat_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 )
return;
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
sgeadd2_full<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, alpha, dA, ldda, beta, dB, lddb );
}
|
9b8d23761714ebef5ad8b87d46f9c861c00f0943.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// CUDA kernel used as GPU busy-work: it overwrites each element with sin(element) rather than pausing for a fixed cycle count
__global__ void sleep(int64_t *array, int64_t num_elems)
{
int i;
for(i=0; i<num_elems; i++)
array[i] = sin((double)array[i]);
}
extern "C" void allocate_mem(int64_t **device_value, int64_t num_elems)
{
gpuErrchk( hipMalloc((void**)device_value, num_elems*sizeof(int64_t)) );
}
extern "C" void copy_to_device(int64_t *host_array, int64_t *device_array, int64_t num_elems)
{
gpuErrchk( hipMemcpy(device_array, host_array, num_elems*sizeof(int64_t), hipMemcpyHostToDevice) );
}
extern "C" void copy_from_device(int64_t *host_array, int64_t *device_array, int64_t num_elems)
{
gpuErrchk( hipMemcpy(host_array, device_array, 1*sizeof(int64_t), hipMemcpyDeviceToHost) );
}
// Launches a kernel that sleeps for num_cycles
extern "C" void sleep_kernel(int64_t *completed_cycles, int64_t requested_cycles)
{
// Our kernel will launch a single thread to sleep the kernel
int blockSize, gridSize;
blockSize = 1;
gridSize = 1;
// Execute the kernel in default stream
hipLaunchKernelGGL(( sleep), dim3(gridSize), dim3(blockSize) , 0, 0, completed_cycles, requested_cycles);
}
// Wait for all work to complete
extern "C" void wait_for_gpu()
{
hipDeviceSynchronize();
}
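/*
   Editor's sketch (illustrative only, not part of the original file): one possible host-side
   call sequence for the wrappers above. The array size and contents are made up for the
   example; note that copy_from_device, as written above, copies only the first element back.
*/
extern "C" void example_usage()
{
    const int64_t n = 1024;
    int64_t host_array[1024] = {0};
    int64_t *device_array = NULL;
    allocate_mem(&device_array, n);              // hipMalloc under the hood
    copy_to_device(host_array, device_array, n);
    sleep_kernel(device_array, n);               // launches the single-thread kernel
    wait_for_gpu();                              // hipDeviceSynchronize
    copy_from_device(host_array, device_array, n);
}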
|
9b8d23761714ebef5ad8b87d46f9c861c00f0943.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// CUDA kernel used as GPU busy-work: it overwrites each element with sin(element) rather than pausing for a fixed cycle count
__global__ void sleep(int64_t *array, int64_t num_elems)
{
int i;
for(i=0; i<num_elems; i++)
array[i] = sin((double)array[i]);
}
extern "C" void allocate_mem(int64_t **device_value, int64_t num_elems)
{
gpuErrchk( cudaMalloc((void**)device_value, num_elems*sizeof(int64_t)) );
}
extern "C" void copy_to_device(int64_t *host_array, int64_t *device_array, int64_t num_elems)
{
gpuErrchk( cudaMemcpy(device_array, host_array, num_elems*sizeof(int64_t), cudaMemcpyHostToDevice) );
}
extern "C" void copy_from_device(int64_t *host_array, int64_t *device_array, int64_t num_elems)
{
gpuErrchk( cudaMemcpy(host_array, device_array, 1*sizeof(int64_t), cudaMemcpyDeviceToHost) );
}
// Launches a kernel that sleeps for num_cycles
extern "C" void sleep_kernel(int64_t *completed_cycles, int64_t requested_cycles)
{
// Our kernel will launch a single thread to sleep the kernel
int blockSize, gridSize;
blockSize = 1;
gridSize = 1;
// Execute the kernel in default stream
sleep<<< gridSize, blockSize >>>(completed_cycles, requested_cycles);
}
// Wait for all work to complete
extern "C" void wait_for_gpu()
{
cudaDeviceSynchronize();
}
|
7f39a4725cdf2f31a478e292ed3b9140962d1f75.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zlarfbx.cu, normal z -> s, Tue Aug 30 09:38:30 2016
*/
#include "magma_internal.h"
#include "commonblas_s.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
/******************************************************************************/
extern "C"
__global__ void
magma_sgemv_kernel1(int m, const float * __restrict__ V, int ldv,
const float * __restrict__ c,
float *dwork)
{
const int i = threadIdx.x;
const float *dV = V + (blockIdx.x) * ldv;
__shared__ float sum[ BLOCK_SIZE ];
float lsum;
/* lsum := v**H * C */
lsum = MAGMA_S_ZERO;
for (int j = i; j < m; j += BLOCK_SIZE)
lsum += MAGMA_S_MUL( MAGMA_S_CONJ( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i == 0)
dwork [blockIdx.x] = sum[0];
}
/******************************************************************************/
/*
Call
magma_sgemv_kernel3<<< n, BLOCK_SIZE, 0, queue->cuda_stream() >>>(m, V, ldv, c, dwork, tau)
to compute
SGEMV( "Conjugate transpose", m, n, -tau[0], V, ldv, c, 1, zero, dwork, 1)
and to set c[0] to 1.
i.e.,
work = -tau[0] V**H c
*/
extern "C"
__global__ void
magma_sgemv_kernel3(int m, const float * __restrict__ V, int ldv, float *c,
float *dwork, float *tau)
{
const int i = threadIdx.x;
const float *dV = V + (blockIdx.x) * ldv;
__shared__ float sum[ BLOCK_SIZE ];
float lsum;
if (i == 0)
c[0] = MAGMA_S_ONE;
/* lsum := v**H * C */
lsum = MAGMA_S_ZERO;
for (int j = i; j < m; j += BLOCK_SIZE)
lsum += MAGMA_S_MUL( MAGMA_S_CONJ( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i == 0)
dwork [blockIdx.x] = -tau[0]*sum[0];
}
/******************************************************************************/
extern "C"
__global__ void
magma_sgemv_kernel2(int m, int n, const float * __restrict__ V, int ldv,
const float * __restrict__ x, float *c)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
float lsum;
V += j;
lsum = MAGMA_S_ZERO;
if (j < m) {
for (int k=0; k < n; k++)
lsum += MAGMA_S_MUL( V[k*ldv], x[k]);
c[j] -= lsum;
}
}
/******************************************************************************/
/*
Apply a real block reflector H to a real vector C from the left
(i.e., C = H C). H is represented in the form
H = I - V T V**H
where T is the real k-by-k upper triangular matrix in the
representation of the block reflector, and V is a real block of
k elementary reflectors.
*/
extern "C" void
magma_slarfbx_gpu_q(
magma_int_t m, magma_int_t k,
magmaFloat_ptr V, magma_int_t ldv,
magmaFloat_ptr dT, magma_int_t ldt,
magmaFloat_ptr c,
magmaFloat_ptr dwork,
magma_queue_t queue )
{
/* dwork = V**H c */
hipLaunchKernelGGL(( magma_sgemv_kernel1)
, dim3(k), dim3(BLOCK_SIZE), 0, queue->cuda_stream() ,
m, V, ldv, c, dwork);
/* dwork = T**H dwork */
hipLaunchKernelGGL(( magma_strmv_tkernel)
, dim3(k), dim3(k), 0, queue->cuda_stream() ,
dT, ldt, dwork, dwork+k);
/* c = c - V dwork */
dim3 blocks3( magma_ceildiv( m, BLOCK_SIZE ) );
dim3 threads3( BLOCK_SIZE );
hipLaunchKernelGGL(( magma_sgemv_kernel2)
, dim3(blocks3), dim3(threads3), 0, queue->cuda_stream() ,
m, k, V, ldv, dwork+k, c);
}
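/*
    Editor's sketch (illustrative only, not part of the original MAGMA source): a naive CPU
    reference of the three steps performed above, handy for validating small cases. It assumes
    column-major host arrays hV (m-by-k, leading dimension ldv) and hT (k-by-k upper triangular,
    leading dimension ldt), and a host workspace hwork of length at least 2*k, mirroring how
    dwork and dwork+k are used by the device code. The exact behaviour of magma_strmv_tkernel is
    defined elsewhere; this follows the T**H (= T**T for real data) step described in the
    comments above.
*/
extern "C" void
slarfbx_cpu_reference(
    magma_int_t m, magma_int_t k,
    const float *hV, magma_int_t ldv,
    const float *hT, magma_int_t ldt,
    float *hc, float *hwork )
{
    /* hwork = V**T c */
    for (magma_int_t j = 0; j < k; ++j) {
        float s = 0.f;
        for (magma_int_t i = 0; i < m; ++i)
            s += hV[i + j*ldv] * hc[i];
        hwork[j] = s;
    }
    /* hwork+k = T**T hwork (only the upper triangle of T is referenced) */
    for (magma_int_t j = 0; j < k; ++j) {
        float s = 0.f;
        for (magma_int_t i = 0; i <= j; ++i)
            s += hT[i + j*ldt] * hwork[i];
        hwork[k + j] = s;
    }
    /* c = c - V (hwork+k) */
    for (magma_int_t i = 0; i < m; ++i) {
        float s = 0.f;
        for (magma_int_t j = 0; j < k; ++j)
            s += hV[i + j*ldv] * hwork[k + j];
        hc[i] -= s;
    }
}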
|
7f39a4725cdf2f31a478e292ed3b9140962d1f75.cu
|
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zlarfbx.cu, normal z -> s, Tue Aug 30 09:38:30 2016
*/
#include "magma_internal.h"
#include "commonblas_s.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
/******************************************************************************/
extern "C"
__global__ void
magma_sgemv_kernel1(int m, const float * __restrict__ V, int ldv,
const float * __restrict__ c,
float *dwork)
{
const int i = threadIdx.x;
const float *dV = V + (blockIdx.x) * ldv;
__shared__ float sum[ BLOCK_SIZE ];
float lsum;
/* lsum := v**H * C */
lsum = MAGMA_S_ZERO;
for (int j = i; j < m; j += BLOCK_SIZE)
lsum += MAGMA_S_MUL( MAGMA_S_CONJ( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i == 0)
dwork [blockIdx.x] = sum[0];
}
/******************************************************************************/
/*
Call
magma_sgemv_kernel3<<< n, BLOCK_SIZE, 0, queue->cuda_stream() >>>(m, V, ldv, c, dwork, tau)
to compute
SGEMV( "Conjugate transpose", m, n, -tau[0], V, ldv, c, 1, zero, dwork, 1)
and to set c[0] to 1.
i.e.,
work = -tau[0] V**H c
*/
extern "C"
__global__ void
magma_sgemv_kernel3(int m, const float * __restrict__ V, int ldv, float *c,
float *dwork, float *tau)
{
const int i = threadIdx.x;
const float *dV = V + (blockIdx.x) * ldv;
__shared__ float sum[ BLOCK_SIZE ];
float lsum;
if (i == 0)
c[0] = MAGMA_S_ONE;
/* lsum := v**H * C */
lsum = MAGMA_S_ZERO;
for (int j = i; j < m; j += BLOCK_SIZE)
lsum += MAGMA_S_MUL( MAGMA_S_CONJ( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i == 0)
dwork [blockIdx.x] = -tau[0]*sum[0];
}
/******************************************************************************/
extern "C"
__global__ void
magma_sgemv_kernel2(int m, int n, const float * __restrict__ V, int ldv,
const float * __restrict__ x, float *c)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
float lsum;
V += j;
lsum = MAGMA_S_ZERO;
if (j < m) {
for (int k=0; k < n; k++)
lsum += MAGMA_S_MUL( V[k*ldv], x[k]);
c[j] -= lsum;
}
}
/******************************************************************************/
/*
Apply a real block reflector H to a real vector C from the left
(i.e., C = H C). H is represented in the form
H = I - V T V**H
where T is the real k-by-k upper triangular matrix in the
representation of the block reflector, and V is a real block of
k elementary reflectors.
*/
extern "C" void
magma_slarfbx_gpu_q(
magma_int_t m, magma_int_t k,
magmaFloat_ptr V, magma_int_t ldv,
magmaFloat_ptr dT, magma_int_t ldt,
magmaFloat_ptr c,
magmaFloat_ptr dwork,
magma_queue_t queue )
{
/* dwork = V**H c */
magma_sgemv_kernel1
<<< k, BLOCK_SIZE, 0, queue->cuda_stream() >>>
(m, V, ldv, c, dwork);
/* dwork = T**H dwork */
magma_strmv_tkernel
<<< k, k, 0, queue->cuda_stream() >>>
( dT, ldt, dwork, dwork+k);
/* c = c - V dwork */
dim3 blocks3( magma_ceildiv( m, BLOCK_SIZE ) );
dim3 threads3( BLOCK_SIZE );
magma_sgemv_kernel2
<<< blocks3, threads3, 0, queue->cuda_stream() >>>
( m, k, V, ldv, dwork+k, c);
}
|
0f1f3d1870cb3fe0b17191c0e25b554d23255699.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating-point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating-point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return the next representable single-precision floating-point value after the argument.
extern "C"
// Calculate the value of the first argument to the power of the second argument.
extern "C"
// Compute the single-precision floating-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
__global__ void vec_log10 (int n, double *result, double *x)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
if (id < n)
{
result[id] = log10(x[id]);
}
}
|
0f1f3d1870cb3fe0b17191c0e25b554d23255699.cu
|
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating-point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating-point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return the next representable single-precision floating-point value after the argument.
extern "C"
// Calculate the value of the first argument to the power of the second argument.
extern "C"
// Compute the single-precision floating-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
__global__ void vec_log10 (int n, double *result, double *x)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
if (id < n)
{
result[id] = log10(x[id]);
}
}
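// Editor's sketch (illustrative only, not part of the original file): one way the kernel above
// could be launched for a flat array of n doubles with a 1-D grid; with gridDim.y == 1 and
// blockDim.y == 1 the 2-D index arithmetic in the kernel reduces to the usual flat index.
// Buffer names (h_x, h_result) are made up for the example.
extern "C" void vec_log10_example(int n, const double *h_x, double *h_result)
{
    double *d_x = NULL, *d_result = NULL;
    cudaMalloc(&d_x, n * sizeof(double));
    cudaMalloc(&d_result, n * sizeof(double));
    cudaMemcpy(d_x, h_x, n * sizeof(double), cudaMemcpyHostToDevice);
    dim3 block(256, 1);
    dim3 grid((n + block.x - 1) / block.x, 1);
    vec_log10<<<grid, block>>>(n, d_result, d_x);
    cudaMemcpy(h_result, d_result, n * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(d_x);
    cudaFree(d_result);
}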
|
96df5a06132869f097023a6b49821e0a5c78eed4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <string.h>
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "gpu_util.h"
#include "kmeans_util_sa.h"
#include "rocblas.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
/* gpu parameters */
//#define GRID_SIZE 16
//#define BLOCK_SIZE 256
#define DIMENSION 2
// #define KMEANS
#define SA
// #define MINI_BATCHES
int main(int argc, char *argv[]) {
int n, k, old_k, i, j;
int dim = 2;
double **points;
int BLOCK_SIZE = 256; //Default
if (argc > 1) BLOCK_SIZE = atoi(argv[1]);
if (argc == 4) k = atoi(argv[2]);
//The second input argument should be the dataset filename
FILE *in;
if (argc == 4) {
in = fopen(argv[3], "r");
} else if (argc > 2) {
in = fopen(argv[2], "r");
} else {
in = stdin;
}
//Parse file
register short read_items = -1;
read_items = fscanf(in, "%d %d %d\n", &n ,&old_k, &dim);
if (read_items != 3){
printf("Something went wrong with reading the parameters!\n");
return EXIT_FAILURE;
}
points = create_2D_double_array(n, dim);
for (i =0; i<n; i++) {
for (j=0; j<dim; j++) {
read_items = fscanf(in, "%lf", &points[i][j]);
if (read_items != 1) {
printf("Something went wrong with reading the points!\n");
}
}
}
fclose(in);
if (argc < 4) k = old_k;
printf("Input Read successfully \n");
//Create CUBLAS Handles
hipblasStatus_t stat;
hipblasHandle_t handle;
stat = hipblasCreate(&handle);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
return EXIT_FAILURE;
}
// Calculate grid and block sizes
int grid_size = (n+BLOCK_SIZE-1)/BLOCK_SIZE;
dim3 gpu_grid(grid_size, 1);
dim3 gpu_block(BLOCK_SIZE, 1);
int thread_num = grid_size * BLOCK_SIZE;
printf("Grid size : %dx%d\n", gpu_grid.x, gpu_grid.y);
printf("Block size: %dx%d\n", gpu_block.x, gpu_block.y);
clock_t start = clock();
double **centers;
printf("Initializing Centers...\n");
centers = init_centers_kpp(points, n, k, dim);
printf("Initializing Centers done\n");
// start algorithm
double *points_clusters;
points_clusters = (double *)calloc(n*k, sizeof(double));
// GPU allocations
double *dev_centers, *dev_points, *dev_centers_of_points;
double *dev_points_help;
double *dev_new_centers;
double *dev_points_clusters;
int *dev_points_clusters_old;
double *dev_points_in_cluster;
double *dev_ones;
//RNG CUDA States
hiprandState_t* devStates;
dev_centers = (double *) gpu_alloc(k*dim*sizeof(double));
dev_points = (double *) gpu_alloc(n*dim*sizeof(double));
dev_centers_of_points = (double *) gpu_alloc(n*dim*sizeof(double));
dev_points_in_cluster = (double *) gpu_alloc(k*sizeof(double));
dev_points_clusters = (double *) gpu_alloc(n*k*sizeof(double));
dev_points_clusters_old = (int *) gpu_alloc(n*sizeof(int)); //Used for SA SAKM
dev_new_centers = (double *) gpu_alloc(k*dim*sizeof(double));
dev_ones = (double *) gpu_alloc(n*sizeof(double));
dev_points_help = (double *) gpu_alloc(n*sizeof(double));
printf("GPU allocs done \n");
call_create_dev_ones(dev_ones, n, gpu_grid, gpu_block);
// Transpose points and centers for cublas
// TODO: Transpose at cublas in gpu
double * staging_points = (double*) calloc(n*dim, sizeof(double));
double * staging_centers = (double*) calloc(k*dim, sizeof(double));
transpose(points, staging_points, n, dim);
transpose(centers, staging_centers, k, dim);
// Copy points to GPU
if (copy_to_gpu(staging_points, dev_points, n*dim*sizeof(double)) != 0) {
printf("Error in copy_to_gpu points\n");
return -1;
}
// Copy centers to GPU
if (copy_to_gpu(staging_centers, dev_centers, k*dim*sizeof(double)) != 0) {
printf("Error in copy_to_gpu centers\n");
return -1;
}
//Setup Random States
hipMalloc(&devStates, thread_num * sizeof(hiprandState_t));
setup_RNG_states(devStates, gpu_grid, gpu_block);
//Init the result_cluster arrays once
init_point_clusters(dev_points, dev_centers,
n, k, dim,
gpu_grid, gpu_block,
dev_points_clusters, dev_points_clusters_old,
devStates);
// FIXME: For now we pass TWO matrices for centers, one normal and
// one transposed. The transposed can be omitted by doing some
// changes in Step 1 of K-Means.
double *dev_temp_centers, *dev_temp_points_clusters;
dev_temp_centers = (double *) gpu_alloc(k*dim*sizeof(double));
dev_temp_points_clusters = (double *) gpu_alloc(n*k*sizeof(double));
int step = 1;
int check = 0;
int* dev_check = (int *) gpu_alloc(sizeof(int));
double* dev_cost = (double *) gpu_alloc(sizeof(double));
// printf("Loop Start \n");
// Debug
// for(i=0;i<k;i++){
// for(j=0;j<k*dim;j+=k)
// printf("%lf,\t", staging_centers[j + i]);
// printf("\n");
// }
srand(unsigned(time(NULL)));
/*
SA & K-MEANS ALGORITHM
*/
//SA config
//The SA starting temperature should be set so that the probabilities of accepting moves on the
//very first iteration are very close to 1.
//A start temperature of 100 seems to work well for the tested datasets.
double start_temp = 100.0;
double temp = start_temp;
int eq_iterations = 5000;
double best_cost = DBL_MAX;
#ifdef SA
//SA loop
printf("Starting SA on GPU \n");
int eq_counter = 0;
int same_cost_for_n_iters = 0;
double curr_cost = -123;
while(eq_counter < eq_iterations) {
//printf("SA Temp: %lf \n", temp);
//Sample solution space with SA
double cost = kmeans_on_gpu_SA(
dev_points,
dev_centers,
n, k, dim,
dev_points_clusters,
dev_points_clusters_old,
dev_points_in_cluster,
dev_centers_of_points,
dev_new_centers,
dev_check,
gpu_grid,
gpu_block,
handle,
stat,
dev_ones,
dev_points_help,
dev_temp_centers,
devStates,
temp);
step += 1;
eq_counter++;
//Acceptance checks
if (cost <= best_cost){
//Accept the solution immediately
//Found better solution
best_cost = cost;
//printf("Found Better Solution: %lf Temp %lf\n", cost, temp);
hipMemcpy(dev_centers, dev_new_centers, sizeof(double)*k*dim, hipMemcpyDeviceToDevice);
//Storing global best to temp_centers
hipMemcpy(dev_temp_centers, dev_new_centers, sizeof(double)*k*dim, hipMemcpyDeviceToDevice);
hipMemcpy(dev_temp_points_clusters, dev_points_clusters, sizeof(double)*k*n, hipMemcpyDeviceToDevice);
} else {
//Accept the solution with probability
double accept_factor = 0.5; // The larger the factor, the smaller the acceptance probability becomes
//Increasing the factor is equivalent with decreasing the start_temp
double prob = exp(-accept_factor*(cost - best_cost)/start_temp);
double uniform = rand() / (RAND_MAX + 1.);
if (prob > uniform){
//Accept solution as the current one
// printf("Accepting with Prob: %lf Diff %lf\n", prob, cost - best_cost);
hipMemcpy(dev_centers, dev_new_centers, sizeof(double)*k*dim, hipMemcpyDeviceToDevice);
}
}
if (curr_cost == best_cost) {
same_cost_for_n_iters ++;
}
else {
same_cost_for_n_iters = 1;
curr_cost = best_cost;
}
// Make it stop earlier once the best cost has not changed for many iterations
if (same_cost_for_n_iters == 200) {
break;
}
}
//Restore the global best solution from temp_centers
hipMemcpy(dev_centers, dev_temp_centers, sizeof(double)*k*dim, hipMemcpyDeviceToDevice);
hipMemcpy(dev_points_clusters, dev_temp_points_clusters, sizeof(double)*k*n, hipMemcpyDeviceToDevice);
// printf("SA Steps %d \n", step);
#endif
/*
DEFAULT K-MEANS ALGORITHM
*/
#ifdef KMEANS
step = 0;
printf("Proper KMeans Algorithm \n");
while (!check) {
double cost = kmeans_on_gpu(
dev_points,
dev_centers,
n, k, dim,
dev_points_clusters,
dev_points_in_cluster,
dev_centers_of_points,
dev_new_centers,
dev_check,
BLOCK_SIZE,
handle,
stat,
dev_ones,
dev_points_help,
dev_temp_centers);
copy_from_gpu(&check, dev_check, sizeof(int));
//printf("Step %4d Check: %d Cost: %lf \n", step, check, cost);
step += 1;
}
printf("KMeans algorithm steps %d \n", step);
#endif
//Post Processing
// double eval = evaluate_solution(dev_points, dev_centers, dev_points_clusters,
// dev_centers_of_points, dev_points_help,
// n, k, dim,
// gpu_grid, gpu_block,
// handle, stat);
// printf("Final Solution Value: %lf \n", eval);
printf("Total num. of steps is %d.\n", step);
double time_elapsed = (double)(clock() - start) / CLOCKS_PER_SEC;
printf("Total Time Elapsed: %lf seconds\n", time_elapsed);
printf("Time per step is %lf\n", time_elapsed / step);
// We keep the map of points to clusters in order to compute the final inertia
copy_from_gpu(staging_centers, dev_centers, k*dim*sizeof(double));
copy_from_gpu(points_clusters, dev_points_clusters, n*k*sizeof(double));
// Compute the final inertia
double inertia = 0;
int curr_cluster = 0;
// i in points
for(i=0;i<n;i++){
// Find point cluster index
curr_cluster = -1;
for(j=0;j<k;j++){
if(points_clusters[j*n+i] == 1.0){
curr_cluster = j;
break;
}
}
// Compute distance of point from specific cluster
double curr_dist = 0;
for(j=0;j<dim;j++){
curr_dist += pow(staging_centers[j*k + curr_cluster] - staging_points[j*n + i], 2);
}
inertia += sqrt(curr_dist);
}
printf("Sum of distances of samples to their closest cluster center: %lf\n", inertia);
FILE *f;
//Store Performance metrics
//For now just the time elapsed, in the future maybe we'll need memory GPU memory bandwidth etc...
f = fopen("log.out", "w");
fprintf(f, "Time Elapsed: %lf ", time_elapsed);
fclose(f);
// print & save results
f = fopen("centers.out", "w");
printf("Centers:\n");
for (i = 0; i < k; i++) {
for (j = 0; j < dim; j++){
printf("%lf ", staging_centers[j*k + i]);
fprintf(f, "%lf ", staging_centers[j*k + i]);
}
printf("\n");
fprintf(f, "\n");
}
fclose(f);
//Store Mapping Data in case we need it
f = fopen("point_cluster_map.out", "w");
for (i =0;i<k;i++){
for (j=0;j<n;j++){
fprintf(f, "%lf ", points_clusters[i*n + j]);
}
fprintf(f, "\n");
}
fclose(f);
// GPU clean
gpu_free(dev_centers);
gpu_free(dev_new_centers);
gpu_free(dev_temp_centers);
gpu_free(dev_points);
gpu_free(dev_points_clusters);
gpu_free(dev_temp_points_clusters);
gpu_free(dev_points_in_cluster);
gpu_free(dev_centers_of_points);
gpu_free(devStates);
stat = hipblasDestroy(handle);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("CUBLAS destruction failed\n");
return EXIT_FAILURE;
}
// clear and exit
delete_points(points);
delete_points(centers);
free(points_clusters);
return 0;
}
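/*
   Editor's sketch (illustrative only, not part of the original program): the acceptance rule
   used in the SA loop above, pulled out as a standalone helper. Parameter names follow the
   variables in main(); the helper is not called anywhere in this file. It returns 1 when the
   candidate solution should be accepted.
*/
static int sa_accept(double cost, double best_cost, double start_temp, double accept_factor)
{
    if (cost <= best_cost)
        return 1;                                   /* always take an improving (or equal-cost) move */
    double prob = exp(-accept_factor * (cost - best_cost) / start_temp); /* Metropolis-style probability */
    double uniform = rand() / (RAND_MAX + 1.);      /* uniform sample in [0, 1) */
    return prob > uniform;
}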
|
96df5a06132869f097023a6b49821e0a5c78eed4.cu
|
#include <string.h>
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "gpu_util.h"
#include "kmeans_util_sa.h"
#include "cublas_v2.h"
#include <curand.h>
#include <curand_kernel.h>
/* gpu parameters */
//#define GRID_SIZE 16
//#define BLOCK_SIZE 256
#define DIMENSION 2
// #define KMEANS
#define SA
// #define MINI_BATCHES
int main(int argc, char *argv[]) {
int n, k, old_k, i, j;
int dim = 2;
double **points;
int BLOCK_SIZE = 256; //Default
if (argc > 1) BLOCK_SIZE = atoi(argv[1]);
if (argc == 4) k = atoi(argv[2]);
//The second input argument should be the dataset filename
FILE *in;
if (argc == 4) {
in = fopen(argv[3], "r");
} else if (argc > 2) {
in = fopen(argv[2], "r");
} else {
in = stdin;
}
//Parse file
register short read_items = -1;
read_items = fscanf(in, "%d %d %d\n", &n ,&old_k, &dim);
if (read_items != 3){
printf("Something went wrong with reading the parameters!\n");
return EXIT_FAILURE;
}
points = create_2D_double_array(n, dim);
for (i =0; i<n; i++) {
for (j=0; j<dim; j++) {
read_items = fscanf(in, "%lf", &points[i][j]);
if (read_items != 1) {
printf("Something went wrong with reading the points!\n");
}
}
}
fclose(in);
if (argc < 4) k = old_k;
printf("Input Read successfully \n");
//Create CUBLAS Handles
cublasStatus_t stat;
cublasHandle_t handle;
stat = cublasCreate(&handle);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
return EXIT_FAILURE;
}
// Calculate grid and block sizes
int grid_size = (n+BLOCK_SIZE-1)/BLOCK_SIZE;
dim3 gpu_grid(grid_size, 1);
dim3 gpu_block(BLOCK_SIZE, 1);
int thread_num = grid_size * BLOCK_SIZE;
printf("Grid size : %dx%d\n", gpu_grid.x, gpu_grid.y);
printf("Block size: %dx%d\n", gpu_block.x, gpu_block.y);
clock_t start = clock();
double **centers;
printf("Initializing Centers...\n");
centers = init_centers_kpp(points, n, k, dim);
printf("Initializing Centers done\n");
// start algorithm
double *points_clusters;
points_clusters = (double *)calloc(n*k, sizeof(double));
// GPU allocations
double *dev_centers, *dev_points, *dev_centers_of_points;
double *dev_points_help;
double *dev_new_centers;
double *dev_points_clusters;
int *dev_points_clusters_old;
double *dev_points_in_cluster;
double *dev_ones;
//RNG CUDA States
curandState* devStates;
dev_centers = (double *) gpu_alloc(k*dim*sizeof(double));
dev_points = (double *) gpu_alloc(n*dim*sizeof(double));
dev_centers_of_points = (double *) gpu_alloc(n*dim*sizeof(double));
dev_points_in_cluster = (double *) gpu_alloc(k*sizeof(double));
dev_points_clusters = (double *) gpu_alloc(n*k*sizeof(double));
dev_points_clusters_old = (int *) gpu_alloc(n*sizeof(int)); //Used for SA SAKM
dev_new_centers = (double *) gpu_alloc(k*dim*sizeof(double));
dev_ones = (double *) gpu_alloc(n*sizeof(double));
dev_points_help = (double *) gpu_alloc(n*sizeof(double));
printf("GPU allocs done \n");
call_create_dev_ones(dev_ones, n, gpu_grid, gpu_block);
// Transpose points and centers for cublas
// TODO: Transpose at cublas in gpu
double * staging_points = (double*) calloc(n*dim, sizeof(double));
double * staging_centers = (double*) calloc(k*dim, sizeof(double));
transpose(points, staging_points, n, dim);
transpose(centers, staging_centers, k, dim);
// Copy points to GPU
if (copy_to_gpu(staging_points, dev_points, n*dim*sizeof(double)) != 0) {
printf("Error in copy_to_gpu points\n");
return -1;
}
// Copy centers to GPU
if (copy_to_gpu(staging_centers, dev_centers, k*dim*sizeof(double)) != 0) {
printf("Error in copy_to_gpu centers\n");
return -1;
}
//Setup Random States
cudaMalloc(&devStates, thread_num * sizeof(curandState));
setup_RNG_states(devStates, gpu_grid, gpu_block);
//Init the result_cluster arrays once
init_point_clusters(dev_points, dev_centers,
n, k, dim,
gpu_grid, gpu_block,
dev_points_clusters, dev_points_clusters_old,
devStates);
// FIXME: For now we pass TWO matrices for centers, one normal and
// one transposed. The transposed can be omitted by doing some
// changes in Step 1 of K-Means.
double *dev_temp_centers, *dev_temp_points_clusters;
dev_temp_centers = (double *) gpu_alloc(k*dim*sizeof(double));
dev_temp_points_clusters = (double *) gpu_alloc(n*k*sizeof(double));
int step = 1;
int check = 0;
int* dev_check = (int *) gpu_alloc(sizeof(int));
double* dev_cost = (double *) gpu_alloc(sizeof(double));
// printf("Loop Start \n");
// Debug
// for(i=0;i<k;i++){
// for(j=0;j<k*dim;j+=k)
// printf("%lf,\t", staging_centers[j + i]);
// printf("\n");
// }
srand(unsigned(time(NULL)));
/*
SA & K-MEANS ALGORITHM
*/
//SA config
//The SA starting temperature should be set so that the probabilities of accepting moves on the
//very first iteration are very close to 1.
//A start temperature of 100 seems to work well for the tested datasets.
double start_temp = 100.0;
double temp = start_temp;
int eq_iterations = 5000;
double best_cost = DBL_MAX;
#ifdef SA
//SA loop
printf("Starting SA on GPU \n");
int eq_counter = 0;
int same_cost_for_n_iters = 0;
double curr_cost = -123;
while(eq_counter < eq_iterations) {
//printf("SA Temp: %lf \n", temp);
//Sample solution space with SA
double cost = kmeans_on_gpu_SA(
dev_points,
dev_centers,
n, k, dim,
dev_points_clusters,
dev_points_clusters_old,
dev_points_in_cluster,
dev_centers_of_points,
dev_new_centers,
dev_check,
gpu_grid,
gpu_block,
handle,
stat,
dev_ones,
dev_points_help,
dev_temp_centers,
devStates,
temp);
step += 1;
eq_counter++;
//Acceptance checks
if (cost <= best_cost){
//Accept the solution immediately
//Found better solution
best_cost = cost;
//printf("Found Better Solution: %lf Temp %lf\n", cost, temp);
cudaMemcpy(dev_centers, dev_new_centers, sizeof(double)*k*dim, cudaMemcpyDeviceToDevice);
//Storing global best to temp_centers
cudaMemcpy(dev_temp_centers, dev_new_centers, sizeof(double)*k*dim, cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_temp_points_clusters, dev_points_clusters, sizeof(double)*k*n, cudaMemcpyDeviceToDevice);
} else {
//Accept the solution with probability
double accept_factor = 0.5; // The larger the factor, the smaller the acceptance probability becomes
//Increasing the factor is equivalent with decreasing the start_temp
double prob = exp(-accept_factor*(cost - best_cost)/start_temp);
double uniform = rand() / (RAND_MAX + 1.);
if (prob > uniform){
//Accept solution as the current one
// printf("Accepting with Prob: %lf Diff %lf\n", prob, cost - best_cost);
cudaMemcpy(dev_centers, dev_new_centers, sizeof(double)*k*dim, cudaMemcpyDeviceToDevice);
}
}
if (curr_cost == best_cost) {
same_cost_for_n_iters ++;
}
else {
same_cost_for_n_iters = 1;
curr_cost = best_cost;
}
// Make it stop earlier once the best cost has not changed for many iterations
if (same_cost_for_n_iters == 200) {
break;
}
}
//Restore the global best solution from temp_centers
cudaMemcpy(dev_centers, dev_temp_centers, sizeof(double)*k*dim, cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_points_clusters, dev_temp_points_clusters, sizeof(double)*k*n, cudaMemcpyDeviceToDevice);
// printf("SA Steps %d \n", step);
#endif
/*
DEFAULT K-MEANS ALGORITHM
*/
#ifdef KMEANS
step = 0;
printf("Proper KMeans Algorithm \n");
while (!check) {
double cost = kmeans_on_gpu(
dev_points,
dev_centers,
n, k, dim,
dev_points_clusters,
dev_points_in_cluster,
dev_centers_of_points,
dev_new_centers,
dev_check,
BLOCK_SIZE,
handle,
stat,
dev_ones,
dev_points_help,
dev_temp_centers);
copy_from_gpu(&check, dev_check, sizeof(int));
//printf("Step %4d Check: %d Cost: %lf \n", step, check, cost);
step += 1;
}
printf("KMeans algorithm steps %d \n", step);
#endif
//Post Processing
// double eval = evaluate_solution(dev_points, dev_centers, dev_points_clusters,
// dev_centers_of_points, dev_points_help,
// n, k, dim,
// gpu_grid, gpu_block,
// handle, stat);
// printf("Final Solution Value: %lf \n", eval);
printf("Total num. of steps is %d.\n", step);
double time_elapsed = (double)(clock() - start) / CLOCKS_PER_SEC;
printf("Total Time Elapsed: %lf seconds\n", time_elapsed);
printf("Time per step is %lf\n", time_elapsed / step);
// We keep the map of points to clusters in order to compute the final inertia
copy_from_gpu(staging_centers, dev_centers, k*dim*sizeof(double));
copy_from_gpu(points_clusters, dev_points_clusters, n*k*sizeof(double));
// Compute the final inertia
double inertia = 0;
int curr_cluster = 0;
// i in points
for(i=0;i<n;i++){
// Find point cluster index
curr_cluster = -1;
for(j=0;j<k;j++){
if(points_clusters[j*n+i] == 1.0){
curr_cluster = j;
break;
}
}
// Compute distance of point from specific cluster
double curr_dist = 0;
for(j=0;j<dim;j++){
curr_dist += pow(staging_centers[j*k + curr_cluster] - staging_points[j*n + i], 2);
}
inertia += sqrt(curr_dist);
}
printf("Sum of distances of samples to their closest cluster center: %lf\n", inertia);
FILE *f;
//Store Performance metrics
//For now just the time elapsed, in the future maybe we'll need memory GPU memory bandwidth etc...
f = fopen("log.out", "w");
fprintf(f, "Time Elapsed: %lf ", time_elapsed);
fclose(f);
// print & save results
f = fopen("centers.out", "w");
printf("Centers:\n");
for (i = 0; i < k; i++) {
for (j = 0; j < dim; j++){
printf("%lf ", staging_centers[j*k + i]);
fprintf(f, "%lf ", staging_centers[j*k + i]);
}
printf("\n");
fprintf(f, "\n");
}
fclose(f);
//Store Mapping Data in case we need it
f = fopen("point_cluster_map.out", "w");
for (i =0;i<k;i++){
for (j=0;j<n;j++){
fprintf(f, "%lf ", points_clusters[i*n + j]);
}
fprintf(f, "\n");
}
fclose(f);
// GPU clean
gpu_free(dev_centers);
gpu_free(dev_new_centers);
gpu_free(dev_temp_centers);
gpu_free(dev_points);
gpu_free(dev_points_clusters);
gpu_free(dev_temp_points_clusters);
gpu_free(dev_points_in_cluster);
gpu_free(dev_centers_of_points);
gpu_free(devStates);
stat = cublasDestroy(handle);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("CUBLAS destruction failed\n");
return EXIT_FAILURE;
}
// clear and exit
delete_points(points);
delete_points(centers);
free(points_clusters);
return 0;
}
|
88fb32eb4d187d961a0481ddb41365feb5673236.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <random>
#include "Constants.h"
#include <hip/hip_runtime.h>
void initializeBodies(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass);
void runSimulation(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass, char* image, float* hdImage);
//__global__ void interactBodies(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass);
__global__ void clearF(float* Fx, float* Fy, float* Fz);
__global__ void interactn2(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass, float* Fx, float* Fy, float* Fz);
__global__ void updateAll(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass, float* Fx, float* Fy, float* Fz);
float magnitude(vec3 v);
void renderClear(char* image, float* hdImage);
__global__ void GPUrenderBodies(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass, float* hdImage);
float clamp(float x);
void writeRender(char* data, float* hdImage, int step);
int main()
{
std::cout << SYSTEM_THICKNESS << "AU thick disk\n";
char *image = new char[WIDTH*HEIGHT*3];
float *hdImage = new float[WIDTH*HEIGHT*3];
//struct body *bodies = new struct body[NUM_BODIES];
float* xpos = new float[NUM_BODIES];
float* ypos = new float[NUM_BODIES];
float* zpos = new float[NUM_BODIES];
float* xvel = new float[NUM_BODIES];
float* yvel = new float[NUM_BODIES];
float* zvel = new float[NUM_BODIES];
float* mass = new float[NUM_BODIES];
initializeBodies(xpos,ypos,zpos,xvel,yvel,zvel,mass);
runSimulation(xpos,ypos,zpos,xvel,yvel,zvel,mass, image, hdImage);
std::cout << "\nwe made it\n";
delete[] image;
return 0;
}
void initializeBodies(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass)
{
using std::uniform_real_distribution;
uniform_real_distribution<float> randAngle (0.0, 200.0*PI);
uniform_real_distribution<float> randRadius (INNER_BOUND, SYSTEM_SIZE);
uniform_real_distribution<float> randHeight (0.0, SYSTEM_THICKNESS);
std::default_random_engine gen (0);
float angle;
float radius;
float velocity;
//STARS
velocity = 0.67*sqrt((G*SOLAR_MASS)/(4*BINARY_SEPARATION*TO_METERS));
//STAR 1
xpos[0] = 0.0;///-BINARY_SEPARATION;
ypos[0] = 0.0;
zpos[0] = 0.0;
xvel[0] = 0.0;
yvel[0] = 0.0;//velocity;
zvel[0] = 0.0;
mass[0] = SOLAR_MASS;
///STARTS AT NUMBER OF STARS///
float totalExtraMass = 0.0;
for (int i=1; i<NUM_BODIES; i++)
{
angle = randAngle(gen);
radius = sqrt(SYSTEM_SIZE)*sqrt(randRadius(gen));
velocity = pow(((G*(SOLAR_MASS+((radius-INNER_BOUND)/SYSTEM_SIZE)*EXTRA_MASS*SOLAR_MASS))
/ (radius*TO_METERS)), 0.5);
xpos[i] = radius*cos(angle);
ypos[i] = radius*sin(angle);
zpos[i] = randHeight(gen)-SYSTEM_THICKNESS/2;
xvel[i] = velocity*sin(angle);
yvel[i] = -velocity*cos(angle);
zvel[i] = 0.0;
mass[i] = (EXTRA_MASS*SOLAR_MASS)/NUM_BODIES;
totalExtraMass += (EXTRA_MASS*SOLAR_MASS)/NUM_BODIES;
}
std::cout << "\nTotal Disk Mass: " << totalExtraMass;
std::cout << "\nEach Particle weight: " << (EXTRA_MASS*SOLAR_MASS)/NUM_BODIES
<< "\n______________________________\n";
}
void runSimulation(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass, char* image, float* hdImage)
{
float *d_xpos; float *d_ypos; float *d_zpos; float *d_xvel; float *d_yvel; float *d_zvel; float *d_mass; char *d_image; float *d_hdImage;
float *Fx; float *Fy; float* Fz;
hipMalloc(&d_xpos,NUM_BODIES*sizeof(float));
hipMalloc(&d_ypos,NUM_BODIES*sizeof(float));
hipMalloc(&d_zpos,NUM_BODIES*sizeof(float));
hipMalloc(&d_xvel,NUM_BODIES*sizeof(float));
hipMalloc(&d_yvel,NUM_BODIES*sizeof(float));
hipMalloc(&d_zvel,NUM_BODIES*sizeof(float));
hipMalloc(&d_mass,NUM_BODIES*sizeof(float));
hipMalloc(&Fx, NUM_BODIES*sizeof(float));
hipMalloc(&Fy, NUM_BODIES*sizeof(float));
hipMalloc(&Fz, NUM_BODIES*sizeof(float));
hipMalloc(&d_image,WIDTH*HEIGHT*3*sizeof(char));
hipMalloc(&d_hdImage,WIDTH*HEIGHT*3*sizeof(float));
int nBlocks=(NUM_BODIES+1024-1)/1024;
long nsqrBlocks=(NUM_BODIES/1024)*(NUM_BODIES/1024);
dim3 grid(nsqrBlocks,1024,1);
//createFirstFrame
renderClear(image, hdImage);
hipMemcpy(d_hdImage,hdImage,WIDTH*HEIGHT*3*sizeof(float),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( GPUrenderBodies), dim3(nBlocks+1),dim3(1024), 0, 0, d_xpos,d_ypos,d_zpos,d_xvel,d_yvel,d_zvel,d_mass,d_hdImage);
hipMemcpy(hdImage,d_hdImage,WIDTH*HEIGHT*3*sizeof(float),hipMemcpyDeviceToHost);
writeRender(image, hdImage, 1);
for (int step=1; step<STEP_COUNT; step++)
{
std::cout << "\nBeginning timestep: " << step;
printf("\nStartK\n");
hipMemcpy(d_xpos, xpos, NUM_BODIES*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_ypos, ypos, NUM_BODIES*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_zpos, zpos, NUM_BODIES*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_xvel, xvel, NUM_BODIES*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_yvel, yvel, NUM_BODIES*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_zvel, zvel, NUM_BODIES*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_mass, mass, NUM_BODIES*sizeof(float), hipMemcpyHostToDevice);
printf("StartK1\n");
hipLaunchKernelGGL(( clearF), dim3(nBlocks+1),dim3(1024), 0, 0, Fx,Fy,Fz);
hipDeviceSynchronize();
hipLaunchKernelGGL(( interactn2), dim3(grid),dim3(1024), 0, 0, d_xpos,d_ypos,d_zpos,d_xvel,d_yvel,d_zvel,d_mass,Fx,Fy,Fz);
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if(error!=hipSuccess)
{
printf("CUDA error:%s\n",hipGetErrorString(error));
}
hipLaunchKernelGGL(( updateAll), dim3(nBlocks+1),dim3(1024), 0, 0, d_xpos,d_ypos,d_zpos,d_xvel,d_yvel,d_zvel,d_mass,Fx,Fy,Fz);
printf("EndK\n");
hipMemcpy( xpos,d_xpos, NUM_BODIES*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy( ypos, d_ypos,NUM_BODIES*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy( zpos,d_zpos, NUM_BODIES*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy( xvel,d_xvel, NUM_BODIES*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy( yvel,d_yvel, NUM_BODIES*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy( zvel,d_zvel, NUM_BODIES*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy( mass, d_mass,NUM_BODIES*sizeof(float), hipMemcpyDeviceToHost);
//printf("EndK2\n");
if (step%RENDER_INTERVAL==0)
{
std::cout << "\nWriting frame " << step;
if (DEBUG_INFO) {std::cout << "\nClearing Pixels..." << std::flush;}
renderClear(image, hdImage);
if (DEBUG_INFO) {std::cout << "\nRendering Particles..." << std::flush;}
//renderBodies(pos, vel, hdImage);
hipMemcpy(d_hdImage,hdImage,WIDTH*HEIGHT*3*sizeof(float),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( GPUrenderBodies), dim3(nBlocks+1),dim3(1024), 0, 0, d_xpos,d_ypos,d_zpos,d_xvel,d_yvel,d_zvel,d_mass,d_hdImage);
hipDeviceSynchronize();
hipMemcpy(hdImage,d_hdImage,WIDTH*HEIGHT*3*sizeof(float),hipMemcpyDeviceToHost);
if (DEBUG_INFO) {std::cout << "\nWriting frame to file..." << std::flush;}
writeRender(image, hdImage, step);
}
if (DEBUG_INFO) {std::cout << "\n-------Done------- timestep: "
<< step << "\n" << std::flush;}
}
}
__global__ void clearF(float* Fx, float* Fy, float* Fz)
{
int id= blockIdx.x*blockDim.x+threadIdx.x;
if(id<NUM_BODIES)
{
Fx[id]=0.0; Fy[id]=0.0; Fz[id]=0.0;
}
}
__global__ void interactn2(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass, float* Fx, float* Fy, float* Fz)
{
long id = blockDim.x * blockIdx.x + threadIdx.x+ blockDim.y * blockIdx.y;
long i=id%NUM_BODIES;
long j=id/NUM_BODIES;
if(i < NUM_BODIES && j< NUM_BODIES && i!=j)
{
float v1 = (xpos[i]-xpos[j])*TO_METERS;
float v2 = (ypos[i]-ypos[j])*TO_METERS;
float v3 = (zpos[i]-zpos[j])*TO_METERS;
float dist = sqrt(v1*v1+v2*v2+v3*v3);
float F = TIME_STEP*(G*mass[i]*mass[j]) / ((dist*dist + SOFTENING*SOFTENING) * dist);
atomicAdd(&Fx[i],F*v1/mass[i]);
atomicAdd(&Fy[i],F*v2/mass[i]);
atomicAdd(&Fz[i],F*v3/mass[i]);
}
}
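// --- Editor's sketch (illustrative, not part of the original file) --------------------------
// interactn2 above accumulates, per ordered pair (i, j), the softened-gravity velocity
// increment dv = TIME_STEP * G * m_j * r / ((|r|^2 + SOFTENING^2) * |r|) with r = pos_i - pos_j
// in metres (the m_i factors cancel); updateAll then subtracts the accumulated increment from
// the velocity, pulling body i toward body j. A serial reference for a single pair, assuming
// the same Constants.h definitions, is:
static void pairwiseIncrementReference(float xi, float yi, float zi,
                                       float xj, float yj, float zj,
                                       float mi, float mj,
                                       float* dvx, float* dvy, float* dvz)
{
    float v1 = (xi - xj) * TO_METERS;
    float v2 = (yi - yj) * TO_METERS;
    float v3 = (zi - zj) * TO_METERS;
    float dist = sqrt(v1*v1 + v2*v2 + v3*v3);
    float F = TIME_STEP * (G * mi * mj) / ((dist*dist + SOFTENING*SOFTENING) * dist);
    *dvx = F * v1 / mi; // same quantity the kernel atomically adds into Fx[i]
    *dvy = F * v2 / mi;
    *dvz = F * v3 / mi;
}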
__global__ void updateAll(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass, float* Fx, float* Fy, float* Fz)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i<NUM_BODIES)
{
xvel[i] -= Fx[i];
yvel[i] -= Fy[i];
zvel[i] -= Fz[i];
xpos[i] += TIME_STEP*xvel[i]/TO_METERS;
ypos[i] += TIME_STEP*yvel[i]/TO_METERS;
zpos[i] += TIME_STEP*zvel[i]/TO_METERS;
}
}
float magnitude(vec3 v)
{
return sqrt(v.x*v.x+v.y*v.y+v.z*v.z);
}
void renderClear(char* image, float* hdImage)
{
for (int i=0; i<WIDTH*HEIGHT*3; i++)
{
image[i] = 0; //char(image[i]/1.2);
hdImage[i] = 0.0;
}
}
__global__ void GPUrenderBodies(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass, float* hdImage)
{
/// ORTHOGONAL PROJECTION
int i = blockIdx.x*blockDim.x+threadIdx.x;
float velocityMax = MAX_VEL_COLOR; //35000
float velocityMin = sqrt(0.8*(G*(SOLAR_MASS+EXTRA_MASS*SOLAR_MASS))/
(SYSTEM_SIZE*TO_METERS)); //MIN_VEL_COLOR;
if(i<NUM_BODIES)
{
float vxsqr=xvel[i]*xvel[i];
float vysqr=yvel[i]*yvel[i];
float vzsqr=zvel[i]*zvel[i];
float vMag = sqrt(vxsqr+vysqr+vzsqr);
int x = (WIDTH/2.0)*(1.0+xpos[i]/(SYSTEM_SIZE*RENDER_SCALE));
int y = (HEIGHT/2.0)*(1.0+ypos[i]/(SYSTEM_SIZE*RENDER_SCALE));
if (x>DOT_SIZE && x<WIDTH-DOT_SIZE && y>DOT_SIZE && y<HEIGHT-DOT_SIZE)
{
float vPortion = sqrt((vMag-velocityMin) / velocityMax);
float xPixel = (WIDTH/2.0)*(1.0+xpos[i]/(SYSTEM_SIZE*RENDER_SCALE));
float yPixel = (HEIGHT/2.0)*(1.0+ypos[i]/(SYSTEM_SIZE*RENDER_SCALE));
float xP = floor(xPixel);
float yP = floor(yPixel);
color c;
c.r = max(min(4*(vPortion-0.333),1.0),0.0);
c.g = max(min(min(4*vPortion,4.0*(1.0-vPortion)),1.0),0.0);
c.b = max(min(4*(0.5-vPortion),1.0),0.0);
for (int a=-DOT_SIZE/2; a<DOT_SIZE/2; a++)
{
for (int b=-DOT_SIZE/2; b<DOT_SIZE/2; b++)
{
float cFactor = PARTICLE_BRIGHTNESS /(pow(exp(pow(PARTICLE_SHARPNESS*(xP+a-xPixel),2.0)) + exp(pow(PARTICLE_SHARPNESS*(yP+b-yPixel),2.0)),/*1.25*/0.75)+1.0);
//colorAt(int(xP+a),int(yP+b),c, cFactor, hdImage);
int pix = 3*(xP+a+WIDTH*(yP+b));
hdImage[pix+0] += c.r*cFactor;
hdImage[pix+1] += c.g*cFactor;
hdImage[pix+2] += c.b*cFactor;
}
}
}
}
}
float clamp(float x)
{
return max(min(x,1.0),0.0);
}
void writeRender(char* data, float* hdImage, int step)
{
for (int i=0; i<WIDTH*HEIGHT*3; i++)
{
data[i] = int(255.0*clamp(hdImage[i]));
}
int frame = step/RENDER_INTERVAL + 1;//RENDER_INTERVAL;
std::string name = "images/Step";
int i = 0;
if (frame == 1000) i++; // Evil hack to avoid extra 0 at 1000
for (i; i<4-floor(log(frame)/log(10)); i++)
{
name.append("0");
}
name.append(std::to_string(frame));
name.append(".ppm");
std::ofstream file (name, std::ofstream::binary);
if (file.is_open())
{
// size = file.tellg();
file << "P6\n" << WIDTH << " " << HEIGHT << "\n" << "255\n";
file.write(data, WIDTH*HEIGHT*3);
file.close();
}
}
|
88fb32eb4d187d961a0481ddb41365feb5673236.cu
|
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <random>
#include "Constants.h"
#include <cuda.h>
void initializeBodies(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass);
void runSimulation(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass, char* image, float* hdImage);
//__global__ void interactBodies(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass);
__global__ void clearF(float* Fx, float* Fy, float* Fz);
__global__ void interactn2(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass, float* Fx, float* Fy, float* Fz);
__global__ void updateAll(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass, float* Fx, float* Fy, float* Fz);
float magnitude(vec3 v);
void renderClear(char* image, float* hdImage);
__global__ void GPUrenderBodies(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass, float* hdImage);
float clamp(float x);
void writeRender(char* data, float* hdImage, int step);
int main()
{
std::cout << SYSTEM_THICKNESS << "AU thick disk\n";
char *image = new char[WIDTH*HEIGHT*3];
float *hdImage = new float[WIDTH*HEIGHT*3];
//struct body *bodies = new struct body[NUM_BODIES];
float* xpos = new float[NUM_BODIES];
float* ypos = new float[NUM_BODIES];
float* zpos = new float[NUM_BODIES];
float* xvel = new float[NUM_BODIES];
float* yvel = new float[NUM_BODIES];
float* zvel = new float[NUM_BODIES];
float* mass = new float[NUM_BODIES];
initializeBodies(xpos,ypos,zpos,xvel,yvel,zvel,mass);
runSimulation(xpos,ypos,zpos,xvel,yvel,zvel,mass, image, hdImage);
std::cout << "\nwe made it\n";
delete[] image;
delete[] hdImage;
delete[] xpos; delete[] ypos; delete[] zpos;
delete[] xvel; delete[] yvel; delete[] zvel;
delete[] mass;
return 0;
}
void initializeBodies(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass)
{
using std::uniform_real_distribution;
uniform_real_distribution<float> randAngle (0.0, 200.0*PI);
uniform_real_distribution<float> randRadius (INNER_BOUND, SYSTEM_SIZE);
uniform_real_distribution<float> randHeight (0.0, SYSTEM_THICKNESS);
std::default_random_engine gen (0);
float angle;
float radius;
float velocity;
//STARS
velocity = 0.67*sqrt((G*SOLAR_MASS)/(4*BINARY_SEPARATION*TO_METERS));
//STAR 1
xpos[0] = 0.0;///-BINARY_SEPARATION;
ypos[0] = 0.0;
zpos[0] = 0.0;
xvel[0] = 0.0;
yvel[0] = 0.0;//velocity;
zvel[0] = 0.0;
mass[0] = SOLAR_MASS;
///STARTS AT NUMBER OF STARS///
float totalExtraMass = 0.0;
for (int i=1; i<NUM_BODIES; i++)
{
angle = randAngle(gen);
radius = sqrt(SYSTEM_SIZE)*sqrt(randRadius(gen));
velocity = pow(((G*(SOLAR_MASS+((radius-INNER_BOUND)/SYSTEM_SIZE)*EXTRA_MASS*SOLAR_MASS))
/ (radius*TO_METERS)), 0.5);
xpos[i] = radius*cos(angle);
ypos[i] = radius*sin(angle);
zpos[i] = randHeight(gen)-SYSTEM_THICKNESS/2;
xvel[i] = velocity*sin(angle);
yvel[i] = -velocity*cos(angle);
zvel[i] = 0.0;
mass[i] = (EXTRA_MASS*SOLAR_MASS)/NUM_BODIES;
totalExtraMass += (EXTRA_MASS*SOLAR_MASS)/NUM_BODIES;
}
std::cout << "\nTotal Disk Mass: " << totalExtraMass;
std::cout << "\nEach Particle weight: " << (EXTRA_MASS*SOLAR_MASS)/NUM_BODIES
<< "\n______________________________\n";
}
void runSimulation(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass, char* image, float* hdImage)
{
float *d_xpos; float *d_ypos; float *d_zpos; float *d_xvel; float *d_yvel; float *d_zvel; float *d_mass; char *d_image; float *d_hdImage;
float *Fx; float *Fy; float* Fz;
cudaMalloc(&d_xpos,NUM_BODIES*sizeof(float));
cudaMalloc(&d_ypos,NUM_BODIES*sizeof(float));
cudaMalloc(&d_zpos,NUM_BODIES*sizeof(float));
cudaMalloc(&d_xvel,NUM_BODIES*sizeof(float));
cudaMalloc(&d_yvel,NUM_BODIES*sizeof(float));
cudaMalloc(&d_zvel,NUM_BODIES*sizeof(float));
cudaMalloc(&d_mass,NUM_BODIES*sizeof(float));
cudaMalloc(&Fx, NUM_BODIES*sizeof(float));
cudaMalloc(&Fy, NUM_BODIES*sizeof(float));
cudaMalloc(&Fz, NUM_BODIES*sizeof(float));
cudaMalloc(&d_image,WIDTH*HEIGHT*3*sizeof(char));
cudaMalloc(&d_hdImage,WIDTH*HEIGHT*3*sizeof(float));
int nBlocks=(NUM_BODIES+1024-1)/1024;
long nsqrBlocks=(NUM_BODIES/1024)*(NUM_BODIES/1024);
dim3 grid(nsqrBlocks,1024,1);
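// Launch geometry: the 2-D grid gives (NUM_BODIES/1024)^2 * 1024 blocks of 1024 threads
// for the all-pairs kernel (roughly one thread per ordered body pair when NUM_BODIES is a
// multiple of 1024). nBlocks already rounds up, so the "+1" in the 1-D launches below
// over-provisions by one block; the id < NUM_BODIES guards make that harmless.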
//createFirstFrame
renderClear(image, hdImage);
cudaMemcpy(d_hdImage,hdImage,WIDTH*HEIGHT*3*sizeof(float),cudaMemcpyHostToDevice);
GPUrenderBodies<<<nBlocks+1,1024>>>(d_xpos,d_ypos,d_zpos,d_xvel,d_yvel,d_zvel,d_mass,d_hdImage);
cudaMemcpy(hdImage,d_hdImage,WIDTH*HEIGHT*3*sizeof(float),cudaMemcpyDeviceToHost);
writeRender(image, hdImage, 1);
for (int step=1; step<STEP_COUNT; step++)
{
std::cout << "\nBeginning timestep: " << step;
printf("\nStartK\n");
cudaMemcpy(d_xpos, xpos, NUM_BODIES*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_ypos, ypos, NUM_BODIES*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_zpos, zpos, NUM_BODIES*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_xvel, xvel, NUM_BODIES*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_yvel, yvel, NUM_BODIES*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_zvel, zvel, NUM_BODIES*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_mass, mass, NUM_BODIES*sizeof(float), cudaMemcpyHostToDevice);
printf("StartK1\n");
clearF<<<nBlocks+1,1024>>>(Fx,Fy,Fz);
cudaDeviceSynchronize();
interactn2<<<grid,1024>>>(d_xpos,d_ypos,d_zpos,d_xvel,d_yvel,d_zvel,d_mass,Fx,Fy,Fz);
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if(error!=cudaSuccess)
{
printf("CUDA error:%s\n",cudaGetErrorString(error));
}
updateAll<<<nBlocks+1,1024>>>(d_xpos,d_ypos,d_zpos,d_xvel,d_yvel,d_zvel,d_mass,Fx,Fy,Fz);
printf("EndK\n");
cudaMemcpy( xpos,d_xpos, NUM_BODIES*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy( ypos, d_ypos,NUM_BODIES*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy( zpos,d_zpos, NUM_BODIES*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy( xvel,d_xvel, NUM_BODIES*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy( yvel,d_yvel, NUM_BODIES*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy( zvel,d_zvel, NUM_BODIES*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy( mass, d_mass,NUM_BODIES*sizeof(float), cudaMemcpyDeviceToHost);
//printf("EndK2\n");
if (step%RENDER_INTERVAL==0)
{
std::cout << "\nWriting frame " << step;
if (DEBUG_INFO) {std::cout << "\nClearing Pixels..." << std::flush;}
renderClear(image, hdImage);
if (DEBUG_INFO) {std::cout << "\nRendering Particles..." << std::flush;}
//renderBodies(pos, vel, hdImage);
cudaMemcpy(d_hdImage,hdImage,WIDTH*HEIGHT*3*sizeof(float),cudaMemcpyHostToDevice);
GPUrenderBodies<<<nBlocks+1,1024>>>(d_xpos,d_ypos,d_zpos,d_xvel,d_yvel,d_zvel,d_mass,d_hdImage);
cudaDeviceSynchronize();
cudaMemcpy(hdImage,d_hdImage,WIDTH*HEIGHT*3*sizeof(float),cudaMemcpyDeviceToHost);
if (DEBUG_INFO) {std::cout << "\nWriting frame to file..." << std::flush;}
writeRender(image, hdImage, step);
}
if (DEBUG_INFO) {std::cout << "\n-------Done------- timestep: "
<< step << "\n" << std::flush;}
}
cudaFree(d_xpos); cudaFree(d_ypos); cudaFree(d_zpos);
cudaFree(d_xvel); cudaFree(d_yvel); cudaFree(d_zvel);
cudaFree(d_mass); cudaFree(Fx); cudaFree(Fy); cudaFree(Fz);
cudaFree(d_image); cudaFree(d_hdImage);
}
__global__ void clearF(float* Fx, float* Fy, float* Fz)
{
int id= blockIdx.x*blockDim.x+threadIdx.x;
if(id<NUM_BODIES)
{
Fx[id]=0.0; Fy[id]=0.0; Fz[id]=0.0;
}
}
__global__ void interactn2(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass, float* Fx, float* Fy, float* Fz)
{
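// NOTE: blockDim.y is 1 for this launch, so this index reduces to
// blockIdx.x*blockDim.x + threadIdx.x + blockIdx.y; distinct (threadIdx.x, blockIdx.y)
// combinations can collide on the same id, so not every (i, j) pair is visited.
// One full flattening would be id = ((long)blockIdx.y*gridDim.x + blockIdx.x)*blockDim.x + threadIdx.x.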
long id = blockDim.x * blockIdx.x + threadIdx.x+ blockDim.y * blockIdx.y;
long i=id%NUM_BODIES;
long j=id/NUM_BODIES;
if(i < NUM_BODIES && j< NUM_BODIES && i!=j)
{
float v1 = (xpos[i]-xpos[j])*TO_METERS;
float v2 = (ypos[i]-ypos[j])*TO_METERS;
float v3 = (zpos[i]-zpos[j])*TO_METERS;
float dist = sqrt(v1*v1+v2*v2+v3*v3);
float F = TIME_STEP*(G*mass[i]*mass[j]) / ((dist*dist + SOFTENING*SOFTENING) * dist);
atomicAdd(&Fx[i],F*v1/mass[i]);
atomicAdd(&Fy[i],F*v2/mass[i]);
atomicAdd(&Fz[i],F*v3/mass[i]);
}
}
__global__ void updateAll(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass, float* Fx, float* Fy, float* Fz)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i<NUM_BODIES)
{
xvel[i] -= Fx[i];
yvel[i] -= Fy[i];
zvel[i] -= Fz[i];
xpos[i] += TIME_STEP*xvel[i]/TO_METERS;
ypos[i] += TIME_STEP*yvel[i]/TO_METERS;
zpos[i] += TIME_STEP*zvel[i]/TO_METERS;
}
}
float magnitude(vec3 v)
{
return sqrt(v.x*v.x+v.y*v.y+v.z*v.z);
}
void renderClear(char* image, float* hdImage)
{
for (int i=0; i<WIDTH*HEIGHT*3; i++)
{
image[i] = 0; //char(image[i]/1.2);
hdImage[i] = 0.0;
}
}
__global__ void GPUrenderBodies(float* xpos, float* ypos, float* zpos, float* xvel, float* yvel, float* zvel, float* mass, float* hdImage)
{
/// ORTHOGONAL PROJECTION
int i = blockIdx.x*blockDim.x+threadIdx.x;
float velocityMax = MAX_VEL_COLOR; //35000
float velocityMin = sqrt(0.8*(G*(SOLAR_MASS+EXTRA_MASS*SOLAR_MASS))/
(SYSTEM_SIZE*TO_METERS)); //MIN_VEL_COLOR;
if(i<NUM_BODIES)
{
float vxsqr=xvel[i]*xvel[i];
float vysqr=yvel[i]*yvel[i];
float vzsqr=zvel[i]*zvel[i];
float vMag = sqrt(vxsqr+vysqr+vzsqr);
int x = (WIDTH/2.0)*(1.0+xpos[i]/(SYSTEM_SIZE*RENDER_SCALE));
int y = (HEIGHT/2.0)*(1.0+ypos[i]/(SYSTEM_SIZE*RENDER_SCALE));
if (x>DOT_SIZE && x<WIDTH-DOT_SIZE && y>DOT_SIZE && y<HEIGHT-DOT_SIZE)
{
float vPortion = sqrt((vMag-velocityMin) / velocityMax);
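// vPortion maps speed onto the colour ramp below; bodies slower than velocityMin
// would pass a negative argument to sqrt.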
float xPixel = (WIDTH/2.0)*(1.0+xpos[i]/(SYSTEM_SIZE*RENDER_SCALE));
float yPixel = (HEIGHT/2.0)*(1.0+ypos[i]/(SYSTEM_SIZE*RENDER_SCALE));
float xP = floor(xPixel);
float yP = floor(yPixel);
color c;
c.r = max(min(4*(vPortion-0.333),1.0),0.0);
c.g = max(min(min(4*vPortion,4.0*(1.0-vPortion)),1.0),0.0);
c.b = max(min(4*(0.5-vPortion),1.0),0.0);
for (int a=-DOT_SIZE/2; a<DOT_SIZE/2; a++)
{
for (int b=-DOT_SIZE/2; b<DOT_SIZE/2; b++)
{
float cFactor = PARTICLE_BRIGHTNESS /(pow(exp(pow(PARTICLE_SHARPNESS*(xP+a-xPixel),2.0)) + exp(pow(PARTICLE_SHARPNESS*(yP+b-yPixel),2.0)),/*1.25*/0.75)+1.0);
//colorAt(int(xP+a),int(yP+b),c, cFactor, hdImage);
int pix = 3*(xP+a+WIDTH*(yP+b));
hdImage[pix+0] += c.r*cFactor;
hdImage[pix+1] += c.g*cFactor;
hdImage[pix+2] += c.b*cFactor;
}
}
}
}
}
float clamp(float x)
{
return max(min(x,1.0),0.0);
}
void writeRender(char* data, float* hdImage, int step)
{
for (int i=0; i<WIDTH*HEIGHT*3; i++)
{
data[i] = int(255.0*clamp(hdImage[i]));
}
int frame = step/RENDER_INTERVAL + 1;//RENDER_INTERVAL;
std::string name = "images/Step";
int i = 0;
if (frame == 1000) i++; // Evil hack to avoid extra 0 at 1000
for (i; i<4-floor(log(frame)/log(10)); i++)
{
name.append("0");
}
name.append(std::to_string(frame));
name.append(".ppm");
std::ofstream file (name, std::ofstream::binary);
if (file.is_open())
{
// size = file.tellg();
file << "P6\n" << WIDTH << " " << HEIGHT << "\n" << "255\n";
file.write(data, WIDTH*HEIGHT*3);
file.close();
}
}
|
98d873f03b6daf1177bbe873b5f83c8d4ca94686.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "cuda_math.cuh"
typedef unsigned char uchar;
typedef unsigned int uint;
typedef unsigned short ushort;
typedef unsigned long ulong;
typedef unsigned long long uint64;
//-------------------------------- GVDB Data Structure
#define CUDA_PATHWAY
#include "cuda_gvdb_scene.cuh" // GVDB Scene
#include "cuda_gvdb_nodes.cuh" // GVDB Node structure
#include "cuda_gvdb_geom.cuh" // GVDB Geom helpers
#include "cuda_gvdb_dda.cuh" // GVDB DDA
#include "cuda_gvdb_raycast.cuh" // GVDB Raycasting
//--------------------------------
inline __host__ __device__ float3 reflect3 (float3 i, float3 n)
{
return i - 2.0f * n * dot(n,i);
}
// Custom raycast kernel
extern "C" __global__ void raycast_kernel ( uchar4* outBuf )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= scn.width || y >= scn.height ) return;
float3 hit = make_float3(NOHIT,NOHIT,NOHIT);
float4 clr = make_float4(1,1,1,1);
float3 norm;
float3 rdir = normalize ( getViewRay ( (float(x)+0.5)/scn.width, (float(y)+0.5)/scn.height ) );
// Ray march - trace a ray into GVDB and find the closest hit point
rayCast ( SCN_SHADE, gvdb.top_lev, 0, scn.campos, rdir, hit, norm, clr, raySurfaceBrick );
if ( hit.z != NOHIT) {
float3 lightdir = normalize ( scn.light_pos - hit );
// Shading - custom look
float3 eyedir = normalize ( scn.campos - hit );
float3 R = normalize ( reflect3 ( eyedir, norm ) ); // reflection vector
float diffuse = max(0.0f, dot( norm, lightdir ));
float refl = min(1.0f, max(0.0f, R.y ));
clr = diffuse*0.6 + refl * make_float4(0,0.3,0.7, 1.0);
} else {
clr = make_float4 ( 0.0, 0.0, 0.1, 1.0 );
}
outBuf [ y*scn.width + x ] = make_uchar4( clr.x*255, clr.y*255, clr.z*255, 255 );
}
|
98d873f03b6daf1177bbe873b5f83c8d4ca94686.cu
|
#include <stdio.h>
#include "cuda_math.cuh"
typedef unsigned char uchar;
typedef unsigned int uint;
typedef unsigned short ushort;
typedef unsigned long ulong;
typedef unsigned long long uint64;
//-------------------------------- GVDB Data Structure
#define CUDA_PATHWAY
#include "cuda_gvdb_scene.cuh" // GVDB Scene
#include "cuda_gvdb_nodes.cuh" // GVDB Node structure
#include "cuda_gvdb_geom.cuh" // GVDB Geom helpers
#include "cuda_gvdb_dda.cuh" // GVDB DDA
#include "cuda_gvdb_raycast.cuh" // GVDB Raycasting
//--------------------------------
inline __host__ __device__ float3 reflect3 (float3 i, float3 n)
{
return i - 2.0f * n * dot(n,i);
}
// Custom raycast kernel
extern "C" __global__ void raycast_kernel ( uchar4* outBuf )
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x >= scn.width || y >= scn.height ) return;
float3 hit = make_float3(NOHIT,NOHIT,NOHIT);
float4 clr = make_float4(1,1,1,1);
float3 norm;
float3 rdir = normalize ( getViewRay ( (float(x)+0.5)/scn.width, (float(y)+0.5)/scn.height ) );
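// Primary ray direction through the centre of pixel (x, y), in normalized [0,1] screen coordinates.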
// Ray march - trace a ray into GVDB and find the closest hit point
rayCast ( SCN_SHADE, gvdb.top_lev, 0, scn.campos, rdir, hit, norm, clr, raySurfaceBrick );
if ( hit.z != NOHIT) {
float3 lightdir = normalize ( scn.light_pos - hit );
// Shading - custom look
float3 eyedir = normalize ( scn.campos - hit );
float3 R = normalize ( reflect3 ( eyedir, norm ) ); // reflection vector
float diffuse = max(0.0f, dot( norm, lightdir ));
float refl = min(1.0f, max(0.0f, R.y ));
clr = diffuse*0.6 + refl * make_float4(0,0.3,0.7, 1.0);
} else {
clr = make_float4 ( 0.0, 0.0, 0.1, 1.0 );
}
outBuf [ y*scn.width + x ] = make_uchar4( clr.x*255, clr.y*255, clr.z*255, 255 );
}
|
a56d37af0e278337e3daec5a647e377d40617f95.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <sys/time.h>
#define TPB 256
#define EPSILON 0.0005
#define NTIME 1
#define ARRAY_SIZE 163840000
unsigned long get_time();
unsigned long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
unsigned long ret = tv.tv_usec;
ret /= 1000;
ret += (tv.tv_sec * 1000);
return ret;
}
__global__ void SAXPYgpuKernel(float *x, float *y, float a)
{
const long i = blockIdx.x*blockDim.x + threadIdx.x;
y[i] = x[i]*a + y[i];
}
void SAXPYcpu(float* x, float* y, float a){
for (long i = 0; i < ARRAY_SIZE ; i++){
y[i] = x[i]*a + y[i];
}
}
bool equalVectors(float* a, float* b){
for (long i = 0; i < ARRAY_SIZE; i++){
if (std::abs(a[i] - b[i]) > EPSILON){
return false;
}
}
return true;
}
int main()
{
// seed for random number
srand (static_cast <unsigned> (time(0)));
// declare constant a
const float a = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
// Declare pointers for an array of floats
float* x = (float*) malloc (sizeof(float)*ARRAY_SIZE);
float* y = (float*) malloc (sizeof(float)*ARRAY_SIZE);
// set random values
for (long i = 0 ; i < ARRAY_SIZE ; ++i){
x[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
y[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
}
// CPU execution
long start_time_cpu = get_time();
float* x_cpu = (float*) malloc (sizeof(float)*ARRAY_SIZE);
float* y_cpu = (float*) malloc (sizeof(float)*ARRAY_SIZE);
// copy vector to use in the CPU
std::memcpy(x_cpu, x, ARRAY_SIZE*sizeof(float));
std::memcpy(y_cpu, y, ARRAY_SIZE*sizeof(float));
printf("Computing SAXPY on the CPU");
SAXPYcpu(x_cpu, y_cpu, a);
printf("Done\n");
long end_time_cpu = get_time();
// GPU execution
// Declare pointers for an array of floats
long start_time_gpu = get_time();
float* x_gpu = 0;
float* y_gpu = 0;
float* y_gpu_res = (float*) malloc (sizeof(float)*ARRAY_SIZE);
// Allocate device memory
hipMalloc(&x_gpu, ARRAY_SIZE*sizeof(float));
hipMalloc(&y_gpu, ARRAY_SIZE*sizeof(float));
// Copy array to device
hipMemcpy(x_gpu, x, ARRAY_SIZE*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(y_gpu, y, ARRAY_SIZE*sizeof(float), hipMemcpyHostToDevice);
// Launch kernel to compute SAXPY
printf("Computing SAXPY on the GPU...");
hipLaunchKernelGGL(( SAXPYgpuKernel), dim3((ARRAY_SIZE+TPB-1)/TPB), dim3(TPB), 0, 0, x_gpu, y_gpu, a);
// Synchronize device
hipDeviceSynchronize();
// Copy back from device to CPU
hipMemcpy(y_gpu_res, y_gpu, ARRAY_SIZE*sizeof(float), hipMemcpyDeviceToHost);
printf("Done\n");
long end_time_gpu = get_time();
// Compare results
printf("Comparing the output for each implementation for ARRAY_SIZE = %d; Comparison: ", ARRAY_SIZE);
equalVectors(y_gpu_res, y_cpu) ? printf("Correct\n") : printf("Incorrect\n");
printf("CPU time: %ld ms\n", end_time_cpu-start_time_cpu);
printf("GPU time: %ld ms\n\n", end_time_gpu-start_time_gpu);
// Free the memory
hipFree(x_gpu);
hipFree(y_gpu);
free(x);
free(y);
free(x_cpu);
free(y_cpu);
free(y_gpu_res);
return 0;
}
|
a56d37af0e278337e3daec5a647e377d40617f95.cu
|
#include <stdio.h>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <sys/time.h>
#define TPB 256
#define EPSILON 0.0005
#define NTIME 1
#define ARRAY_SIZE 163840000
unsigned long get_time();
unsigned long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
unsigned long ret = tv.tv_usec;
ret /= 1000;
ret += (tv.tv_sec * 1000);
return ret;
}
__global__ void SAXPYgpuKernel(float *x, float *y, float a)
{
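// No bounds check: the launch in main() rounds the grid up, but ARRAY_SIZE (163,840,000)
// is an exact multiple of TPB (256), so no thread indexes past the end of the arrays.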
const long i = blockIdx.x*blockDim.x + threadIdx.x;
y[i] = x[i]*a + y[i];
}
void SAXPYcpu(float* x, float* y, float a){
for (long i = 0; i < ARRAY_SIZE ; i++){
y[i] = x[i]*a + y[i];
}
}
bool equalVectors(float* a, float* b){
for (long i = 0; i < ARRAY_SIZE; i++){
if (std::abs(a[i] - b[i]) > EPSILON){
return false;
}
}
return true;
}
int main()
{
// seed for random number
srand (static_cast <unsigned> (time(0)));
// declare constant a
const float a = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
// Declare pointers for an array of floats
float* x = (float*) malloc (sizeof(float)*ARRAY_SIZE);
float* y = (float*) malloc (sizeof(float)*ARRAY_SIZE);
// set random values
for (long i = 0 ; i < ARRAY_SIZE ; ++i){
x[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
y[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
}
// CPU execution
long start_time_cpu = get_time();
float* x_cpu = (float*) malloc (sizeof(float)*ARRAY_SIZE);
float* y_cpu = (float*) malloc (sizeof(float)*ARRAY_SIZE);
// copy vector to use in the CPU
std::memcpy(x_cpu, x, ARRAY_SIZE*sizeof(float));
std::memcpy(y_cpu, y, ARRAY_SIZE*sizeof(float));
printf("Computing SAXPY on the CPU…");
SAXPYcpu(x_cpu, y_cpu, a);
printf("Done\n");
long end_time_cpu = get_time();
// GPU execution
// Declare pointers for an array of floats
long start_time_gpu = get_time();
float* x_gpu = 0;
float* y_gpu = 0;
float* y_gpu_res = (float*) malloc (sizeof(float)*ARRAY_SIZE);
// Allocate device memory
cudaMalloc(&x_gpu, ARRAY_SIZE*sizeof(float));
cudaMalloc(&y_gpu, ARRAY_SIZE*sizeof(float));
// Copy array to device
cudaMemcpy(x_gpu, x, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(y_gpu, y, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice);
// Launch kernel to compute SAXPY
printf("Computing SAXPY on the GPU...");
SAXPYgpuKernel<<<(ARRAY_SIZE+TPB-1)/TPB, TPB>>>(x_gpu, y_gpu, a);
// Synchronize device
cudaDeviceSynchronize();
// Copy back from device to CPU
cudaMemcpy(y_gpu_res, y_gpu, ARRAY_SIZE*sizeof(float), cudaMemcpyDeviceToHost);
printf("Done\n");
long end_time_gpu = get_time();
// Compare results
printf("Comparing the output for each implementation for ARRAY_SIZE = %d; Comparison: ", ARRAY_SIZE);
equalVectors(y_gpu_res, y_cpu) ? printf("Correct\n") : printf("Incorrect\n");
printf("CPU time: %ld ms\n", end_time_cpu-start_time_cpu);
printf("GPU time: %ld ms\n\n", end_time_gpu-start_time_gpu);
// Free the memory
cudaFree(x_gpu);
cudaFree(y_gpu);
free(x);
free(y);
free(x_cpu);
free(y_cpu);
free(y_gpu_res);
return 0;
}
|
4df0fc3e5bd11f7713e2a01db97c8ffe8e8e54a9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// All C includes must be wrapped in extern "C"
extern "C" {
#include "global.h"
#include "util.h"
#include "constants.h"
}
#include <assert.h>
#include <stdio.h>
#define PIXEL_SIZE (512)
#define TILE_SIZE (16)
#define GRID_SIZE (PIXEL_SIZE/TILE_SIZE)
#define NUM_GPU (2)
#define RRP (100)
float *lens_x;
float *lens_y;
float *lens_mass;
size_t nobjects;
/* Pointers to the lens x,y co-ordinates on the GPU device */
float *d_lens_x[NUM_GPU];
float *d_lens_y[NUM_GPU];
float *d_lens_mass[NUM_GPU];
typedef struct vars {
unsigned int rpp;
float kappa_c, gamma_, source_scale;
float image_scale_x, image_scale_y;
float increment_x, increment_y;
} vars;
void init_var(vars *var) {
var->rpp = RRP/NUM_GPU;
var->kappa_c = kappa_c;
var->gamma_ = gamma_;
var->source_scale = source_scale;
var->image_scale_x = image_scale_x;
var->image_scale_y = image_scale_y;
var->increment_x = 0;
var->increment_y = 0;
}
int total_r(unsigned int *results, unsigned int size){
unsigned int i, total = 0;
for(i = 0; i < size; ++i){
total += results[i];
}
return total;
}
int highest(unsigned int *results, unsigned int size) {
unsigned int i, highest_count = 0;
for(i = 0; i < size; ++i){
if (results[i] > highest_count)
highest_count = results[i];
}
return highest_count;
}
__global__ void group_glensing(const float *lens_x, const float *lens_y, const float *lens_mass, const size_t nobjects, unsigned int* results, const vars* v) {
const unsigned int row = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int col = blockIdx.y*blockDim.y + threadIdx.y;
const float initial_x = (-v->image_scale_x) + row*v->increment_x;
const float initial_y = (-v->image_scale_y) + col*v->increment_y;
const size_t ARRSIZE = 100;
const unsigned int uniform_box = sqrtf((float)v->rpp);
float start_x[ARRSIZE], start_y[ARRSIZE];
float group_x[ARRSIZE], group_y[ARRSIZE];
unsigned int it, noise_x, noise_y;
size_t j, k;
unsigned int px, py;
float dist;
for(it = 0; it < v->rpp/ARRSIZE; ++it) {
// Set up the co-ordinates for the group
for(j = 0; j < ARRSIZE; ++j) {
noise_x = ((it * ARRSIZE) + j) % uniform_box;
noise_y = ((it * ARRSIZE) + j) / uniform_box;
start_x[j] = initial_x + noise_x * (v->increment_x / uniform_box);
start_y[j] = initial_y + noise_y * (v->increment_y / uniform_box);
group_x[j] = (1-v->gamma_)*start_x[j] - v->kappa_c*start_x[j];
group_y[j] = (1+v->gamma_)*start_y[j] - v->kappa_c*start_y[j];
}
// Calculate the impact of each lens
float lm, lx, ly;
for(k = 0; k < nobjects; ++k) {
lx = lens_x[k];
ly = lens_y[k];
lm = lens_mass[k];
for(j = 0; j < ARRSIZE; ++j) {
dist = pow(start_x[j] - lx, 2) + pow(start_y[j] - ly, 2);
group_x[j] -= lm * (start_x[j] - lx) / dist;
group_y[j] -= lm * (start_y[j] - ly) / dist;
}
}
// Plot the output for each of the rays
for(j = 0; j < ARRSIZE; ++j) {
const float source_scale = v->source_scale;
if ((group_x[j] >= -source_scale/2) && (group_x[j] <= source_scale/2) &&
(group_y[j] >= -source_scale/2) && (group_y[j] <= source_scale/2)) {
px = (group_x[j] + source_scale/2) / (source_scale/PIXEL_SIZE);
py = PIXEL_SIZE - (group_y[j] + source_scale/2) / (source_scale/PIXEL_SIZE);
atomicAdd(&results[py * PIXEL_SIZE + px], 1);
}
}
}
}
__global__ void glensing(const float *lens_x, const float *lens_y, const float *lens_mass, const size_t nobjects, unsigned int* results, const vars* v) {
const unsigned int row = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int col = blockIdx.y*blockDim.y + threadIdx.y;
const float initial_x = (-v->image_scale_x) + row*v->increment_x;
const float initial_y = (-v->image_scale_y) + col*v->increment_y;
const unsigned int uniform_box = sqrtf((float)v->rpp);
float start_x, start_y, dx, dy;
unsigned int it, noise_x, noise_y;
size_t k;
float dist;
// TODO: Perform multiple ray calculations simultaneously
// BUG: A larger value (> 100) of rpp results in a completely blank image
for(it = 0; it < v->rpp; ++it) {
noise_x = it % uniform_box;
noise_y = it / uniform_box;
start_x = initial_x + noise_x * v->increment_x / uniform_box;
start_y = initial_y + noise_y * v->increment_y / uniform_box;
dx = (1-v->gamma_)*start_x - v->kappa_c*start_x;
dy = (1+v->gamma_)*start_y - v->kappa_c*start_y;
for(k = 0; k < nobjects; ++k) {
dist = pow(start_x - lens_x[k], 2) + pow(start_y - lens_y[k], 2);
dx -= lens_mass[k] * (start_x - lens_x[k]) / dist;
dy -= lens_mass[k] * (start_y - lens_y[k]) / dist;
}
const float source_scale = v->source_scale;
if ((dx >= -source_scale/2) && (dx <= source_scale/2) &&
(dy >= -source_scale/2) && (dy <= source_scale/2)) {
int px = (dx + source_scale/2) / (source_scale/PIXEL_SIZE);
int py = PIXEL_SIZE - (dy + source_scale/2) / (source_scale/PIXEL_SIZE);
atomicAdd(&results[py * PIXEL_SIZE + px], 1);
//results[py * PIXEL_SIZE + px] += 1;
}
}
}
int main(int argc, char** argv) {
float increment_x, increment_y;
int t;
// Load relevant settings and data
if (argc < 2) error("Requires argument with lens positions and optional mass");
setup_constants();
vars *variables = (vars *)salloc(sizeof(vars));
init_var(variables);
read_lenses(argv[1]);
fprintf(stderr, "X %f and Y %f\n", image_scale_x, image_scale_y);
increment_x = (image_scale_x * 2) / PIXEL_SIZE;
increment_y = (image_scale_y * 2) / PIXEL_SIZE;
variables->increment_x = increment_x;
variables->increment_y = increment_y;
fprintf(stderr, "Increments for X %f and Y %f\n", increment_x, increment_y);
unsigned int *results[NUM_GPU];
for(t=0; t<NUM_GPU; ++t)
results[t] = (unsigned int *)calloc(NUM_GPU * PIXEL_SIZE * PIXEL_SIZE, sizeof(unsigned int));
unsigned int *d_results[NUM_GPU];
if (!results) error("calloc failed in allocating the result array");
// Setting up CUDA global memory
vars *d_variables[NUM_GPU];
int gpu_c;
for(gpu_c = 0; gpu_c < NUM_GPU; ++gpu_c){
hipSetDevice(gpu_c);
hipMalloc(&d_lens_x[gpu_c], sizeof(float) * nobjects);
hipMalloc(&d_lens_y[gpu_c], sizeof(float) * nobjects);
hipMalloc(&d_lens_mass[gpu_c], sizeof(float) * nobjects);
hipMalloc(&d_results[gpu_c], PIXEL_SIZE*PIXEL_SIZE*sizeof(unsigned int));
hipMalloc(&d_variables[gpu_c], sizeof(vars));
hipMemset(d_results[gpu_c], 0, PIXEL_SIZE*PIXEL_SIZE*sizeof(unsigned int));
hipMemcpy(d_variables[gpu_c], variables, sizeof(vars), hipMemcpyHostToDevice);
hipMemcpy(d_lens_x[gpu_c], lens_x, sizeof(float) * nobjects, hipMemcpyHostToDevice);
hipMemcpy(d_lens_y[gpu_c], lens_y, sizeof(float) * nobjects, hipMemcpyHostToDevice);
hipMemcpy(d_lens_mass[gpu_c], lens_mass, sizeof(float) * nobjects, hipMemcpyHostToDevice);
// Perform gravitational microlensing
dim3 bdim(TILE_SIZE, TILE_SIZE);
dim3 gdim(GRID_SIZE, GRID_SIZE);
hipLaunchKernelGGL(( glensing), dim3(gdim), dim3(bdim), 0, 0, d_lens_x[gpu_c], d_lens_y[gpu_c], d_lens_mass[gpu_c], nobjects, d_results[gpu_c], d_variables[gpu_c]);
hipMemcpy(results[gpu_c], d_results[gpu_c], PIXEL_SIZE*PIXEL_SIZE*sizeof(unsigned int), hipMemcpyDeviceToHost);
}
//group_glensing<<<gdim, bdim>>>(d_lens_x, d_lens_y, d_lens_mass, nobjects, d_results, d_variables);
unsigned int *final_result = (unsigned int *)calloc(PIXEL_SIZE * PIXEL_SIZE, sizeof(unsigned int));
int r_c=0;
for(; r_c < PIXEL_SIZE*PIXEL_SIZE; ++r_c){
for(t=0; t<NUM_GPU; ++t)
final_result[r_c] += results[t][r_c];
}
int total = total_r(final_result, PIXEL_SIZE * PIXEL_SIZE);
printf("The total num of rays is %d\n", total);
int highest_c = highest(final_result, PIXEL_SIZE * PIXEL_SIZE);
write_pgm(final_result, PIXEL_SIZE, PIXEL_SIZE, highest_c);
// Free the memory allocated during processing
// GPU
for(t=0; t<NUM_GPU; ++t){
hipSetDevice(t);
hipFree(d_lens_x[t]);
hipFree(d_lens_y[t]);
hipFree(d_lens_mass[t]);
hipFree(d_results[t]);
hipFree(d_variables[t]);
}
// CPU
free(lens_x);
free(lens_y);
free(lens_mass);
//free(results);
return 0;
}
|
4df0fc3e5bd11f7713e2a01db97c8ffe8e8e54a9.cu
|
// All C includes must be wrapped in extern "C"
extern "C" {
#include "global.h"
#include "util.h"
#include "constants.h"
}
#include <assert.h>
#include <stdio.h>
#define PIXEL_SIZE (512)
#define TILE_SIZE (16)
#define GRID_SIZE (PIXEL_SIZE/TILE_SIZE)
#define NUM_GPU (2)
#define RRP (100)
float *lens_x;
float *lens_y;
float *lens_mass;
size_t nobjects;
/* Pointers to the lens x,y co-ordinates on the GPU device */
float *d_lens_x[NUM_GPU];
float *d_lens_y[NUM_GPU];
float *d_lens_mass[NUM_GPU];
typedef struct vars {
unsigned int rpp;
float kappa_c, gamma_, source_scale;
float image_scale_x, image_scale_y;
float increment_x, increment_y;
} vars;
void init_var(vars *var) {
var->rpp = RRP/NUM_GPU;
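// rpp: rays traced per image pixel; the total budget RRP is split evenly across the GPUs.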
var->kappa_c = kappa_c;
var->gamma_ = gamma_;
var->source_scale = source_scale;
var->image_scale_x = image_scale_x;
var->image_scale_y = image_scale_y;
var->increment_x = 0;
var->increment_y = 0;
}
int total_r(unsigned int *results, unsigned int size){
unsigned int i, total = 0;
for(i = 0; i < size; ++i){
total += results[i];
}
return total;
}
int highest(unsigned int *results, unsigned int size) {
unsigned int i, highest_count = 0;
for(i = 0; i < size; ++i){
if (results[i] > highest_count)
highest_count = results[i];
}
return highest_count;
}
__global__ void group_glensing(const float *lens_x, const float *lens_y, const float *lens_mass, const size_t nobjects, unsigned int* results, const vars* v) {
const unsigned int row = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int col = blockIdx.y*blockDim.y + threadIdx.y;
const float initial_x = (-v->image_scale_x) + row*v->increment_x;
const float initial_y = (-v->image_scale_y) + col*v->increment_y;
const size_t ARRSIZE = 100;
const unsigned int uniform_box = sqrtf((float)v->rpp);
float start_x[ARRSIZE], start_y[ARRSIZE];
float group_x[ARRSIZE], group_y[ARRSIZE];
unsigned int it, noise_x, noise_y;
size_t j, k;
unsigned int px, py;
float dist;
for(it = 0; it < v->rpp/ARRSIZE; ++it) {
// Set up the co-ordinates for the group
for(j = 0; j < ARRSIZE; ++j) {
noise_x = ((it * ARRSIZE) + j) % uniform_box;
noise_y = ((it * ARRSIZE) + j) / uniform_box;
start_x[j] = initial_x + noise_x * (v->increment_x / uniform_box);
start_y[j] = initial_y + noise_y * (v->increment_y / uniform_box);
group_x[j] = (1-v->gamma_)*start_x[j] - v->kappa_c*start_x[j];
group_y[j] = (1+v->gamma_)*start_y[j] - v->kappa_c*start_y[j];
}
// Calculate the impact of each lens
float lm, lx, ly;
for(k = 0; k < nobjects; ++k) {
lx = lens_x[k];
ly = lens_y[k];
lm = lens_mass[k];
for(j = 0; j < ARRSIZE; ++j) {
dist = pow(start_x[j] - lx, 2) + pow(start_y[j] - ly, 2);
group_x[j] -= lm * (start_x[j] - lx) / dist;
group_y[j] -= lm * (start_y[j] - ly) / dist;
}
}
// Plot the output for each of the rays
for(j = 0; j < ARRSIZE; ++j) {
const float source_scale = v->source_scale;
if ((group_x[j] >= -source_scale/2) && (group_x[j] <= source_scale/2) &&
(group_y[j] >= -source_scale/2) && (group_y[j] <= source_scale/2)) {
px = (group_x[j] + source_scale/2) / (source_scale/PIXEL_SIZE);
py = PIXEL_SIZE - (group_y[j] + source_scale/2) / (source_scale/PIXEL_SIZE);
atomicAdd(&results[py * PIXEL_SIZE + px], 1);
}
}
}
}
__global__ void glensing(const float *lens_x, const float *lens_y, const float *lens_mass, const size_t nobjects, unsigned int* results, const vars* v) {
const unsigned int row = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int col = blockIdx.y*blockDim.y + threadIdx.y;
const float initial_x = (-v->image_scale_x) + row*v->increment_x;
const float initial_y = (-v->image_scale_y) + col*v->increment_y;
const unsigned int uniform_box = sqrtf((float)v->rpp);
float start_x, start_y, dx, dy;
unsigned int it, noise_x, noise_y;
size_t k;
float dist;
// TODO: Perform multiple ray calculations simultaneously
// BUG: A larger value (> 100) of rpp results in a completely blank image
for(it = 0; it < v->rpp; ++it) {
noise_x = it % uniform_box;
noise_y = it / uniform_box;
start_x = initial_x + noise_x * v->increment_x / uniform_box;
start_y = initial_y + noise_y * v->increment_y / uniform_box;
dx = (1-v->gamma_)*start_x - v->kappa_c*start_x;
dy = (1+v->gamma_)*start_y - v->kappa_c*start_y;
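// Subtract the deflection contributed by each point lens; dist holds the squared
// image-plane separation between the ray and the lens.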
for(k = 0; k < nobjects; ++k) {
dist = pow(start_x - lens_x[k], 2) + pow(start_y - lens_y[k], 2);
dx -= lens_mass[k] * (start_x - lens_x[k]) / dist;
dy -= lens_mass[k] * (start_y - lens_y[k]) / dist;
}
const float source_scale = v->source_scale;
if ((dx >= -source_scale/2) && (dx <= source_scale/2) &&
(dy >= -source_scale/2) && (dy <= source_scale/2)) {
int px = (dx + source_scale/2) / (source_scale/PIXEL_SIZE);
int py = PIXEL_SIZE - (dy + source_scale/2) / (source_scale/PIXEL_SIZE);
atomicAdd(&results[py * PIXEL_SIZE + px], 1);
//results[py * PIXEL_SIZE + px] += 1;
}
}
}
int main(int argc, char** argv) {
float increment_x, increment_y;
int t;
// Load relevant settings and data
if (argc < 2) error("Requires argument with lens positions and optional mass");
setup_constants();
vars *variables = (vars *)salloc(sizeof(vars));
init_var(variables);
read_lenses(argv[1]);
fprintf(stderr, "X %f and Y %f\n", image_scale_x, image_scale_y);
increment_x = (image_scale_x * 2) / PIXEL_SIZE;
increment_y = (image_scale_y * 2) / PIXEL_SIZE;
variables->increment_x = increment_x;
variables->increment_y = increment_y;
fprintf(stderr, "Increments for X %f and Y %f\n", increment_x, increment_y);
unsigned int *results[NUM_GPU];
for(t=0; t<NUM_GPU; ++t)
results[t] = (unsigned int *)calloc(NUM_GPU * PIXEL_SIZE * PIXEL_SIZE, sizeof(unsigned int));
unsigned int *d_results[NUM_GPU];
if (!results) error("calloc failed in allocating the result array");
// Setting up CUDA global memory
vars *d_variables[NUM_GPU];
int gpu_c;
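// Each device gets its own copy of the lens data, parameter block and result grid;
// every GPU traces rpp = RRP/NUM_GPU rays per pixel and the per-device counts are
// summed on the host afterwards.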
for(gpu_c = 0; gpu_c < NUM_GPU; ++gpu_c){
cudaSetDevice(gpu_c);
cudaMalloc(&d_lens_x[gpu_c], sizeof(float) * nobjects);
cudaMalloc(&d_lens_y[gpu_c], sizeof(float) * nobjects);
cudaMalloc(&d_lens_mass[gpu_c], sizeof(float) * nobjects);
cudaMalloc(&d_results[gpu_c], PIXEL_SIZE*PIXEL_SIZE*sizeof(unsigned int));
cudaMalloc(&d_variables[gpu_c], sizeof(vars));
cudaMemset(d_results[gpu_c], 0, PIXEL_SIZE*PIXEL_SIZE*sizeof(unsigned int));
cudaMemcpy(d_variables[gpu_c], variables, sizeof(vars), cudaMemcpyHostToDevice);
cudaMemcpy(d_lens_x[gpu_c], lens_x, sizeof(float) * nobjects, cudaMemcpyHostToDevice);
cudaMemcpy(d_lens_y[gpu_c], lens_y, sizeof(float) * nobjects, cudaMemcpyHostToDevice);
cudaMemcpy(d_lens_mass[gpu_c], lens_mass, sizeof(float) * nobjects, cudaMemcpyHostToDevice);
// Perform gravitational microlensing
dim3 bdim(TILE_SIZE, TILE_SIZE);
dim3 gdim(GRID_SIZE, GRID_SIZE);
glensing<<<gdim, bdim>>>(d_lens_x[gpu_c], d_lens_y[gpu_c], d_lens_mass[gpu_c], nobjects, d_results[gpu_c], d_variables[gpu_c]);
cudaMemcpy(results[gpu_c], d_results[gpu_c], PIXEL_SIZE*PIXEL_SIZE*sizeof(unsigned int), cudaMemcpyDeviceToHost);
}
//group_glensing<<<gdim, bdim>>>(d_lens_x, d_lens_y, d_lens_mass, nobjects, d_results, d_variables);
unsigned int *final_result = (unsigned int *)calloc(PIXEL_SIZE * PIXEL_SIZE, sizeof(unsigned int));
int r_c=0;
for(; r_c < PIXEL_SIZE*PIXEL_SIZE; ++r_c){
for(t=0; t<NUM_GPU; ++t)
final_result[r_c] += results[t][r_c];
}
int total = total_r(final_result, PIXEL_SIZE * PIXEL_SIZE);
printf("The total num of rays is %d\n", total);
int highest_c = highest(final_result, PIXEL_SIZE * PIXEL_SIZE);
write_pgm(final_result, PIXEL_SIZE, PIXEL_SIZE, highest_c);
// Free the memory allocated during processing
// GPU
for(t=0; t<NUM_GPU; ++t){
cudaSetDevice(t);
cudaFree(d_lens_x[t]);
cudaFree(d_lens_y[t]);
cudaFree(d_lens_mass[t]);
cudaFree(d_results[t]);
cudaFree(d_variables[t]);
}
// CPU
free(lens_x);
free(lens_y);
free(lens_mass);
//free(results);
return 0;
}
|
2ccbe3715a9e57ef07970b2b6b964570ab906c8e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ============================================================================
*
* Authors:
* Hunter McCoy <[email protected]
*
* ============================================================================
*/
#include <poggers/representations/key_val_pair.cuh>
#include <poggers/representations/dynamic_container.cuh>
#include <poggers/representations/key_only.cuh>
#include <stdio.h>
#include <iostream>
#define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
using namespace poggers::representations;
struct large_uint {
uint64_t internal_rep [8];
};
__host__ __device__ bool operator==(large_uint A, large_uint B){
for (int i =0; i < 8; i++){
if (A.internal_rep[i] != B.internal_rep[i]) return false;
}
return true;
}
__global__ void test_with_malloced(key_val_pair<uint64_t,uint64_t> * test){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid != 0) return;
printf("should be 1: %d\n", test[0].atomic_swap(34, 0));
printf("should be 0: %d\n", test[0].atomic_swap(34,1));
printf("done\n\n");
}
__global__ void test_big_with_malloced(key_val_pair<large_uint,uint64_t> * test){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid != 0) return;
large_uint to_insert;
printf("size of to_insert: %llu\n", sizeof(to_insert));
printf("Size of big key %llu\n", sizeof(key_val_pair<large_uint, uint64_t>));
to_insert.internal_rep[0] = 34;
printf("should be 1: %d\n", test[0].atomic_swap(to_insert, 0));
printf("should be 0: %d\n", test[0].atomic_swap(to_insert,1));
printf("done\n\n");
}
__global__ void test_key_val_pair(){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid != 0) return;
printf("Thread %llu starting!\n", tid);
key_val_pair<uint64_t, uint64_t> test(64,42);
printf("should be 0: %d\n", test.atomic_swap(34, 0));
key_val_pair<uint64_t, uint64_t> test3;
printf("should be 1: %d\n", test3.atomic_swap(34, 0));
// large_uint large_uint_test;
// large_uint_test.internal_rep[0] =1234;
// key_val_pair<large_uint, uint64_t> test2;
//printf("%d\n", test2.atomic_swap(large_uint_test, 0));
// if (test2.atomic_swap(large_uint_test, 0UL)){
// printf("first %llu\n", test2.key.internal_rep[0]);
// } else {
// printf("Failure on swap\n");
// }
printf("done\n\n");
}
int main(int argc, char** argv) {
key_val_pair<uint64_t, uint64_t> * test;
hipMalloc((void ** )& test, sizeof(key_val_pair<uint64_t, uint64_t>));
hipMemset(test, 0, sizeof(key_val_pair<uint64_t, uint64_t>));
hipDeviceSynchronize();
hipLaunchKernelGGL(( test_with_malloced), dim3(1),dim3(1), 0, 0, test);
hipDeviceSynchronize();
hipFree(test);
key_val_pair<large_uint, uint64_t> * big_test;
hipMalloc((void ** )& big_test, sizeof(key_val_pair<large_uint, uint64_t>));
hipMemset(big_test, 0, sizeof(key_val_pair<large_uint, uint64_t>));
hipDeviceSynchronize();
hipLaunchKernelGGL(( test_big_with_malloced), dim3(1),dim3(1), 0, 0, big_test);
using smallkeytype = dynamic_container<key_val_pair, uint16_t>::representation<uint64_t, uint16_t>;
smallkeytype test_smallkey;
using smallkeyonly = dynamic_container<key_container, uint16_t>::representation<uint64_t, uint16_t>;
smallkeyonly test_smallkeyonly;
//test_key_val_pair<<<100,100>>>();
gpuErrorCheck(hipPeekAtLastError());
gpuErrorCheck(hipDeviceSynchronize());
return 0;
}
|
2ccbe3715a9e57ef07970b2b6b964570ab906c8e.cu
|
/*
* ============================================================================
*
* Authors:
* Hunter McCoy <[email protected]
*
* ============================================================================
*/
#include <poggers/representations/key_val_pair.cuh>
#include <poggers/representations/dynamic_container.cuh>
#include <poggers/representations/key_only.cuh>
#include <stdio.h>
#include <iostream>
#define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
using namespace poggers::representations;
struct large_uint {
uint64_t internal_rep [8];
};
__host__ __device__ bool operator==(large_uint A, large_uint B){
for (int i =0; i < 8; i++){
if (A.internal_rep[i] != B.internal_rep[i]) return false;
}
return true;
}
__global__ void test_with_malloced(key_val_pair<uint64_t,uint64_t> * test){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid != 0) return;
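// Smoke test on memset-zeroed device storage: the printfs below document the expected
// atomic_swap results (the first swap against the empty slot should succeed, the second should not).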
printf("should be 1: %d\n", test[0].atomic_swap(34, 0));
printf("should be 0: %d\n", test[0].atomic_swap(34,1));
printf("done\n\n");
}
__global__ void test_big_with_malloced(key_val_pair<large_uint,uint64_t> * test){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid != 0) return;
large_uint to_insert;
printf("size of to_insert: %llu\n", sizeof(to_insert));
printf("Size of big key %llu\n", sizeof(key_val_pair<large_uint, uint64_t>));
to_insert.internal_rep[0] = 34;
printf("should be 1: %d\n", test[0].atomic_swap(to_insert, 0));
printf("should be 0: %d\n", test[0].atomic_swap(to_insert,1));
printf("done\n\n");
}
__global__ void test_key_val_pair(){
uint64_t tid = threadIdx.x+blockIdx.x*blockDim.x;
if (tid != 0) return;
printf("Thread %llu starting!\n", tid);
key_val_pair<uint64_t, uint64_t> test(64,42);
printf("should be 0: %d\n", test.atomic_swap(34, 0));
key_val_pair<uint64_t, uint64_t> test3;
printf("should be 1: %d\n", test3.atomic_swap(34, 0));
// large_uint large_uint_test;
// large_uint_test.internal_rep[0] =1234;
// key_val_pair<large_uint, uint64_t> test2;
//printf("%d\n", test2.atomic_swap(large_uint_test, 0));
// if (test2.atomic_swap(large_uint_test, 0UL)){
// printf("first %llu\n", test2.key.internal_rep[0]);
// } else {
// printf("Failure on swap\n");
// }
printf("done\n\n");
}
int main(int argc, char** argv) {
key_val_pair<uint64_t, uint64_t> * test;
cudaMalloc((void ** )& test, sizeof(key_val_pair<uint64_t, uint64_t>));
cudaMemset(test, 0, sizeof(key_val_pair<uint64_t, uint64_t>));
cudaDeviceSynchronize();
test_with_malloced<<<1,1>>>(test);
cudaDeviceSynchronize();
cudaFree(test);
key_val_pair<large_uint, uint64_t> * big_test;
cudaMalloc((void ** )& big_test, sizeof(key_val_pair<large_uint, uint64_t>));
cudaMemset(big_test, 0, sizeof(key_val_pair<large_uint, uint64_t>));
cudaDeviceSynchronize();
test_big_with_malloced<<<1,1>>>(big_test);
using smallkeytype = dynamic_container<key_val_pair, uint16_t>::representation<uint64_t, uint16_t>;
smallkeytype test_smallkey;
using smallkeyonly = dynamic_container<key_container, uint16_t>::representation<uint64_t, uint16_t>;
smallkeyonly test_smallkeyonly;
//test_key_val_pair<<<100,100>>>();
gpuErrorCheck(cudaPeekAtLastError());
gpuErrorCheck(cudaDeviceSynchronize());
return 0;
}
|
1b9b0638ea29772087c94d21d7e2be86e4b21245.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "common.h"
void sumArraysOnHostx(int *A, int *B, int *C, const int N)
{
for (int idx = 0; idx < N; idx++)
C[idx] = A[idx] + B[idx];
}
__global__ void sum_array_overlap(int * a, int * b, int * c, int N)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < N)
{
c[gid] = a[gid] + b[gid];
}
}
//int main()
//{
// int size = 1 << 25;
// int block_size = 128;
//
// //number of bytes needed to hold element count
// size_t NO_BYTES = size * sizeof(int);
//
// int const NUM_STREAMS = 8;
// int ELEMENTS_PER_STREAM = size / NUM_STREAMS;
// int BYTES_PER_STREAM = NO_BYTES / NUM_STREAMS;
//
// // host pointers
// int *h_a, *h_b, *gpu_result, *cpu_result;
//
// //allocate memory for host size pointers
// hipHostMalloc((void**)&h_a,NO_BYTES);
// hipHostMalloc((void**)&h_b, NO_BYTES);
// hipHostMalloc((void**)&gpu_result, NO_BYTES);
//
// cpu_result = (int *)malloc(NO_BYTES);
//
// //initialize h_a and h_b arrays randomly
// initialize(h_a, INIT_ONE_TO_TEN);
// initialize(h_b, INIT_ONE_TO_TEN);
//
// //summation in CPU
// sumArraysOnHostx(h_a, h_b, cpu_result, size);
//
// int *d_a, *d_b, *d_c;
// hipMalloc((int **)&d_a, NO_BYTES);
// hipMalloc((int **)&d_b, NO_BYTES);
// hipMalloc((int **)&d_c, NO_BYTES);
//
// hipStream_t streams[NUM_STREAMS];
//
// for (int i = 0; i < NUM_STREAMS; i++)
// {
// hipStreamCreate(&streams[i]);
// }
//
// //kernel launch parameters
// dim3 block(block_size);
// dim3 grid(ELEMENTS_PER_STREAM/block.x + 1);
//
// int offset = 0;
//
// for (int i = 0; i < NUM_STREAMS; i++)
// {
// offset = i * ELEMENTS_PER_STREAM;
// hipMemcpyAsync(&d_a[offset], &h_a[offset], BYTES_PER_STREAM, hipMemcpyHostToDevice,streams[i]);
// hipMemcpyAsync(&d_b[offset], &h_b[offset], BYTES_PER_STREAM, hipMemcpyHostToDevice,streams[i]);
//
// sum_array_overlap << <grid, block, 0, streams[i] >> > (&d_a[offset], &d_b[offset], &d_c[offset], size);
//
// hipMemcpyAsync(&gpu_result[offset], &d_c[offset], BYTES_PER_STREAM, hipMemcpyDeviceToHost,streams[i]);
// }
//
// for (int i = 0; i < NUM_STREAMS; i++)
// {
// hipStreamDestroy(streams[i]);
// }
//
// hipDeviceSynchronize();
//
// //validity check
// compare_arrays(cpu_result, gpu_result, size);
//
// hipFree(d_a);
// hipFree(d_b);
// hipFree(d_c);
//
// hipHostFree(h_a);
// hipHostFree(h_b);
// hipHostFree(gpu_result);
// free(cpu_result);
//
// hipDeviceReset();
// return EXIT_SUCCESS;
//}
|
1b9b0638ea29772087c94d21d7e2be86e4b21245.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "common.h"
void sumArraysOnHostx(int *A, int *B, int *C, const int N)
{
for (int idx = 0; idx < N; idx++)
C[idx] = A[idx] + B[idx];
}
__global__ void sum_array_overlap(int * a, int * b, int * c, int N)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < N)
{
c[gid] = a[gid] + b[gid];
}
}
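// The commented-out main below sketches copy/compute overlap: the input is split into
// NUM_STREAMS chunks held in pinned host memory, and each stream issues its own H2D copies,
// kernel launch and D2H copy so transfers can overlap with computation. Note that the kernel
// is launched with the full `size` rather than ELEMENTS_PER_STREAM, so each launch relies on
// its grid dimensions to bound the work done per chunk.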
//int main()
//{
// int size = 1 << 25;
// int block_size = 128;
//
// //number of bytes needed to hold element count
// size_t NO_BYTES = size * sizeof(int);
//
// int const NUM_STREAMS = 8;
// int ELEMENTS_PER_STREAM = size / NUM_STREAMS;
// int BYTES_PER_STREAM = NO_BYTES / NUM_STREAMS;
//
// // host pointers
// int *h_a, *h_b, *gpu_result, *cpu_result;
//
// //allocate memory for host size pointers
// cudaMallocHost((void**)&h_a,NO_BYTES);
// cudaMallocHost((void**)&h_b, NO_BYTES);
// cudaMallocHost((void**)&gpu_result, NO_BYTES);
//
// cpu_result = (int *)malloc(NO_BYTES);
//
// //initialize h_a and h_b arrays randomly
// initialize(h_a, INIT_ONE_TO_TEN);
// initialize(h_b, INIT_ONE_TO_TEN);
//
// //summation in CPU
// sumArraysOnHostx(h_a, h_b, cpu_result, size);
//
// int *d_a, *d_b, *d_c;
// cudaMalloc((int **)&d_a, NO_BYTES);
// cudaMalloc((int **)&d_b, NO_BYTES);
// cudaMalloc((int **)&d_c, NO_BYTES);
//
// cudaStream_t streams[NUM_STREAMS];
//
// for (int i = 0; i < NUM_STREAMS; i++)
// {
// cudaStreamCreate(&streams[i]);
// }
//
// //kernel launch parameters
// dim3 block(block_size);
// dim3 grid(ELEMENTS_PER_STREAM/block.x + 1);
//
// int offset = 0;
//
// for (int i = 0; i < NUM_STREAMS; i++)
// {
// offset = i * ELEMENTS_PER_STREAM;
// cudaMemcpyAsync(&d_a[offset], &h_a[offset], BYTES_PER_STREAM, cudaMemcpyHostToDevice,streams[i]);
// cudaMemcpyAsync(&d_b[offset], &h_b[offset], BYTES_PER_STREAM, cudaMemcpyHostToDevice,streams[i]);
//
// sum_array_overlap << <grid, block, 0, streams[i] >> > (&d_a[offset], &d_b[offset], &d_c[offset], size);
//
// cudaMemcpyAsync(&gpu_result[offset], &d_c[offset], BYTES_PER_STREAM, cudaMemcpyDeviceToHost,streams[i]);
// }
//
// for (int i = 0; i < NUM_STREAMS; i++)
// {
// cudaStreamDestroy(streams[i]);
// }
//
// cudaDeviceSynchronize();
//
// //validity check
// compare_arrays(cpu_result, gpu_result, size);
//
// cudaFree(d_a);
// cudaFree(d_b);
// cudaFree(d_c);
//
// cudaFreeHost(h_a);
// cudaFreeHost(h_b);
// cudaFreeHost(gpu_result);
// free(cpu_result);
//
// cudaDeviceReset();
// return EXIT_SUCCESS;
//}
|
1fa87d12b409a9eb89af77dee5980c5838e6afff.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "voxel/voxelizer_device.h"
#include "voxelizer_imp.cuh"
#ifdef VOXELIZER_DEVICE
#include <device_launch_parameters.h>
namespace voxel {
int Voxelize(const float *d_vertices, const int *d_triangles, const int N,
const float *d_tri_bbox0, const float *d_tri_bbox1,
DeviceVoxels &vols) {
// (x, y, z) --> (x + y * n) * (n / 32) + z / 32, and z % 32 index in the byte
const int block_size = 128;
int block_num = (N + block_size - 1) / block_size;
thrust::device_vector<int> ttt(N);
hipLaunchKernelGGL(( device::voxelize_kernel), dim3(block_num), dim3(block_size), 0, 0,
d_vertices, d_triangles, N, d_tri_bbox0, d_tri_bbox1, vols.bbox0_ptr(),
vols.delta_ptr(), vols.inv_delta_ptr(), vols.stride_ptr(), vols.vols_ptr()
#ifdef STASTICS_DEVICE
,thrust::raw_pointer_cast(&ttt.front())
#endif
);
hipError_t cudaStatus;
/* cudaStatus = hipDeviceSynchronize();
if (hipSuccess != cudaStatus ) {
ax::Logger::Debug("voxelize kernel failed!\n");
return kFailed;
}*/
/*thrust::device_vector<float> start(N), end(N);
device::test_kernel<<<block_num, block_size>>>(
d_tri_bbox0, d_tri_bbox1, vols.inv_delta_ptr(), vols.bbox0_ptr(), N,
thrust::raw_pointer_cast(&start.front()),
thrust::raw_pointer_cast(&end.front()));
cutilCheckMsg("test_kernel()");
cudaStatus = hipDeviceSynchronize();
if (hipSuccess != cudaStatus ) {
ax::Logger::Debug("test kernel failed!\n");
return kFailed;
}*/
//float startxxx = start[2077];
//float endxxx = end[2077];
//printf("startxxx: %.12f\n", startxxx);
//printf("endxxx: %.12f\n", endxxx);
//printf("xxxxxx: %.12f\n", startxxx - endxxx);
//thrust::device_vector<float> s(128, startxxx);
//thrust::device_vector<float> e(128, endxxx);
//thrust::device_vector<float> res(128);
//device::test2_kernel<<<1, 128>>>(
// thrust::raw_pointer_cast(&s.front()),
// thrust::raw_pointer_cast(&e.front()),
// thrust::raw_pointer_cast(&res.front()));
//cutilCheckMsg("test_kernel()");
//cudaStatus = hipDeviceSynchronize();
//if (hipSuccess != cudaStatus ) {
// ax::Logger::Debug("test kernel failed!\n");
// return kFailed;
//}
//float ss = s[0];
//float ee = e[0];
//float r = res[0];
//printf("startxxx: %.12f\n", ss);
//printf("endxxx: %.12f\n", ee);
//printf("xxxxxx: %.12f\n", r);
//const int n_stastics = 128;
//int count[n_stastics];
//memset(count, 0, sizeof(int) * n_stastics);
//
//thrust::host_vector<int> h_ttt(ttt);
//for (int i = 0; i < N; ++i) {
// int t = h_ttt[i];
// ++count[t];
//}
//ax::Logger::Debug("not preprocessed:", count[0]);
//ax::Logger::Debug("preprocessed:", count[1]);
//ax::Logger::Debug("processed:", count[2]);
//ax::Logger::Debug("simply processed in X:", count[3]);
//ax::Logger::Debug("simply processed:", count[4]);
////ax::Logger::Debug("normal error:", count[10000]);
return ax::kOK;
}
} // voxel
#endif // VOXELIZER_DEVICE
|
1fa87d12b409a9eb89af77dee5980c5838e6afff.cu
|
#include "voxel/voxelizer_device.h"
#include "voxelizer_imp.cuh"
#ifdef VOXELIZER_DEVICE
#include <device_launch_parameters.h>
namespace voxel {
int Voxelize(const float *d_vertices, const int *d_triangles, const int N,
const float *d_tri_bbox0, const float *d_tri_bbox1,
DeviceVoxels &vols) {
// (x, y, z) --> (x + y * n) * (n / 32) + z / 32, and z % 32 index in the byte
const int block_size = 128;
int block_num = (N + block_size - 1) / block_size;
thrust::device_vector<int> ttt(N);
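// ttt is only consumed when STASTICS_DEVICE is defined; the commented-out host code
// further down tallies it to report which code path the kernel took for each triangle.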
device::voxelize_kernel<<<block_num, block_size>>>(
d_vertices, d_triangles, N, d_tri_bbox0, d_tri_bbox1, vols.bbox0_ptr(),
vols.delta_ptr(), vols.inv_delta_ptr(), vols.stride_ptr(), vols.vols_ptr()
#ifdef STASTICS_DEVICE
,thrust::raw_pointer_cast(&ttt.front())
#endif
);
cudaError_t cudaStatus;
/* cudaStatus = cudaDeviceSynchronize();
if (cudaSuccess != cudaStatus ) {
ax::Logger::Debug("voxelize kernel failed!\n");
return kFailed;
}*/
/*thrust::device_vector<float> start(N), end(N);
device::test_kernel<<<block_num, block_size>>>(
d_tri_bbox0, d_tri_bbox1, vols.inv_delta_ptr(), vols.bbox0_ptr(), N,
thrust::raw_pointer_cast(&start.front()),
thrust::raw_pointer_cast(&end.front()));
cutilCheckMsg("test_kernel()");
cudaStatus = cudaDeviceSynchronize();
if (cudaSuccess != cudaStatus ) {
ax::Logger::Debug("test kernel failed!\n");
return kFailed;
}*/
//float startxxx = start[2077];
//float endxxx = end[2077];
//printf("startxxx: %.12f\n", startxxx);
//printf("endxxx: %.12f\n", endxxx);
//printf("xxxxxx: %.12f\n", startxxx - endxxx);
//thrust::device_vector<float> s(128, startxxx);
//thrust::device_vector<float> e(128, endxxx);
//thrust::device_vector<float> res(128);
//device::test2_kernel<<<1, 128>>>(
// thrust::raw_pointer_cast(&s.front()),
// thrust::raw_pointer_cast(&e.front()),
// thrust::raw_pointer_cast(&res.front()));
//cutilCheckMsg("test_kernel()");
//cudaStatus = cudaDeviceSynchronize();
//if (cudaSuccess != cudaStatus ) {
// ax::Logger::Debug("test kernel failed!\n");
// return kFailed;
//}
//float ss = s[0];
//float ee = e[0];
//float r = res[0];
//printf("startxxx: %.12f\n", ss);
//printf("endxxx: %.12f\n", ee);
//printf("xxxxxx: %.12f\n", r);
//const int n_stastics = 128;
//int count[n_stastics];
//memset(count, 0, sizeof(int) * n_stastics);
//
//thrust::host_vector<int> h_ttt(ttt);
//for (int i = 0; i < N; ++i) {
// int t = h_ttt[i];
// ++count[t];
//}
//ax::Logger::Debug("not preprocessed:", count[0]);
//ax::Logger::Debug("preprocessed:", count[1]);
//ax::Logger::Debug("processed:", count[2]);
//ax::Logger::Debug("simply processed in X:", count[3]);
//ax::Logger::Debug("simply processed:", count[4]);
////ax::Logger::Debug("normal error:", count[10000]);
return ax::kOK;
}
} // voxel
#endif // VOXELIZER_DEVICE
|
de8ea8bd804a47e318dabcddccf1502286e98d97.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <mat.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <math.h>
#include <matrix.h>
#include <iostream>
#include "rocblas.h"
#include "cokus.cpp"
#include "cuda_util.h"
#include <hip/hip_runtime.h>
using namespace std;
const int KER_NUM = 20;// number of convolution kernels (feature maps)
const int P_NUM = 3;// convolution window length along the third (spectral) dimension
const int LEAP = 2;// stride of the convolution window along the third dimension
const int GP_NUM = 3;// max-pooling window size
const int NEU_NUM1 = 100;// neurons in the fully connected hidden layer
const int NEU_NUM2 = 13;// neurons in the output layer (one per class)
const int NEIGHBOR = 8;// number of neighbouring pixels per sample
double LEARN_RATE = 0.01;
const double MIN_ERR = 0.001;
const int VALID_BATCH = 10;
//const int DATA_BATCH = 512;//512
// Initialize CUDA: find and select a device supporting CUDA 1.x or newer
bool InitCUDA(){
int count;
hipGetDeviceCount(&count);
if(count==0){
fprintf(stderr,"There is no device.\n");
return false;
}
int i;
for (i =0; i<count;i++){
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop,i)==hipSuccess){
if(prop.major>=1){ break;
}
}
}
if(i==count){
fprintf(stderr,"There is no device supporting CUDA 1.x.\n");
return false;
}
hipSetDevice(i);
return true;
}
// Copy a segment of global-memory data into shared memory
__device__ void copy_data_to_shared(double * data, double * data_tmp,int head, int length){
for(int i=0; i<length; i++){
data_tmp[i] = data[i+head];
}
__syncthreads();
}
// Convolution layer (GPU kernel)
__global__ static void convol(int iter,int i0,double * train,double * kernel,double * re,double * bias,int x,int y,int z,int re_size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadNum = blockDim.x * gridDim.x;
int id = tid + iter * threadNum;// global index of this thread for the current iteration
// each thread applies one 3*3*P_NUM kernel across its sample's 3*3*height data block
if (id < KER_NUM){
extern __shared__ double train_tmp[];
//__shared__ double train_tmp[9*200];
int st = i0 * x * y * z;
copy_data_to_shared(train,train_tmp,st,x*y*z);// copy this sample's block of train into shared memory
/*double * ker = new double [x*y*P_NUM];//kernel
for(int i=0; i<x*y*P_NUM; i++){
ker[i] = kernel[id*x*y*P_NUM + i];
}*/
double mid;
//int i_1=0;
for(int i=0; i<re_size; i++){
mid = 0;
int start = i*x*y*LEAP;// starting offset of the i-th convolution window (stride LEAP)
for(int j=0; j<x*y*P_NUM; j++){
mid = mid + train_tmp[start + j]*kernel[id*x*y*P_NUM+j];
}
mid = mid + bias[id];
re[i + id*re_size] = 2/(1+(1/exp(2*mid))) - 1;//tanh
}
/*for
}*/
}
}
// Max-pooling layer (GPU kernel)
__global__ static void maxpooling(int iter,double * re,double * mre,int * mre_index,int re_size,int mre_num){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadNum = blockDim.x * gridDim.x;
int id = tid + iter * threadNum;
//int res = re_size, mres = mre_num;
//extern __shared__ double re_tmp[];
//copy_data_to_shared(re, re_tmp, 0, re_size*KER_NUM);
if(id < KER_NUM){
double mid;
int mid_index;
for(int i=0; i<mre_num; i++){
mid = re[i*GP_NUM + id*re_size];// running maximum within this pooling window
mid_index = i*GP_NUM + id*re_size;
for(int j=i*GP_NUM+1; j<(i+1)*GP_NUM && j<re_size; j++){
if(mid < re[j + id*re_size]){
mid = re[j + id*re_size];
mid_index = j+id*re_size;
}
}
mre[i + id * mre_num] = mid;
mre_index[i + id * mre_num] = mid_index;
}
}
}
// Fully connected layer (tanh activation)
__global__ static void fullconnect(int iter,double * mre,double * omega,double * bias,double * F1,int mre_size){
int tid = blockIdx.x * blockDim.x +threadIdx.x;
int threadNum = blockDim.x * gridDim.x;
int id = tid + iter * threadNum;
if(id < NEU_NUM1){
//copy the mre array into shared memory
//__shared__ double mre_tmp[50 * KER_NUM];
extern __shared__ double mre_tmp[];
copy_data_to_shared(mre,mre_tmp,0,mre_size);
//compute the neuron's output
double mid=0;
for(int i=0; i<mre_size; i++){
mid = mid + omega[id + i*NEU_NUM1] * mre_tmp[i];
}
mid = mid + bias[id];
F1[id] = 2/(1 + 1/exp(mid * 2)) - 1;//tanh
}
}
//output layer: each thread computes one neuron's output
__global__ static void output(int iter, double * F1, double * omega2, double * bias, double * O2){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadNum = blockDim.x * gridDim.x;
int id = tid + iter * threadNum;
if(id < NEU_NUM2){
//copy F1 into shared memory
__shared__ double F1_tmp[NEU_NUM1];
copy_data_to_shared(F1, F1_tmp, 0, NEU_NUM1);
//compute the neuron's output
double mid = 0;
for(int i=0; i<NEU_NUM1; i++){
mid = mid + omega2[id + i*NEU_NUM2] * F1_tmp[i];
}
O2[id] = 1/(1 + 1/exp(mid + bias[id]));//sigmoid
}
}
/*back propagation*/
//output layer
__global__ static void bp_output(int iter,int train_idx, double LEARN_RATE, double * labels, double * O2, double * bias2, double * delta_L_a, double * delta_L_z)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadNum = blockDim.x * gridDim.x;
int id = tid + iter * threadNum;
if(id < NEU_NUM2){
delta_L_a[id] = -(labels[id + train_idx * NEU_NUM2] - O2[id]);
delta_L_z[id] = delta_L_a[id] * O2[id] *(1 - O2[id]);
bias2[id] = bias2[id] - delta_L_z[id]*LEARN_RATE;
}
}
//fully connected layer
__global__ static void bp_fullconnect(int iter, double LEARN_RATE, double * omega2,double * bias1, double * F1, double * delta_L_a, double * delta_L_z, double *delta_f_a, double * delta_f_z)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadNum = blockDim.x * gridDim.x;
int id = tid + iter * threadNum;
if(id < NEU_NUM1){
double mid = 0;
double delta_f_w;
for(int i=0; i<NEU_NUM2; i++){
mid = mid + omega2[i + id*NEU_NUM2] * delta_L_z[i];
//delta_f_b[i] = delta_L_z[i];
delta_f_w = F1[id] * delta_L_z[i];
omega2[i + id*NEU_NUM2] = omega2[i + id*NEU_NUM2] - LEARN_RATE * delta_f_w;
//bias2[i] = bias2[i] - LEARN_RATE*delta_f_b[i];
}
delta_f_a[id] = mid;
delta_f_z[id] = delta_f_a[id] * (1 + F1[id]) * (1 - F1[id]);
bias1[id] = bias1[id] - LEARN_RATE * delta_f_z[id];
}
}
//maxpooling layer (maps delta_a back to the convolution layer's delta_z)
__global__ static void bp_maxpooling(int iter, int mre_size,double LEARN_RATE, int *mre_index, double * omega1,double *mre, double * delta_f_a, double * delta_f_z, double * delta_m_a, double * delta_22)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadNum = blockDim.x * gridDim.x;
int id = tid + iter * threadNum;
if(id < mre_size){
double mid = 0;
double delta_m_w;
for(int i=0; i<NEU_NUM1; i++){
mid = mid + omega1[i + id*NEU_NUM1] * delta_f_z[i];
//delta_2[i + id*NEU_NUM1] = mid;
delta_m_w = mre[id] * delta_f_z[i];
omega1[i + id*NEU_NUM1] = omega1[i + id*NEU_NUM1] - LEARN_RATE * delta_m_w;
}
delta_m_a[id] = mid;
//delta_2[id] = delta_m_a[id];
//int idx = mre_index[id];
delta_22[mre_index[id]] = delta_m_a[id] * (1 + mre[id]) * (1 - mre[id]);
}
}
//compute and update the kernels
__global__ static void bp_update_kernel(int iter,int i0, double LEARN_RATE, int x, int y, int z, int mre_num,int re_size, int * mre_index, double * delta_22, double * data, double * kernel,double * bias0)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadNum =blockDim.x * gridDim.x;
int id = tid + iter * threadNum;
if(id < KER_NUM){
extern __shared__ double train_tmp[];
copy_data_to_shared(data, train_tmp, x*y*z*i0, x*y*z);
double * delta_k_w = new double [x*y*P_NUM];
for(int i=0; i<x*y*P_NUM;i++)
delta_k_w[i] = 0;
double mid = 0;
for (int i=0; i<mre_num; i++){
int idx = mre_index[i + id*mre_num];
int n = idx % re_size;//the n-th convolution block this index corresponds to
int head = x*y*LEAP*n;
for(int j=0; j<x*y*P_NUM; j++){
delta_k_w[j] = delta_k_w[j] + delta_22[idx] * train_tmp[j+head];
}
mid = mid + delta_22[idx];
}
for(int i=0;i<x*y*P_NUM;i++){
delta_k_w[i] = delta_k_w[i]/mre_num;
kernel[id*x*y*P_NUM+i] = kernel[id*x*y*P_NUM+i] - LEARN_RATE*delta_k_w[i];
}
//double delta_k_b = delta_22[idx];
bias0[id] = bias0[id] - LEARN_RATE*(mid/mre_num);
delete [] delta_k_w;
}
}
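// Note: the per-thread `new double [x*y*P_NUM]` above allocates from the device heap;
// the default heap is ample for this configuration, but much larger KER_NUM or window
// sizes might need the heap limit raised (e.g. via hipDeviceSetLimit).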
//data preprocessing
__global__ static void processing(int iter, double * data, int * train_index, double * processed_data, int x, int y, int z, int train_size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadNum = blockDim.x * gridDim.x;
int id = tid + iter * threadNum;
//int idx = id * (NEIGHBOR+1) * z;//start position in processed_data
if (id < train_size){
int idx = id * (NEIGHBOR+1) * z;
for (int i=0; i<z; i++){
for (int j=0; j<(NEIGHBOR+1); j++){
processed_data[idx] = data[train_index[j + id*(NEIGHBOR+1)] + i * x*y];
idx = idx + 1;
}
}
}
}
double lossfunction(double * output, double * labels, int idx){
double l = 0;
for(int i=0; i<NEU_NUM2; i++){
l = l + (output[i] - labels[i + idx*NEU_NUM2]) * (output[i] - labels[i + idx*NEU_NUM2]);
}
l = l/2;
return l;
}
//compute the accuracy
double count_err(double * test_labels, double * output, int test_idx)
{
double right=0;
double max =0;
int idx = 0;
for(int i=0; i<NEU_NUM2; i++){
if(output[i]>max){
max = output[i];
idx = i;
}
}
if((idx+1) == int(test_labels[test_idx]))
right = 1;
return right;
}
//insert into the queue
void insert_line(double * a, double b){
for(int i=1; i<VALID_BATCH; i++){
a[i-1] = a[i];
}
a[VALID_BATCH-1] = b;
}
double max(double * a){
double m=a[0];
for(int i=1; i<VALID_BATCH; i++){
if(m<a[i])
m=a[i];
}
return m;
}
//shuffle
void shuffle(int * data, double * labels, int dim_row, int width){
int index, i;
int temp;
double tmp;
srand(time(NULL));
for(i=0; i<width; i++){
index=rand()%(width-i) + i;
if(index != i){
for(int j=0; j<dim_row; j++){
temp = data[j + i*dim_row];
data[j + i*dim_row] = data[j +index*dim_row];
data[j + index*dim_row] = temp;
}
for(int j=0; j<NEU_NUM2; j++){
tmp = labels[j + i*NEU_NUM2];
labels[j + i*NEU_NUM2] = labels[j + index*NEU_NUM2];
labels[j + index*NEU_NUM2] = tmp;
}
}
}
}
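// shuffle above is a Fisher-Yates pass over sample columns: each column of `data` holds
// one sample's (NEIGHBOR+1) indices, and the matching NEU_NUM2 label entries are swapped
// in lockstep so samples and labels stay aligned.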
//training
double training(double * data, double * labels, int x, int y, int z){
clock_t start, end;
start = clock();
double * gpu_data;//original data in device memory
double * gpu_processed_train;//preprocessed data in device memory
double * gpu_processed_test;
double * gpu_processed_valid;
int * gpu_train_index;//indices of the training data
int * gpu_test_index;
int * gpu_valid_index;
double * gpu_processed_labels;
//double * gpu_test_labels;
//count the labeled pixels
int data_size = 0;
int * data_index = new int [x*y];
for(int i=0; i<x*y; i++){
if(labels[i] != 0){
data_index[data_size]=i;
data_size ++;
}
}
int test_size = (data_size-1)/6 + 1;
int valid_size = test_size;
int train_size = data_size - test_size - valid_size;
fprintf(stdout,"train_size:%d test_size:%d\n valid_size:%d\n",train_size,test_size,valid_size);
int * train_index = new int [train_size * (NEIGHBOR + 1)];//9 rows, x*y columns; each column stores the indices of a pixel and its neighbors
int * valid_index = new int [valid_size * (NEIGHBOR + 1)];
int * test_index = new int [test_size * (NEIGHBOR+1)];
double * processed_labels = new double [train_size * NEU_NUM2]();
double * test_labels = new double [test_size]();
double * valid_labels = new double [valid_size]();
int tr=0, te=0, va=0;
for (int i=0; i<data_size; i++){
if (i%6 != 0 && i%6 != 1){
train_index[(NEIGHBOR/2) + tr * (NEIGHBOR+1)] = data_index[i];//
train_index[(NEIGHBOR/2) + tr * (NEIGHBOR+1) - 1] = data_index[i] - 1;
train_index[(NEIGHBOR/2) + tr * (NEIGHBOR+1) + 1] = data_index[i] + 1;
for(int j0=0;j0<3;j0++){
train_index[j0 + tr * (NEIGHBOR+1)] = data_index[i] - 1 - x + j0;
train_index[j0+6 + tr * (NEIGHBOR+1)] = data_index[i] - 1 + x + j0;
}
if((data_index[i] % x) == 0){//
for (int j=0; j<3; j++)
train_index[j*3 + tr*(NEIGHBOR+1)] = train_index[j*3+2 + tr*(NEIGHBOR+1)];
}
if((data_index[i] % x) == (x-1)){//
for(int j=0;j<3;j++)
train_index[j*3+2 + tr*(NEIGHBOR+1)] = train_index[j*3 + tr*(NEIGHBOR+1)];
}
if((data_index[i]/x) == 0){//
for(int j=0;j<3;j++)
train_index[j + tr*(NEIGHBOR+1)] = train_index[j+6 + tr*(NEIGHBOR+1)];
}
if((data_index[i]/x) == (y-1)){//
for(int j=0;j<3;j++)
train_index[j+6 + tr*(NEIGHBOR+1)] = train_index[j + tr*(NEIGHBOR+1)];
}
int mid = int(labels[data_index[i]])-1 + tr*NEU_NUM2;
processed_labels[mid] = 1;
tr = tr + 1;
}
if(i%6 == 0){
test_index[(NEIGHBOR/2) + te * (NEIGHBOR+1)] = data_index[i];//
test_index[(NEIGHBOR/2) + te * (NEIGHBOR+1) - 1] = data_index[i] - 1;
test_index[(NEIGHBOR/2) + te * (NEIGHBOR+1) + 1] = data_index[i] + 1;
for(int j0=0;j0<3;j0++){
test_index[j0 + te * (NEIGHBOR+1)] = data_index[i] - 1 - x + j0;
test_index[j0+6 + te * (NEIGHBOR+1)] = data_index[i] - 1 + x + j0;
}
if((data_index[i] % x) == 0){//
for (int j=0; j<3; j++)
test_index[j*3 + te*(NEIGHBOR+1)] = test_index[j*3+2 + te*(NEIGHBOR+1)];
}
if((data_index[i] % x) == (x-1)){//
for(int j=0;j<3;j++)
test_index[j*3+2 + te*(NEIGHBOR+1)] = test_index[j*3 + te*(NEIGHBOR+1)];
}
if((data_index[i]/x) == 0){//
for(int j=0;j<3;j++)
test_index[j + te*(NEIGHBOR+1)] = test_index[j+6 + te*(NEIGHBOR+1)];
}
if((data_index[i]/x) == (y-1)){//
for(int j=0;j<3;j++)
test_index[j+6 + te*(NEIGHBOR+1)] = test_index[j + te*(NEIGHBOR+1)];
}
//int mid = int(labels[data_index[i]])-1 + te*NEU_NUM2;
test_labels[te] = labels[data_index[i]];
te = te + 1;
}
if(i%6 == 1){
valid_index[(NEIGHBOR/2) + va * (NEIGHBOR+1)] = data_index[i];//
valid_index[(NEIGHBOR/2) + va * (NEIGHBOR+1) - 1] = data_index[i] - 1;
valid_index[(NEIGHBOR/2) + va * (NEIGHBOR+1) + 1] = data_index[i] + 1;
for(int j0=0;j0<3;j0++){
valid_index[j0 + va * (NEIGHBOR+1)] = data_index[i] - 1 - x + j0;
valid_index[j0+6 + va * (NEIGHBOR+1)] = data_index[i] - 1 + x + j0;
}
if((data_index[i] % x) == 0){//
for (int j=0; j<3; j++)
valid_index[j*3 + va*(NEIGHBOR+1)] = valid_index[j*3+2 + va*(NEIGHBOR+1)];
}
if((data_index[i] % x) == (x-1)){//
for(int j=0;j<3;j++)
valid_index[j*3+2 + va*(NEIGHBOR+1)] = valid_index[j*3 + va*(NEIGHBOR+1)];
}
if((data_index[i]/x) == 0){//
for(int j=0;j<3;j++)
valid_index[j + va*(NEIGHBOR+1)] = valid_index[j+6 + va*(NEIGHBOR+1)];
}
if((data_index[i]/x) == (y-1)){//
for(int j=0;j<3;j++)
valid_index[j+6 + va*(NEIGHBOR+1)] = valid_index[j + va*(NEIGHBOR+1)];
}
//int mid = int(labels[data_index[i]])-1 + te*NEU_NUM2;
valid_labels[va] = labels[data_index[i]];
va = va + 1;
}
}
shuffle(train_index, processed_labels, (NEIGHBOR+1), train_size);//shuffle the order of the training data
fprintf(stdout,"train_size:%d\n",train_size);
fprintf(stdout,"train_index:%d %d %d %d\ntest_index:%d %d %d %d\nvalid_index:%d %d %d %d\n",train_index[0],train_index[1],train_index[2],train_index[3],test_index[0],test_index[1],test_index[2],test_index[3],valid_index[0],valid_index[1],valid_index[2],valid_index[3]);
fprintf(stdout,"train labels:\n");
for(int i=0; i<NEU_NUM2; i++){
fprintf(stdout,"%lf ",processed_labels[i]);
}
fprintf(stdout,"\n");
fprintf(stdout,"test label:%lf",test_labels[0]);
fprintf(stdout,"valid label:%lf",valid_labels[0]);
//int * train_index = new int [train_size * (NEIGHBOR + 1)];//train_size columns, 9 rows; each row stores the indices of a pixel and its neighbors
//allocate device memory and copy the data to the device
SAFE_CALL(hipMalloc((void **) &gpu_data, sizeof(double) * x * y * z));
SAFE_CALL(hipMemcpy(gpu_data, data, sizeof(double)* x * y * z, hipMemcpyHostToDevice));
SAFE_CALL(hipMalloc((void **) &gpu_train_index, sizeof(int) * train_size * (NEIGHBOR+1)));
SAFE_CALL(hipMemcpy(gpu_train_index, train_index, sizeof(int) * train_size * (NEIGHBOR+1), hipMemcpyHostToDevice));
SAFE_CALL(hipMalloc((void **) &gpu_test_index, sizeof(int) * test_size * (NEIGHBOR+1)));
SAFE_CALL(hipMemcpy(gpu_test_index, test_index, sizeof(int) * test_size * (NEIGHBOR+1), hipMemcpyHostToDevice));
SAFE_CALL(hipMalloc((void **) &gpu_valid_index, sizeof(int) * valid_size * (NEIGHBOR+1)));
SAFE_CALL(hipMemcpy(gpu_valid_index, valid_index, sizeof(int) * valid_size * (NEIGHBOR+1), hipMemcpyHostToDevice));
SAFE_CALL(hipMalloc((void **) &gpu_processed_valid, sizeof(double) * valid_size * (NEIGHBOR+1) * z));
SAFE_CALL(hipMalloc((void **) &gpu_processed_test, sizeof(double) * test_size * (NEIGHBOR+1) * z));
SAFE_CALL(hipMalloc((void **) &gpu_processed_train, sizeof(double) * train_size * (NEIGHBOR+1) * z));//size of each batch of data
int gridsize = 64;
int blocksize = 1024;
//int threadNum = gridsize * blocksize;
double * processed_train = new double [train_size * (NEIGHBOR+1) * z];
double * processed_test = new double [test_size * (NEIGHBOR+1) * z];
double * processed_valid = new double [valid_size * (NEIGHBOR+1) * z];
//preprocessing
int iter=0;
hipLaunchKernelGGL(( processing), dim3(gridsize),dim3(blocksize), 0, 0, iter, gpu_data, gpu_train_index, gpu_processed_train, x, y, z, train_size);
hipLaunchKernelGGL(( processing), dim3(gridsize),dim3(blocksize), 0, 0, iter, gpu_data, gpu_test_index, gpu_processed_test, x, y, z, test_size);
hipLaunchKernelGGL(( processing), dim3(gridsize),dim3(blocksize), 0, 0, iter, gpu_data, gpu_valid_index, gpu_processed_valid, x, y, z, valid_size);
hipDeviceSynchronize();
end = clock();
double tt = double(end - start);
fprintf(stdout,"Using time of preprocessing:%lf\n",tt/CLOCKS_PER_SEC);
SAFE_CALL(hipMemcpy(processed_train, gpu_processed_train, sizeof(double) * train_size * (NEIGHBOR+1) * z, hipMemcpyDeviceToHost));
SAFE_CALL(hipMemcpy(processed_test, gpu_processed_test, sizeof(double) * test_size * (NEIGHBOR+1) * z, hipMemcpyDeviceToHost));
SAFE_CALL(hipMemcpy(processed_valid, gpu_processed_valid, sizeof(double) * valid_size * (NEIGHBOR+1) * z, hipMemcpyDeviceToHost));
SAFE_CALL(hipFree(gpu_data));
SAFE_CALL(hipFree(gpu_train_index));
SAFE_CALL(hipFree(gpu_test_index));
SAFE_CALL(hipFree(gpu_valid_index));
hipDeviceSynchronize();
fprintf(stdout,"Processed train data:%lf %lf %lf %lf\n",processed_train[0],processed_train[1],processed_train[2],processed_train[3]);
fprintf(stdout,"Processed test data:%lf %lf %lf %lf\n",processed_test[0],processed_test[1],processed_test[2],processed_test[3]);
fprintf(stdout,"processed valid data:%lf %lf %lf %lf\n",processed_valid[0],processed_valid[1],processed_valid[2],processed_valid[3]);
start = clock();
//forward propagation
double * kernel = new double [(NEIGHBOR+1)*P_NUM*KER_NUM];
//randomly initialize the kernel array
for(int i=0; i<(NEIGHBOR+1)*P_NUM*KER_NUM; i++){
kernel[i] = 2*(rand()/(double)(RAND_MAX)) - 1 ;
kernel[i] = kernel[i]/20;
if(kernel[i] == 0 )
kernel[i] = 0.005;
}
fprintf(stdout,"kernel:%lf %lf %lf %lf\n",kernel[0], kernel[1], kernel[2], kernel[3]);
//compute the number of results per convolution
int re_size = 0;
for (int i=0; i+P_NUM-1<z; i+=LEAP){
re_size ++;
}
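// re_size = floor((z - P_NUM)/LEAP) + 1: the number of positions of a length-P_NUM
// window stepped by LEAP along the z spectral bands.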
//double * re = new double [re_size * KER_NUM];
fprintf(stdout,"Size of re:%d\n",re_size);
int mre_num = re_size/GP_NUM + 1;//number of maxpooling outputs per kernel: ceil(re_size/GP_NUM)
if(re_size%GP_NUM == 0){
mre_num = re_size / GP_NUM;
}
fprintf(stdout,"mre_num:%d\n",mre_num);
int mre_size = mre_num * KER_NUM;
int ome_num1 = mre_num * KER_NUM * NEU_NUM1;//number of input weights of the first fully connected layer
int ome_num2 = NEU_NUM1 * NEU_NUM2;//number of weights of the output layer
double * gpu_kernel;
double * gpu_bias0;
double * gpu_re;//stores the convolution results
double * gpu_mre;//stores the maxpooling results
int * gpu_mre_index;//stores the index of each group's maximum
double * gpu_omega1;//input weights of the first fully connected layer
double * gpu_F1;//outputs of the first-layer neurons
double * gpu_bias1;
double * gpu_omega2;
double * gpu_O2;
double * gpu_bias2;
double * gpu_delta_La;
double * gpu_delta_Lz;
double * gpu_delta_fa;
double * gpu_delta_fz;
double * gpu_delta_ma;
double * gpu_delta_22;
double * delta_22 = new double [re_size*KER_NUM]();
//
SAFE_CALL(hipMalloc((void**) &gpu_processed_labels, sizeof(double) * train_size * NEU_NUM2));
SAFE_CALL(hipMemcpy(gpu_processed_labels,processed_labels,sizeof(double) * train_size * NEU_NUM2,hipMemcpyHostToDevice));
//kernel
SAFE_CALL(hipMalloc((void**) &gpu_kernel,sizeof(double) * (NEIGHBOR+1) * P_NUM * KER_NUM));
SAFE_CALL(hipMemcpy(gpu_kernel,kernel,sizeof(double) * (NEIGHBOR+1) * P_NUM * KER_NUM,hipMemcpyHostToDevice));
//gpu_re
SAFE_CALL(hipMalloc((void **) &gpu_re,sizeof(double) * re_size * KER_NUM));
//
SAFE_CALL(hipMalloc((void **) &gpu_delta_La, sizeof(double) * NEU_NUM2));
SAFE_CALL(hipMalloc((void **) &gpu_delta_Lz, sizeof(double) * NEU_NUM2));
//
SAFE_CALL(hipMalloc((void **) &gpu_delta_fa, sizeof(double) * NEU_NUM1));
SAFE_CALL(hipMalloc((void **) &gpu_delta_fz, sizeof(double) * NEU_NUM1));
//maxpooling
SAFE_CALL(hipMalloc((void **) &gpu_delta_ma, sizeof(double) * mre_size));
//SAFE_CALL(hipMalloc((void **) &gpu_delta_mz, sizeof(double) * mre_size));
//
//SAFE_CALL(hipMalloc((void **) &gpu_delta_2, sizeof(double) * mre_size));
SAFE_CALL(hipMalloc((void **) &gpu_delta_22,sizeof(double) * re_size * KER_NUM));
SAFE_CALL(hipMemcpy(gpu_delta_22, delta_22, sizeof(double) * re_size * KER_NUM, hipMemcpyHostToDevice));
//SAFE_CALL(hipMalloc((void **) &gpu_delta_kw, sizeof(double) * (NEIGHBOR+1) *P_NUM));
double * omega1 = new double [ome_num1];
double * omega2 = new double [ome_num2];
double * bias0 = new double [KER_NUM];
double * bias1 = new double [NEU_NUM1];
double * bias2 = new double [NEU_NUM2];
//Omega1
for(int i=0; i<ome_num1; i++){
omega1[i] = 2 * (rand()/(double)(RAND_MAX)) - 1;
omega1[i] = omega1[i]/20;
if(omega1[i] == 0)
omega1[i] = 0.01;
}
//bias0
for(int i=0; i<KER_NUM; i++){
bias0[i] = 2*(rand()/(double)(RAND_MAX)) - 1;
bias0[i] = bias0[i]/20;
}
//bias1
for(int i=0; i<NEU_NUM1; i++){
bias1[i] = 2*(rand()/(double)(RAND_MAX)) - 1;
bias1[i] = bias1[i]/20;
}
//Omega2
for(int i=0; i<ome_num2; i++){
omega2[i] = 2 * (rand()/(double)(RAND_MAX)) - 1;
omega2[i] = omega2[i]/20;
if(omega2[i] ==0)
omega2[i] = 0.01;
}
fprintf(stdout, "Bias1: %lf %lf %lf\n",bias1[0],bias1[1],bias1[2]);
//bias2
for(int i=0; i<NEU_NUM2; i++){
bias2[i] = 2*(rand()/(double)(RAND_MAX)) - 1;
bias2[i] = bias2[i]/20;
}
fprintf(stdout, "Bias2: %lf %lf %lf\n",bias2[0],bias2[1],bias2[2]);
SAFE_CALL(hipMalloc((void **) &gpu_mre, sizeof(double) * mre_num * KER_NUM));//maxpoolinggpu_mre
SAFE_CALL(hipMalloc((void **) &gpu_mre_index, sizeof(int) * mre_num * KER_NUM));//maxpooling
SAFE_CALL(hipMalloc((void **) &gpu_omega1, sizeof(double) * ome_num1));//
SAFE_CALL(hipMalloc((void **) &gpu_omega2, sizeof(double) * ome_num2));//
SAFE_CALL(hipMalloc((void **) &gpu_F1, sizeof(double) * NEU_NUM1));//
SAFE_CALL(hipMalloc((void **) &gpu_O2, sizeof(double) * NEU_NUM2));//
SAFE_CALL(hipMalloc((void **) &gpu_bias0, sizeof(double) * KER_NUM));//
SAFE_CALL(hipMalloc((void **) &gpu_bias1, sizeof(double) * NEU_NUM1));//
SAFE_CALL(hipMalloc((void **) &gpu_bias2, sizeof(double) * NEU_NUM2));//
SAFE_CALL(hipMemcpy(gpu_omega1, omega1, sizeof(double) * ome_num1, hipMemcpyHostToDevice));//GPU
SAFE_CALL(hipMemcpy(gpu_omega2, omega2, sizeof(double) * ome_num2, hipMemcpyHostToDevice));
SAFE_CALL(hipMemcpy(gpu_bias0, bias0, sizeof(double) * KER_NUM, hipMemcpyHostToDevice));
SAFE_CALL(hipMemcpy(gpu_bias1, bias1, sizeof(double) * NEU_NUM1, hipMemcpyHostToDevice));//
SAFE_CALL(hipMemcpy(gpu_bias2, bias2, sizeof(double) * NEU_NUM2, hipMemcpyHostToDevice));
//double * mre = new double [mre_num * KER_NUM];//CPUmaxpooling
//double * F1 = new double [NEU_NUM1];//CPU
double * O2 = new double [NEU_NUM2];//CPU
//double * lz = new double [NEU_NUM2];
//double loss;
double * correct_rate = new double [VALID_BATCH];
double cur_max = 0;//stores the current best accuracy
int count=1;
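// Training loop: plain per-sample (online) gradient descent. For every training sample a
// forward pass (convol -> maxpooling -> fullconnect -> output) is followed by a backward
// pass (bp_output -> bp_fullconnect -> bp_maxpooling -> bp_update_kernel); after each
// epoch the validation accuracy is measured and training stops early once it exceeds 0.9
// (at most 301 epochs).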
for(int j=0; j<301; j++){
//if (j % 100 == 0)
// fprintf(stdout,"The %dth iteration.\n",j);
//loss = 0;
for(int i0=0; i0<train_size; i0++){
int iter = 0;
//convolution: each thread convolves one kernel with the training sample
hipLaunchKernelGGL(( convol), dim3(1),dim3(KER_NUM),(NEIGHBOR+1)*z*sizeof(double), 0, iter,i0,gpu_processed_train,gpu_kernel,gpu_re,gpu_bias0,3,3,z,re_size);
hipDeviceSynchronize();
//downsampling by maxpooling: each thread handles one column of re
hipLaunchKernelGGL(( maxpooling), dim3(1),dim3(KER_NUM), 0, 0, iter,gpu_re,gpu_mre,gpu_mre_index,re_size,mre_num);
hipDeviceSynchronize();
//fully connected layer
hipLaunchKernelGGL(( fullconnect), dim3(1),dim3(NEU_NUM1),mre_size * sizeof(double), 0, iter,gpu_mre,gpu_omega1,gpu_bias1,gpu_F1,mre_size);
hipDeviceSynchronize();
//output layer
hipLaunchKernelGGL(( output), dim3(1),dim3(NEU_NUM2), 0, 0, iter,gpu_F1,gpu_omega2,gpu_bias2,gpu_O2);
hipDeviceSynchronize();
//SAFE_CALL(hipMemcpy(O2, gpu_O2, sizeof(double) * NEU_NUM2, hipMemcpyDeviceToHost));
//hipDeviceSynchronize();
//double single_loss = lossfunction(O2, processed_labels, i0);
//loss = loss + single_loss;
//back propagation: output layer
hipLaunchKernelGGL(( bp_output), dim3(1),dim3(NEU_NUM2), 0, 0, iter,i0,LEARN_RATE,gpu_processed_labels,gpu_O2,gpu_bias2,gpu_delta_La,gpu_delta_Lz);
hipDeviceSynchronize();
//back propagation: fully connected layer
hipLaunchKernelGGL(( bp_fullconnect), dim3(1),dim3(NEU_NUM1), 0, 0, iter,LEARN_RATE,gpu_omega2,gpu_bias1,gpu_F1,gpu_delta_La,gpu_delta_Lz,gpu_delta_fa,gpu_delta_fz);
hipDeviceSynchronize();
//back propagation: maxpooling layer
hipLaunchKernelGGL(( bp_maxpooling), dim3(1),dim3(mre_size), 0, 0, iter,mre_size,LEARN_RATE,gpu_mre_index,gpu_omega1,gpu_mre,gpu_delta_fa,gpu_delta_fz,gpu_delta_ma,gpu_delta_22);
hipDeviceSynchronize();
//back propagation: map back to the convolution layer
hipLaunchKernelGGL(( bp_update_kernel), dim3(1),dim3(KER_NUM),(NEIGHBOR+1)*z*sizeof(double), 0, iter,i0,LEARN_RATE,3,3,z,mre_num,re_size,gpu_mre_index,gpu_delta_22,gpu_processed_train,gpu_kernel,gpu_bias0);
hipDeviceSynchronize();
/*if(i0<10){
SAFE_CALL(hipMemcpy(kernel, gpu_kernel, sizeof(double) * (NEIGHBOR+1)*P_NUM * KER_NUM, hipMemcpyDeviceToHost));
fprintf(stdout,"gpu_kernel:%lf %lf %lf %lf\n",kernel[0],kernel[1],kernel[2],kernel[3]);
SAFE_CALL(hipMemcpy(delta_22, gpu_delta_22, sizeof(double) * re_size * KER_NUM, hipMemcpyDeviceToHost));
fprintf(stdout,"gpu_delta_22:%lf %lf %lf %lf",delta_22[0], delta_22[1], delta_22[2], delta_22[3]);
}
SAFE_CALL(hipMemcpy(omega2, gpu_omega2, sizeof(double) * ome_num2, hipMemcpyDeviceToHost));
fprintf(stdout,"Omega2:%lf %lf %lf %lf\n",omega2[0],omega2[1],omega2[2],omega2[3]);
SAFE_CALL(hipMemcpy(bias2, gpu_bias2, sizeof(double) * NEU_NUM2, hipMemcpyDeviceToHost));
fprintf(stdout,"bias2:%lf %lf %lf %lf\n",bias2[0],bias2[1],bias2[2],bias2[3]);*/
}
//evaluate the accuracy on the validation set
double single_rate = 0;
for(int i1=0; i1<valid_size; i1++){
iter = 0;
hipLaunchKernelGGL(( convol), dim3(1),dim3(KER_NUM),(NEIGHBOR+1)*z*sizeof(double), 0, iter,i1,gpu_processed_valid,gpu_kernel,gpu_re,gpu_bias0,3,3,z,re_size);
hipLaunchKernelGGL(( maxpooling), dim3(1),dim3(KER_NUM), 0, 0, iter,gpu_re,gpu_mre,gpu_mre_index,re_size,mre_num);
hipLaunchKernelGGL(( fullconnect), dim3(1),dim3(NEU_NUM1),mre_size * sizeof(double), 0, iter,gpu_mre,gpu_omega1,gpu_bias1,gpu_F1,mre_size);
hipLaunchKernelGGL(( output), dim3(1),dim3(NEU_NUM2), 0, 0, iter,gpu_F1,gpu_omega2,gpu_bias2,gpu_O2);
hipDeviceSynchronize();
SAFE_CALL(hipMemcpy(O2, gpu_O2, sizeof(double) * NEU_NUM2, hipMemcpyDeviceToHost));
hipDeviceSynchronize();
double right = count_err(valid_labels, O2, i1);
single_rate = single_rate + right;
}
single_rate = single_rate/valid_size;
fprintf(stdout,"Current correct rate:%lf\n",single_rate);
if(single_rate > 0.9){
break;
}
/*insert_line(correct_rate,single_rate);//insert the current accuracy into the queue
double new_max = max(correct_rate);//compute the maximum accuracy in the current queue
if(cur_max < new_max){
cur_max = new_max;
count = 1;
}
else{
count ++;
}
if(count >= VALID_BATCH){
LEARN_RATE = LEARN_RATE/2;
fprintf(stdout,"LEARN RATE:%lf\n",LEARN_RATE);
count = 1;
cur_max = new_max;
}*/
}
fprintf(stdout,"Training completed!\n");
end = clock();
tt = double(end - start);
fprintf(stdout,"Using time of training:%lfs\n",tt/CLOCKS_PER_SEC);
start = clock();
//hipDeviceSynchronize();
//SAFE_CALL(hipMemcpy(kernel, gpu_kernel, sizeof(double) * (NEIGHBOR+1) * P_NUM * KER_NUM, hipMemcpyDeviceToHost));
//SAFE_CALL(hipMemcpy(bias0, gpu_bias0, sizeof(double) * KER_NUM, hipMemcpyDeviceToHost));
//SAFE_CALL(hipMemcpy(bias1, gpu_bias1, sizeof(double) * NEU_NUM1, hipMemcpyDeviceToHost));
//SAFE_CALL(hipMemcpy(bias2, gpu_bias2, sizeof(double) * NEU_NUM2, hipMemcpyDeviceToHost));
//SAFE_CALL(hipMemcpy(omega1, gpu_omega1, sizeof(double) * ome_num1, hipMemcpyDeviceToHost));
//SAFE_CALL(hipMemcpy(omega2, gpu_omega2, sizeof(double) * ome_num2, hipMemcpyDeviceToHost));
hipDeviceSynchronize();
//fprintf(stdout,"kernel:%lf %lf %lf %lf\n",kernel[0], kernel[1], kernel[2], kernel[3]);
//write the trained parameters to a .mat file
/*MATFile * pmatFile;
pmatFile = matOpen("model.mat","w");
mxArray * m1 = mxCreateDoubleMatrix((NEIGHBOR+1)*P_NUM,KER_NUM,mxREAL);
memcpy((void *)mxGetPr(m1), (void *)kernel, sizeof(double) * (NEIGHBOR+1) * P_NUM * KER_NUM);
matPutVariable(pmatFile, "kernel", m1);
mxArray * m2 = mxCreateDoubleMatrix(KER_NUM,1,mxREAL);
memcpy((void *)mxGetPr(m2), (void *)bias0, sizeof(double) * KER_NUM);
matPutVariable(pmatFile, "bias0", m2);
mxArray * m3 = mxCreateDoubleMatrix(NEU_NUM1,mre_size,mxREAL);
memcpy((void *)mxGetPr(m3), (void *)omega1, sizeof(double) * ome_num1);
matPutVariable(pmatFile, "omega1", m3);
mxArray * m4 = mxCreateDoubleMatrix(NEU_NUM1,1,mxREAL);
memcpy((void *)mxGetPr(m4), (void *)bias1, sizeof(double) * NEU_NUM1);
matPutVariable(pmatFile, "bias1", m4);
mxArray * m5 = mxCreateDoubleMatrix(NEU_NUM2,NEU_NUM1,mxREAL);
memcpy((void *)mxGetPr(m5), (void *)omega2, sizeof(double) * ome_num2);
matPutVariable(pmatFile, "omega2", m5);
mxArray * m6 = mxCreateDoubleMatrix(NEU_NUM2,1,mxREAL);
memcpy((void *)mxGetPr(m6), (void *)bias2, sizeof(double) * NEU_NUM2);
matPutVariable(pmatFile, "bias2", m6);
matClose(pmatFile);*/
//fprintf(stdout,"mre:%lf %lf %lf\n",mre[0],mre[1],mre[2]);
//fprintf(stdout,"mre_index:%d %d %d\n",mre_index[0],mre_index[1],mre_index[2]);
//fprintf(stdout,"F1 Output:%lf %lf; %lf %lf\n",F1[0],F1[1],F1[98],F1[99]);
//fprintf(stdout,"O2 Output:%lf %lf; %lf %lf\n",O2[0],O2[1],O2[18],O2[19]);
//end = clock();
//tt = double(end - start);
//fprintf(stdout, "Using time of writeback:%lfs\n",tt/CLOCKS_PER_SEC);
//test
double right = 0;
double count0 = 0;
for (int i1=0; i1<test_size; i1++){
int iter = 0;
hipLaunchKernelGGL(( convol), dim3(1),dim3(KER_NUM),(NEIGHBOR+1)*z*sizeof(double), 0, iter,i1,gpu_processed_test,gpu_kernel,gpu_re,gpu_bias0,3,3,z,re_size);
hipDeviceSynchronize();
hipLaunchKernelGGL(( maxpooling), dim3(1),dim3(KER_NUM), 0, 0, iter,gpu_re,gpu_mre,gpu_mre_index,re_size,mre_num);
hipDeviceSynchronize();
hipLaunchKernelGGL(( fullconnect), dim3(1),dim3(NEU_NUM1),mre_size * sizeof(double), 0, iter,gpu_mre,gpu_omega1,gpu_bias1,gpu_F1,mre_size);
hipDeviceSynchronize();
hipLaunchKernelGGL(( output), dim3(1),dim3(NEU_NUM2), 0, 0, iter,gpu_F1,gpu_omega2,gpu_bias2,gpu_O2);
hipDeviceSynchronize();
SAFE_CALL(hipMemcpy(O2, gpu_O2, sizeof(double) * NEU_NUM2, hipMemcpyDeviceToHost));
hipDeviceSynchronize();
//fprintf(stdout,"\n");
right = count_err(test_labels, O2, i1);
count0 = count0 + right;
}
end = clock();
tt = double(end - start);
fprintf(stdout,"Using time of test:%lf\n",tt/CLOCKS_PER_SEC);
return count0/test_size;
}
//main function
int main(int argc, char * argv[])
{
if(!InitCUDA()){
return 0;
}
printf("CUDA initialized.\n");
clock_t start,end;
double *trainset,*trainlabels;
if(argc!=2){
fprintf(stderr, "1 input argument (the .mat data file) is required!\n");
return 1;
}
MATFile * datamat = matOpen(argv[1], "r");
mxArray * train = matGetVariable(datamat,"DataSet");
mxArray * labels = matGetVariable(datamat,"labels");
trainset = (double*)mxGetData(train);
trainlabels = (double*)mxGetData(labels);
const mwSize * dim;
dim = mxGetDimensions(train);//get the number of elements in each dimension of trainset
start = clock();
double correct = training(trainset, trainlabels, dim[0], dim[1], dim[2]);
end = clock();
fprintf(stdout,"Correct Rate:%lf(300 iterations, train size, 0.2)\n",correct);
double usetime = double(end - start);
fprintf(stdout, "Using time of the whole procedure:%lfs\n",usetime/CLOCKS_PER_SEC);
return 0;
}
|
de8ea8bd804a47e318dabcddccf1502286e98d97.cu
|
#include <mat.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <math.h>
#include <matrix.h>
#include <iostream>
#include "cublas_v2.h"
#include "cokus.cpp"
#include "cuda_util.h"
#include <cuda_runtime.h>
using namespace std;
const int KER_NUM = 20;//number of convolution kernels
const int P_NUM = 3;//number of spectral layers per convolution step
const int LEAP = 2;//stride between successive convolution windows
const int GP_NUM = 3;//number of elements per maxpooling group
const int NEU_NUM1 = 100;
const int NEU_NUM2 = 13;//number of output-layer neurons
const int NEIGHBOR = 8;//number of neighboring pixels
double LEARN_RATE = 0.01;
const double MIN_ERR = 0.001;
const int VALID_BATCH = 10;
//const int DATA_BATCH = 512;//process the data of 512 pixels per batch
//CUDA initialization
bool InitCUDA(){
int count;
cudaGetDeviceCount(&count);
if(count==0){
fprintf(stderr,"There is no device.\n");
return false;
}
int i;
for (i =0; i<count;i++){
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop,i)==cudaSuccess){
if(prop.major>=1){ break;
}
}
}
if(i==count){
fprintf(stderr,"There is no device supporting CUDA 1.x.\n");
return false;
}
cudaSetDevice(i);
return true;
}
//copy data to shared memory
__device__ void copy_data_to_shared(double * data, double * data_tmp,int head, int length){
for(int i=0; i<length; i++){
data_tmp[i] = data[i+head];
}
__syncthreads();
}
//GPU-side convolution
__global__ static void convol(int iter,int i0,double * train,double * kernel,double * re,double * bias,int x,int y,int z,int re_size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadNum = blockDim.x * gridDim.x;
int id = tid + iter * threadNum;//current thread id
//each thread convolves one kernel with one 3*3*height column of the image
if (id < KER_NUM){
extern __shared__ double train_tmp[];
//__shared__ double train_tmp[9*200];
int st = i0 * x * y * z;
copy_data_to_shared(train,train_tmp,st,x*y*z);//copy train into shared memory
/*double * ker = new double [x*y*P_NUM];//load the corresponding kernel into registers
for(int i=0; i<x*y*P_NUM; i++){
ker[i] = kernel[id*x*y*P_NUM + i];
}*/
double mid;
//int i_1=0;
for(int i=0; i<re_size; i++){
mid = 0;
int start = i*x*y*LEAP;//starting offset of this convolution step in the training data
for(int j=0; j<x*y*P_NUM; j++){
mid = mid + train_tmp[start + j]*kernel[id*x*y*P_NUM+j];
}
mid = mid + bias[id];
re[i + id*re_size] = 2/(1+(1/exp(2*mid))) - 1;//tanh activation
}
/*for
}*/
}
}
//GPU-side downsampling (maxpooling)
__global__ static void maxpooling(int iter,double * re,double * mre,int * mre_index,int re_size,int mre_num){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadNum = blockDim.x * gridDim.x;
int id = tid + iter * threadNum;
//int res = re_size, mres = mre_num;
//extern __shared__ double re_tmp[];
//copy_data_to_shared(re, re_tmp, 0, re_size*KER_NUM);
if(id < KER_NUM){
double mid;
int mid_index;
for(int i=0; i<mre_num; i++){
mid = re[i*GP_NUM + id*re_size];//holds the first value of each group
mid_index = i*GP_NUM + id*re_size;
for(int j=i*GP_NUM+1; j<(i+1)*GP_NUM && j<re_size; j++){
if(mid < re[j + id*re_size]){
mid = re[j + id*re_size];
mid_index = j+id*re_size;
}
}
mre[i + id * mre_num] = mid;
mre_index[i + id * mre_num] = mid_index;
}
}
}
//fully connected layer: each thread computes one neuron's output
__global__ static void fullconnect(int iter,double * mre,double * omega,double * bias,double * F1,int mre_size){
int tid = blockIdx.x * blockDim.x +threadIdx.x;
int threadNum = blockDim.x * gridDim.x;
int id = tid + iter * threadNum;
if(id < NEU_NUM1){
//copy the mre array into shared memory
//__shared__ double mre_tmp[50 * KER_NUM];
extern __shared__ double mre_tmp[];
copy_data_to_shared(mre,mre_tmp,0,mre_size);
//compute the neuron's output
double mid=0;
for(int i=0; i<mre_size; i++){
mid = mid + omega[id + i*NEU_NUM1] * mre_tmp[i];
}
mid = mid + bias[id];
F1[id] = 2/(1 + 1/exp(mid * 2)) - 1;//tanh activation
}
}
//output layer: each thread computes one neuron's output
__global__ static void output(int iter, double * F1, double * omega2, double * bias, double * O2){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadNum = blockDim.x * gridDim.x;
int id = tid + iter * threadNum;
if(id < NEU_NUM2){
//copy F1 into shared memory
__shared__ double F1_tmp[NEU_NUM1];
copy_data_to_shared(F1, F1_tmp, 0, NEU_NUM1);
//compute the neuron's output
double mid = 0;
for(int i=0; i<NEU_NUM1; i++){
mid = mid + omega2[id + i*NEU_NUM2] * F1_tmp[i];
}
O2[id] = 1/(1 + 1/exp(mid + bias[id]));//sigmoid activation
}
}
/*back propagation*/
//output layer
__global__ static void bp_output(int iter,int train_idx, double LEARN_RATE, double * labels, double * O2, double * bias2, double * delta_L_a, double * delta_L_z)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadNum = blockDim.x * gridDim.x;
int id = tid + iter * threadNum;
if(id < NEU_NUM2){
delta_L_a[id] = -(labels[id + train_idx * NEU_NUM2] - O2[id]);
delta_L_z[id] = delta_L_a[id] * O2[id] *(1 - O2[id]);
bias2[id] = bias2[id] - delta_L_z[id]*LEARN_RATE;
}
}
//fully connected layer
__global__ static void bp_fullconnect(int iter, double LEARN_RATE, double * omega2,double * bias1, double * F1, double * delta_L_a, double * delta_L_z, double *delta_f_a, double * delta_f_z)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadNum = blockDim.x * gridDim.x;
int id = tid + iter * threadNum;
if(id < NEU_NUM1){
double mid = 0;
double delta_f_w;
for(int i=0; i<NEU_NUM2; i++){
mid = mid + omega2[i + id*NEU_NUM2] * delta_L_z[i];
//delta_f_b[i] = delta_L_z[i];
delta_f_w = F1[id] * delta_L_z[i];
omega2[i + id*NEU_NUM2] = omega2[i + id*NEU_NUM2] - LEARN_RATE * delta_f_w;
//bias2[i] = bias2[i] - LEARN_RATE*delta_f_b[i];
}
delta_f_a[id] = mid;
delta_f_z[id] = delta_f_a[id] * (1 + F1[id]) * (1 - F1[id]);
bias1[id] = bias1[id] - LEARN_RATE * delta_f_z[id];
}
}
//maxpooling layer (maps delta_a back to the convolution layer's delta_z)
__global__ static void bp_maxpooling(int iter, int mre_size,double LEARN_RATE, int *mre_index, double * omega1,double *mre, double * delta_f_a, double * delta_f_z, double * delta_m_a, double * delta_22)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadNum = blockDim.x * gridDim.x;
int id = tid + iter * threadNum;
if(id < mre_size){
double mid = 0;
double delta_m_w;
for(int i=0; i<NEU_NUM1; i++){
mid = mid + omega1[i + id*NEU_NUM1] * delta_f_z[i];
//delta_2[i + id*NEU_NUM1] = mid;
delta_m_w = mre[id] * delta_f_z[i];
omega1[i + id*NEU_NUM1] = omega1[i + id*NEU_NUM1] - LEARN_RATE * delta_m_w;
}
delta_m_a[id] = mid;
//delta_2[id] = delta_m_a[id];
//int idx = mre_index[id];
delta_22[mre_index[id]] = delta_m_a[id] * (1 + mre[id]) * (1 - mre[id]);
}
}
//compute and update the kernels
__global__ static void bp_update_kernel(int iter,int i0, double LEARN_RATE, int x, int y, int z, int mre_num,int re_size, int * mre_index, double * delta_22, double * data, double * kernel,double * bias0)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadNum =blockDim.x * gridDim.x;
int id = tid + iter * threadNum;
if(id < KER_NUM){
extern __shared__ double train_tmp[];
copy_data_to_shared(data, train_tmp, x*y*z*i0, x*y*z);
double * delta_k_w = new double [x*y*P_NUM];
for(int i=0; i<x*y*P_NUM;i++)
delta_k_w[i] = 0;
double mid = 0;
for (int i=0; i<mre_num; i++){
int idx = mre_index[i + id*mre_num];
int n = idx % re_size;//the n-th convolution block this index corresponds to
int head = x*y*LEAP*n;
for(int j=0; j<x*y*P_NUM; j++){
delta_k_w[j] = delta_k_w[j] + delta_22[idx] * train_tmp[j+head];
}
mid = mid + delta_22[idx];
}
for(int i=0;i<x*y*P_NUM;i++){
delta_k_w[i] = delta_k_w[i]/mre_num;
kernel[id*x*y*P_NUM+i] = kernel[id*x*y*P_NUM+i] - LEARN_RATE*delta_k_w[i];
}
//double delta_k_b = delta_22[idx];
bias0[id] = bias0[id] - LEARN_RATE*(mid/mre_num);
delete [] delta_k_w;
}
}
//data preprocessing
__global__ static void processing(int iter, double * data, int * train_index, double * processed_data, int x, int y, int z, int train_size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int threadNum = blockDim.x * gridDim.x;
int id = tid + iter * threadNum;
//int idx = id * (NEIGHBOR+1) * z;//start position in processed_data
if (id < train_size){
int idx = id * (NEIGHBOR+1) * z;
for (int i=0; i<z; i++){
for (int j=0; j<(NEIGHBOR+1); j++){
processed_data[idx] = data[train_index[j + id*(NEIGHBOR+1)] + i * x*y];
idx = idx + 1;
}
}
}
}
double lossfunction(double * output, double * labels, int idx){
double l = 0;
for(int i=0; i<NEU_NUM2; i++){
l = l + (output[i] - labels[i + idx*NEU_NUM2]) * (output[i] - labels[i + idx*NEU_NUM2]);
}
l = l/2;
return l;
}
//compute the accuracy
double count_err(double * test_labels, double * output, int test_idx)
{
double right=0;
double max =0;
int idx = 0;
for(int i=0; i<NEU_NUM2; i++){
if(output[i]>max){
max = output[i];
idx = i;
}
}
if((idx+1) == int(test_labels[test_idx]))
right = 1;
return right;
}
//insert into the queue
void insert_line(double * a, double b){
for(int i=1; i<VALID_BATCH; i++){
a[i-1] = a[i];
}
a[VALID_BATCH-1] = b;
}
double max(double * a){
double m=a[0];
for(int i=1; i<VALID_BATCH; i++){
if(m<a[i])
m=a[i];
}
return m;
}
//shuffle
void shuffle(int * data, double * labels, int dim_row, int width){
int index, i;
int temp;
double tmp;
srand(time(NULL));
for(i=0; i<width; i++){
index=rand()%(width-i) + i;
if(index != i){
for(int j=0; j<dim_row; j++){
temp = data[j + i*dim_row];
data[j + i*dim_row] = data[j +index*dim_row];
data[j + index*dim_row] = temp;
}
for(int j=0; j<NEU_NUM2; j++){
tmp = labels[j + i*NEU_NUM2];
labels[j + i*NEU_NUM2] = labels[j + index*NEU_NUM2];
labels[j + index*NEU_NUM2] = tmp;
}
}
}
}
//training
double training(double * data, double * labels, int x, int y, int z){
clock_t start, end;
start = clock();
double * gpu_data;//original data in device memory
double * gpu_processed_train;//preprocessed data in device memory
double * gpu_processed_test;
double * gpu_processed_valid;
int * gpu_train_index;//indices of the training data
int * gpu_test_index;
int * gpu_valid_index;
double * gpu_processed_labels;
//double * gpu_test_labels;
//count the labeled pixels
int data_size = 0;
int * data_index = new int [x*y];
for(int i=0; i<x*y; i++){
if(labels[i] != 0){
data_index[data_size]=i;
data_size ++;
}
}
int test_size = (data_size-1)/6 + 1;
int valid_size = test_size;
int train_size = data_size - test_size - valid_size;
fprintf(stdout,"train_size:%d test_size:%d\n valid_size:%d\n",train_size,test_size,valid_size);
int * train_index = new int [train_size * (NEIGHBOR + 1)];//9 rows, x*y columns; each column stores the indices of a pixel and its neighbors
int * valid_index = new int [valid_size * (NEIGHBOR + 1)];
int * test_index = new int [test_size * (NEIGHBOR+1)];
double * processed_labels = new double [train_size * NEU_NUM2]();
double * test_labels = new double [test_size]();
double * valid_labels = new double [valid_size]();
int tr=0, te=0, va=0;
for (int i=0; i<data_size; i++){
if (i%6 != 0 && i%6 != 1){
train_index[(NEIGHBOR/2) + tr * (NEIGHBOR+1)] = data_index[i];//index of the current pixel
train_index[(NEIGHBOR/2) + tr * (NEIGHBOR+1) - 1] = data_index[i] - 1;
train_index[(NEIGHBOR/2) + tr * (NEIGHBOR+1) + 1] = data_index[i] + 1;
for(int j0=0;j0<3;j0++){
train_index[j0 + tr * (NEIGHBOR+1)] = data_index[i] - 1 - x + j0;
train_index[j0+6 + tr * (NEIGHBOR+1)] = data_index[i] - 1 + x + j0;
}
if((data_index[i] % x) == 0){//first row
for (int j=0; j<3; j++)
train_index[j*3 + tr*(NEIGHBOR+1)] = train_index[j*3+2 + tr*(NEIGHBOR+1)];
}
if((data_index[i] % x) == (x-1)){//last row
for(int j=0;j<3;j++)
train_index[j*3+2 + tr*(NEIGHBOR+1)] = train_index[j*3 + tr*(NEIGHBOR+1)];
}
if((data_index[i]/x) == 0){//first column
for(int j=0;j<3;j++)
train_index[j + tr*(NEIGHBOR+1)] = train_index[j+6 + tr*(NEIGHBOR+1)];
}
if((data_index[i]/x) == (y-1)){//last column
for(int j=0;j<3;j++)
train_index[j+6 + tr*(NEIGHBOR+1)] = train_index[j + tr*(NEIGHBOR+1)];
}
int mid = int(labels[data_index[i]])-1 + tr*NEU_NUM2;
processed_labels[mid] = 1;
tr = tr + 1;
}
if(i%6 == 0){
test_index[(NEIGHBOR/2) + te * (NEIGHBOR+1)] = data_index[i];//index of the current pixel
test_index[(NEIGHBOR/2) + te * (NEIGHBOR+1) - 1] = data_index[i] - 1;
test_index[(NEIGHBOR/2) + te * (NEIGHBOR+1) + 1] = data_index[i] + 1;
for(int j0=0;j0<3;j0++){
test_index[j0 + te * (NEIGHBOR+1)] = data_index[i] - 1 - x + j0;
test_index[j0+6 + te * (NEIGHBOR+1)] = data_index[i] - 1 + x + j0;
}
if((data_index[i] % x) == 0){//first row
for (int j=0; j<3; j++)
test_index[j*3 + te*(NEIGHBOR+1)] = test_index[j*3+2 + te*(NEIGHBOR+1)];
}
if((data_index[i] % x) == (x-1)){//last row
for(int j=0;j<3;j++)
test_index[j*3+2 + te*(NEIGHBOR+1)] = test_index[j*3 + te*(NEIGHBOR+1)];
}
if((data_index[i]/x) == 0){//first column
for(int j=0;j<3;j++)
test_index[j + te*(NEIGHBOR+1)] = test_index[j+6 + te*(NEIGHBOR+1)];
}
if((data_index[i]/x) == (y-1)){//last column
for(int j=0;j<3;j++)
test_index[j+6 + te*(NEIGHBOR+1)] = test_index[j + te*(NEIGHBOR+1)];
}
//int mid = int(labels[data_index[i]])-1 + te*NEU_NUM2;
test_labels[te] = labels[data_index[i]];
te = te + 1;
}
if(i%6 == 1){
valid_index[(NEIGHBOR/2) + va * (NEIGHBOR+1)] = data_index[i];//index of the current pixel
valid_index[(NEIGHBOR/2) + va * (NEIGHBOR+1) - 1] = data_index[i] - 1;
valid_index[(NEIGHBOR/2) + va * (NEIGHBOR+1) + 1] = data_index[i] + 1;
for(int j0=0;j0<3;j0++){
valid_index[j0 + va * (NEIGHBOR+1)] = data_index[i] - 1 - x + j0;
valid_index[j0+6 + va * (NEIGHBOR+1)] = data_index[i] - 1 + x + j0;
}
if((data_index[i] % x) == 0){//first row
for (int j=0; j<3; j++)
valid_index[j*3 + va*(NEIGHBOR+1)] = valid_index[j*3+2 + va*(NEIGHBOR+1)];
}
if((data_index[i] % x) == (x-1)){//last row
for(int j=0;j<3;j++)
valid_index[j*3+2 + va*(NEIGHBOR+1)] = valid_index[j*3 + va*(NEIGHBOR+1)];
}
if((data_index[i]/x) == 0){//first column
for(int j=0;j<3;j++)
valid_index[j + va*(NEIGHBOR+1)] = valid_index[j+6 + va*(NEIGHBOR+1)];
}
if((data_index[i]/x) == (y-1)){//last column
for(int j=0;j<3;j++)
valid_index[j+6 + va*(NEIGHBOR+1)] = valid_index[j + va*(NEIGHBOR+1)];
}
//int mid = int(labels[data_index[i]])-1 + te*NEU_NUM2;
valid_labels[va] = labels[data_index[i]];
va = va + 1;
}
}
shuffle(train_index, processed_labels, (NEIGHBOR+1), train_size);//shuffle the order of the training data
fprintf(stdout,"train_size:%d\n",train_size);
fprintf(stdout,"train_index:%d %d %d %d\ntest_index:%d %d %d %d\nvalid_index:%d %d %d %d\n",train_index[0],train_index[1],train_index[2],train_index[3],test_index[0],test_index[1],test_index[2],test_index[3],valid_index[0],valid_index[1],valid_index[2],valid_index[3]);
fprintf(stdout,"train labels:\n");
for(int i=0; i<NEU_NUM2; i++){
fprintf(stdout,"%lf ",processed_labels[i]);
}
fprintf(stdout,"\n");
fprintf(stdout,"test label:%lf",test_labels[0]);
fprintf(stdout,"valid label:%lf",valid_labels[0]);
//int * train_index = new int [train_size * (NEIGHBOR + 1)];//train_size columns, 9 rows; each row stores the indices of a pixel and its neighbors
//allocate device memory and copy the data to the device
SAFE_CALL(cudaMalloc((void **) &gpu_data, sizeof(double) * x * y * z));
SAFE_CALL(cudaMemcpy(gpu_data, data, sizeof(double)* x * y * z, cudaMemcpyHostToDevice));
SAFE_CALL(cudaMalloc((void **) &gpu_train_index, sizeof(int) * train_size * (NEIGHBOR+1)));
SAFE_CALL(cudaMemcpy(gpu_train_index, train_index, sizeof(int) * train_size * (NEIGHBOR+1), cudaMemcpyHostToDevice));
SAFE_CALL(cudaMalloc((void **) &gpu_test_index, sizeof(int) * test_size * (NEIGHBOR+1)));
SAFE_CALL(cudaMemcpy(gpu_test_index, test_index, sizeof(int) * test_size * (NEIGHBOR+1), cudaMemcpyHostToDevice));
SAFE_CALL(cudaMalloc((void **) &gpu_valid_index, sizeof(int) * valid_size * (NEIGHBOR+1)));
SAFE_CALL(cudaMemcpy(gpu_valid_index, valid_index, sizeof(int) * valid_size * (NEIGHBOR+1), cudaMemcpyHostToDevice));
SAFE_CALL(cudaMalloc((void **) &gpu_processed_valid, sizeof(double) * valid_size * (NEIGHBOR+1) * z));
SAFE_CALL(cudaMalloc((void **) &gpu_processed_test, sizeof(double) * test_size * (NEIGHBOR+1) * z));
SAFE_CALL(cudaMalloc((void **) &gpu_processed_train, sizeof(double) * train_size * (NEIGHBOR+1) * z));//size of each batch of data
int gridsize = 64;
int blocksize = 1024;
//int threadNum = gridsize * blocksize;
double * processed_train = new double [train_size * (NEIGHBOR+1) * z];
double * processed_test = new double [test_size * (NEIGHBOR+1) * z];
double * processed_valid = new double [valid_size * (NEIGHBOR+1) * z];
//preprocessing
int iter=0;
processing<<<gridsize,blocksize>>>(iter, gpu_data, gpu_train_index, gpu_processed_train, x, y, z, train_size);
processing<<<gridsize,blocksize>>>(iter, gpu_data, gpu_test_index, gpu_processed_test, x, y, z, test_size);
processing<<<gridsize,blocksize>>>(iter, gpu_data, gpu_valid_index, gpu_processed_valid, x, y, z, valid_size);
cudaDeviceSynchronize();
end = clock();
double tt = double(end - start);
fprintf(stdout,"Using time of preprocessing:%lf\n",tt/CLOCKS_PER_SEC);
SAFE_CALL(cudaMemcpy(processed_train, gpu_processed_train, sizeof(double) * train_size * (NEIGHBOR+1) * z, cudaMemcpyDeviceToHost));
SAFE_CALL(cudaMemcpy(processed_test, gpu_processed_test, sizeof(double) * test_size * (NEIGHBOR+1) * z, cudaMemcpyDeviceToHost));
SAFE_CALL(cudaMemcpy(processed_valid, gpu_processed_valid, sizeof(double) * valid_size * (NEIGHBOR+1) * z, cudaMemcpyDeviceToHost));
SAFE_CALL(cudaFree(gpu_data));
SAFE_CALL(cudaFree(gpu_train_index));
SAFE_CALL(cudaFree(gpu_test_index));
SAFE_CALL(cudaFree(gpu_valid_index));
cudaDeviceSynchronize();
fprintf(stdout,"Processed train data:%lf %lf %lf %lf\n",processed_train[0],processed_train[1],processed_train[2],processed_train[3]);
fprintf(stdout,"Processed test data:%lf %lf %lf %lf\n",processed_test[0],processed_test[1],processed_test[2],processed_test[3]);
fprintf(stdout,"processed valid data:%lf %lf %lf %lf\n",processed_valid[0],processed_valid[1],processed_valid[2],processed_valid[3]);
start = clock();
//forward propagation
double * kernel = new double [(NEIGHBOR+1)*P_NUM*KER_NUM];
//randomly initialize the kernel array
for(int i=0; i<(NEIGHBOR+1)*P_NUM*KER_NUM; i++){
kernel[i] = 2*(rand()/(double)(RAND_MAX)) - 1 ;
kernel[i] = kernel[i]/20;
if(kernel[i] == 0 )
kernel[i] = 0.005;
}
fprintf(stdout,"kernel:%lf %lf %lf %lf\n",kernel[0], kernel[1], kernel[2], kernel[3]);
//compute the number of results per convolution
int re_size = 0;
for (int i=0; i+P_NUM-1<z; i+=LEAP){
re_size ++;
}
//double * re = new double [re_size * KER_NUM];
fprintf(stdout,"Size of re:%d\n",re_size);
int mre_num = re_size/GP_NUM + 1;//number of maxpooling outputs per kernel: ceil(re_size/GP_NUM)
if(re_size%GP_NUM == 0){
mre_num = re_size / GP_NUM;
}
fprintf(stdout,"mre_num:%d\n",mre_num);
int mre_size = mre_num * KER_NUM;
int ome_num1 = mre_num * KER_NUM * NEU_NUM1;//number of input weights of the first fully connected layer
int ome_num2 = NEU_NUM1 * NEU_NUM2;//number of weights of the output layer
double * gpu_kernel;
double * gpu_bias0;
double * gpu_re;//stores the convolution results
double * gpu_mre;//stores the maxpooling results
int * gpu_mre_index;//stores the index of each group's maximum
double * gpu_omega1;//input weights of the first fully connected layer
double * gpu_F1;//outputs of the first-layer neurons
double * gpu_bias1;
double * gpu_omega2;
double * gpu_O2;
double * gpu_bias2;
double * gpu_delta_La;
double * gpu_delta_Lz;
double * gpu_delta_fa;
double * gpu_delta_fz;
double * gpu_delta_ma;
double * gpu_delta_22;
double * delta_22 = new double [re_size*KER_NUM]();
//copy the labels
SAFE_CALL(cudaMalloc((void**) &gpu_processed_labels, sizeof(double) * train_size * NEU_NUM2));
SAFE_CALL(cudaMemcpy(gpu_processed_labels,processed_labels,sizeof(double) * train_size * NEU_NUM2,cudaMemcpyHostToDevice));
//copy the randomly initialized kernel array
SAFE_CALL(cudaMalloc((void**) &gpu_kernel,sizeof(double) * (NEIGHBOR+1) * P_NUM * KER_NUM));
SAFE_CALL(cudaMemcpy(gpu_kernel,kernel,sizeof(double) * (NEIGHBOR+1) * P_NUM * KER_NUM,cudaMemcpyHostToDevice));
//allocate device memory for the convolution results (gpu_re)
SAFE_CALL(cudaMalloc((void **) &gpu_re,sizeof(double) * re_size * KER_NUM));
//partial derivatives of the output layer
SAFE_CALL(cudaMalloc((void **) &gpu_delta_La, sizeof(double) * NEU_NUM2));
SAFE_CALL(cudaMalloc((void **) &gpu_delta_Lz, sizeof(double) * NEU_NUM2));
//partial derivatives of the fully connected layer
SAFE_CALL(cudaMalloc((void **) &gpu_delta_fa, sizeof(double) * NEU_NUM1));
SAFE_CALL(cudaMalloc((void **) &gpu_delta_fz, sizeof(double) * NEU_NUM1));
//maxpooling
SAFE_CALL(cudaMalloc((void **) &gpu_delta_ma, sizeof(double) * mre_size));
//SAFE_CALL(cudaMalloc((void **) &gpu_delta_mz, sizeof(double) * mre_size));
//input layer
//SAFE_CALL(cudaMalloc((void **) &gpu_delta_2, sizeof(double) * mre_size));
SAFE_CALL(cudaMalloc((void **) &gpu_delta_22,sizeof(double) * re_size * KER_NUM));
SAFE_CALL(cudaMemcpy(gpu_delta_22, delta_22, sizeof(double) * re_size * KER_NUM, cudaMemcpyHostToDevice));
//SAFE_CALL(cudaMalloc((void **) &gpu_delta_kw, sizeof(double) * (NEIGHBOR+1) *P_NUM));
double * omega1 = new double [ome_num1];
double * omega2 = new double [ome_num2];
double * bias0 = new double [KER_NUM];
double * bias1 = new double [NEU_NUM1];
double * bias2 = new double [NEU_NUM2];
//randomly initialize Omega1
for(int i=0; i<ome_num1; i++){
omega1[i] = 2 * (rand()/(double)(RAND_MAX)) - 1;
omega1[i] = omega1[i]/20;
if(omega1[i] == 0)
omega1[i] = 0.01;
}
//randomly initialize bias0
for(int i=0; i<KER_NUM; i++){
bias0[i] = 2*(rand()/(double)(RAND_MAX)) - 1;
bias0[i] = bias0[i]/20;
}
//randomly initialize bias1
for(int i=0; i<NEU_NUM1; i++){
bias1[i] = 2*(rand()/(double)(RAND_MAX)) - 1;
bias1[i] = bias1[i]/20;
}
//randomly initialize Omega2
for(int i=0; i<ome_num2; i++){
omega2[i] = 2 * (rand()/(double)(RAND_MAX)) - 1;
omega2[i] = omega2[i]/20;
if(omega2[i] ==0)
omega2[i] = 0.01;
}
fprintf(stdout, "Bias1: %lf %lf %lf\n",bias1[0],bias1[1],bias1[2]);
//randomly initialize bias2
for(int i=0; i<NEU_NUM2; i++){
bias2[i] = 2*(rand()/(double)(RAND_MAX)) - 1;
bias2[i] = bias2[i]/20;
}
fprintf(stdout, "Bias2: %lf %lf %lf\n",bias2[0],bias2[1],bias2[2]);
SAFE_CALL(cudaMalloc((void **) &gpu_mre, sizeof(double) * mre_num * KER_NUM));//allocate device memory for the maxpooling results (gpu_mre)
SAFE_CALL(cudaMalloc((void **) &gpu_mre_index, sizeof(int) * mre_num * KER_NUM));//allocate device memory for the maxpooling max-value indices
SAFE_CALL(cudaMalloc((void **) &gpu_omega1, sizeof(double) * ome_num1));//input weights of the first fully connected layer
SAFE_CALL(cudaMalloc((void **) &gpu_omega2, sizeof(double) * ome_num2));//weights of the output layer
SAFE_CALL(cudaMalloc((void **) &gpu_F1, sizeof(double) * NEU_NUM1));//outputs of the first layer
SAFE_CALL(cudaMalloc((void **) &gpu_O2, sizeof(double) * NEU_NUM2));//results of the output layer
SAFE_CALL(cudaMalloc((void **) &gpu_bias0, sizeof(double) * KER_NUM));//convolution-layer biases
SAFE_CALL(cudaMalloc((void **) &gpu_bias1, sizeof(double) * NEU_NUM1));//fully-connected-layer biases
SAFE_CALL(cudaMalloc((void **) &gpu_bias2, sizeof(double) * NEU_NUM2));//output-layer biases
SAFE_CALL(cudaMemcpy(gpu_omega1, omega1, sizeof(double) * ome_num1, cudaMemcpyHostToDevice));//copy the initial weights to the GPU
SAFE_CALL(cudaMemcpy(gpu_omega2, omega2, sizeof(double) * ome_num2, cudaMemcpyHostToDevice));
SAFE_CALL(cudaMemcpy(gpu_bias0, bias0, sizeof(double) * KER_NUM, cudaMemcpyHostToDevice));
SAFE_CALL(cudaMemcpy(gpu_bias1, bias1, sizeof(double) * NEU_NUM1, cudaMemcpyHostToDevice));//copy the biases to device memory
SAFE_CALL(cudaMemcpy(gpu_bias2, bias2, sizeof(double) * NEU_NUM2, cudaMemcpyHostToDevice));
//double * mre = new double [mre_num * KER_NUM];//host-side buffer for the maxpooling results
//double * F1 = new double [NEU_NUM1];//host-side buffer for the first layer's outputs
double * O2 = new double [NEU_NUM2];//host-side buffer for the output-layer results
//double * lz = new double [NEU_NUM2];
//double loss;
double * correct_rate = new double [VALID_BATCH];
double cur_max = 0;//stores the current best accuracy
int count=1;
for(int j=0; j<301; j++){
//if (j % 100 == 0)
// fprintf(stdout,"The %dth iteration.\n",j);
//loss = 0;
for(int i0=0; i0<train_size; i0++){
int iter = 0;
//convolution: each thread convolves one kernel with the training sample
convol<<<1,KER_NUM,(NEIGHBOR+1)*z*sizeof(double)>>>(iter,i0,gpu_processed_train,gpu_kernel,gpu_re,gpu_bias0,3,3,z,re_size);
cudaDeviceSynchronize();
//downsampling by maxpooling: each thread handles one column of re
maxpooling<<<1,KER_NUM>>>(iter,gpu_re,gpu_mre,gpu_mre_index,re_size,mre_num);
cudaDeviceSynchronize();
//fully connected layer
fullconnect<<<1,NEU_NUM1,mre_size * sizeof(double)>>>(iter,gpu_mre,gpu_omega1,gpu_bias1,gpu_F1,mre_size);
cudaDeviceSynchronize();
//output layer
output<<<1,NEU_NUM2>>>(iter,gpu_F1,gpu_omega2,gpu_bias2,gpu_O2);
cudaDeviceSynchronize();
//SAFE_CALL(cudaMemcpy(O2, gpu_O2, sizeof(double) * NEU_NUM2, cudaMemcpyDeviceToHost));
//cudaDeviceSynchronize();
//double single_loss = lossfunction(O2, processed_labels, i0);
//loss = loss + single_loss;
//back propagation: output layer
bp_output<<<1,NEU_NUM2>>>(iter,i0,LEARN_RATE,gpu_processed_labels,gpu_O2,gpu_bias2,gpu_delta_La,gpu_delta_Lz);
cudaDeviceSynchronize();
//back propagation: fully connected layer
bp_fullconnect<<<1,NEU_NUM1>>>(iter,LEARN_RATE,gpu_omega2,gpu_bias1,gpu_F1,gpu_delta_La,gpu_delta_Lz,gpu_delta_fa,gpu_delta_fz);
cudaDeviceSynchronize();
//back propagation: maxpooling layer
bp_maxpooling<<<1,mre_size>>>(iter,mre_size,LEARN_RATE,gpu_mre_index,gpu_omega1,gpu_mre,gpu_delta_fa,gpu_delta_fz,gpu_delta_ma,gpu_delta_22);
cudaDeviceSynchronize();
//back propagation: map back to the convolution layer
bp_update_kernel<<<1,KER_NUM,(NEIGHBOR+1)*z*sizeof(double)>>>(iter,i0,LEARN_RATE,3,3,z,mre_num,re_size,gpu_mre_index,gpu_delta_22,gpu_processed_train,gpu_kernel,gpu_bias0);
cudaDeviceSynchronize();
/*if(i0<10){
SAFE_CALL(cudaMemcpy(kernel, gpu_kernel, sizeof(double) * (NEIGHBOR+1)*P_NUM * KER_NUM, cudaMemcpyDeviceToHost));
fprintf(stdout,"gpu_kernel:%lf %lf %lf %lf\n",kernel[0],kernel[1],kernel[2],kernel[3]);
SAFE_CALL(cudaMemcpy(delta_22, gpu_delta_22, sizeof(double) * re_size * KER_NUM, cudaMemcpyDeviceToHost));
fprintf(stdout,"gpu_delta_22:%lf %lf %lf %lf",delta_22[0], delta_22[1], delta_22[2], delta_22[3]);
}
SAFE_CALL(cudaMemcpy(omega2, gpu_omega2, sizeof(double) * ome_num2, cudaMemcpyDeviceToHost));
fprintf(stdout,"Omega2:%lf %lf %lf %lf\n",omega2[0],omega2[1],omega2[2],omega2[3]);
SAFE_CALL(cudaMemcpy(bias2, gpu_bias2, sizeof(double) * NEU_NUM2, cudaMemcpyDeviceToHost));
fprintf(stdout,"bias2:%lf %lf %lf %lf\n",bias2[0],bias2[1],bias2[2],bias2[3]);*/
}
//evaluate the accuracy on the validation set
double single_rate = 0;
for(int i1=0; i1<valid_size; i1++){
iter = 0;
convol<<<1,KER_NUM,(NEIGHBOR+1)*z*sizeof(double)>>>(iter,i1,gpu_processed_valid,gpu_kernel,gpu_re,gpu_bias0,3,3,z,re_size);
maxpooling<<<1,KER_NUM>>>(iter,gpu_re,gpu_mre,gpu_mre_index,re_size,mre_num);
fullconnect<<<1,NEU_NUM1,mre_size * sizeof(double)>>>(iter,gpu_mre,gpu_omega1,gpu_bias1,gpu_F1,mre_size);
output<<<1,NEU_NUM2>>>(iter,gpu_F1,gpu_omega2,gpu_bias2,gpu_O2);
cudaDeviceSynchronize();
SAFE_CALL(cudaMemcpy(O2, gpu_O2, sizeof(double) * NEU_NUM2, cudaMemcpyDeviceToHost));
cudaDeviceSynchronize();
double right = count_err(valid_labels, O2, i1);
single_rate = single_rate + right;
}
single_rate = single_rate/valid_size;
fprintf(stdout,"Current correct rate:%lf\n",single_rate);
if(single_rate > 0.9){
break;
}
/*insert_line(correct_rate,single_rate);//insert the current accuracy into the queue
double new_max = max(correct_rate);//compute the maximum accuracy in the current queue
if(cur_max < new_max){
cur_max = new_max;
count = 1;
}
else{
count ++;
}
if(count >= VALID_BATCH){
LEARN_RATE = LEARN_RATE/2;
fprintf(stdout,"LEARN RATE:%lf\n",LEARN_RATE);
count = 1;
cur_max = new_max;
}*/
}
fprintf(stdout,"Training completed!\n");
end = clock();
tt = double(end - start);
fprintf(stdout,"Using time of training:%lfs\n",tt/CLOCKS_PER_SEC);
start = clock();
//cudaDeviceSynchronize();
//SAFE_CALL(cudaMemcpy(kernel, gpu_kernel, sizeof(double) * (NEIGHBOR+1) * P_NUM * KER_NUM, cudaMemcpyDeviceToHost));
//SAFE_CALL(cudaMemcpy(bias0, gpu_bias0, sizeof(double) * KER_NUM, cudaMemcpyDeviceToHost));
//SAFE_CALL(cudaMemcpy(bias1, gpu_bias1, sizeof(double) * NEU_NUM1, cudaMemcpyDeviceToHost));
//SAFE_CALL(cudaMemcpy(bias2, gpu_bias2, sizeof(double) * NEU_NUM2, cudaMemcpyDeviceToHost));
//SAFE_CALL(cudaMemcpy(omega1, gpu_omega1, sizeof(double) * ome_num1, cudaMemcpyDeviceToHost));
//SAFE_CALL(cudaMemcpy(omega2, gpu_omega2, sizeof(double) * ome_num2, cudaMemcpyDeviceToHost));
cudaDeviceSynchronize();
//fprintf(stdout,"kernel:%lf %lf %lf %lf\n",kernel[0], kernel[1], kernel[2], kernel[3]);
//write the trained parameters to a .mat file
/*MATFile * pmatFile;
pmatFile = matOpen("model.mat","w");
mxArray * m1 = mxCreateDoubleMatrix((NEIGHBOR+1)*P_NUM,KER_NUM,mxREAL);
memcpy((void *)mxGetPr(m1), (void *)kernel, sizeof(double) * (NEIGHBOR+1) * P_NUM * KER_NUM);
matPutVariable(pmatFile, "kernel", m1);
mxArray * m2 = mxCreateDoubleMatrix(KER_NUM,1,mxREAL);
memcpy((void *)mxGetPr(m2), (void *)bias0, sizeof(double) * KER_NUM);
matPutVariable(pmatFile, "bias0", m2);
mxArray * m3 = mxCreateDoubleMatrix(NEU_NUM1,mre_size,mxREAL);
memcpy((void *)mxGetPr(m3), (void *)omega1, sizeof(double) * ome_num1);
matPutVariable(pmatFile, "omega1", m3);
mxArray * m4 = mxCreateDoubleMatrix(NEU_NUM1,1,mxREAL);
memcpy((void *)mxGetPr(m4), (void *)bias1, sizeof(double) * NEU_NUM1);
matPutVariable(pmatFile, "bias1", m4);
mxArray * m5 = mxCreateDoubleMatrix(NEU_NUM2,NEU_NUM1,mxREAL);
memcpy((void *)mxGetPr(m5), (void *)omega2, sizeof(double) * ome_num2);
matPutVariable(pmatFile, "omega2", m5);
mxArray * m6 = mxCreateDoubleMatrix(NEU_NUM2,1,mxREAL);
memcpy((void *)mxGetPr(m6), (void *)bias2, sizeof(double) * NEU_NUM2);
matPutVariable(pmatFile, "bias2", m6);
matClose(pmatFile);*/
//fprintf(stdout,"mre:%lf %lf %lf\n",mre[0],mre[1],mre[2]);
//fprintf(stdout,"mre_index:%d %d %d\n",mre_index[0],mre_index[1],mre_index[2]);
//fprintf(stdout,"F1 Output:%lf %lf; %lf %lf\n",F1[0],F1[1],F1[98],F1[99]);
//fprintf(stdout,"O2 Output:%lf %lf; %lf %lf\n",O2[0],O2[1],O2[18],O2[19]);
//end = clock();
//tt = double(end - start);
//fprintf(stdout, "Using time of writeback:%lfs\n",tt/CLOCKS_PER_SEC);
//test
double right = 0;
double count0 = 0;
for (int i1=0; i1<test_size; i1++){
int iter = 0;
convol<<<1,KER_NUM,(NEIGHBOR+1)*z*sizeof(double)>>>(iter,i1,gpu_processed_test,gpu_kernel,gpu_re,gpu_bias0,3,3,z,re_size);
cudaDeviceSynchronize();
maxpooling<<<1,KER_NUM>>>(iter,gpu_re,gpu_mre,gpu_mre_index,re_size,mre_num);
cudaDeviceSynchronize();
fullconnect<<<1,NEU_NUM1,mre_size * sizeof(double)>>>(iter,gpu_mre,gpu_omega1,gpu_bias1,gpu_F1,mre_size);
cudaDeviceSynchronize();
output<<<1,NEU_NUM2>>>(iter,gpu_F1,gpu_omega2,gpu_bias2,gpu_O2);
cudaDeviceSynchronize();
SAFE_CALL(cudaMemcpy(O2, gpu_O2, sizeof(double) * NEU_NUM2, cudaMemcpyDeviceToHost));
cudaDeviceSynchronize();
//fprintf(stdout,"\n");
right = count_err(test_labels, O2, i1);
count0 = count0 + right;
}
end = clock();
tt = double(end - start);
fprintf(stdout,"Using time of test:%lf\n",tt/CLOCKS_PER_SEC);
return count0/test_size;
}
//main function
int main(int argc, char * argv[])
{
if(!InitCUDA()){
return 0;
}
printf("CUDA initialized.\n");
clock_t start,end;
double *trainset,*trainlabels;
if(argc!=2){
fprintf(stderr, "1 input argument (the .mat data file) is required!\n");
return 1;
}
MATFile * datamat = matOpen(argv[1], "r");
mxArray * train = matGetVariable(datamat,"DataSet");
mxArray * labels = matGetVariable(datamat,"labels");
trainset = (double*)mxGetData(train);
trainlabels = (double*)mxGetData(labels);
const mwSize * dim;
dim = mxGetDimensions(train);//get the number of elements in each dimension of trainset
start = clock();
double correct = training(trainset, trainlabels, dim[0], dim[1], dim[2]);
end = clock();
fprintf(stdout,"Correct Rate:%lf(300 iterations, train size, 0.2)\n",correct);
double usetime = double(end - start);
fprintf(stdout, "Using time of the whole procedure:%lfs\n",usetime/CLOCKS_PER_SEC);
return 0;
}
|
3daee48cc4a146e262c3ed0a67203d71af5cbcbb.hip
|
// !!! This is a file automatically generated by hipify!!!
//
// Created by goforbroke on 26.12.2020.
//
#include <iostream>
#include <cstdlib>
#include <hip/hip_runtime_api.h>
__global__ void vector_add(float *out, float *a, float *b, int n) {
size_t index = threadIdx.x;
size_t stride = blockDim.x;
for (int load = index; load < n; load += stride) { // fake addition load
for (int i = index; i < n; i += stride) {
out[i] = a[i] + b[i];
}
}
}
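// Note: the launch below uses 256 blocks of 256 threads, but the kernel indexes only by
// threadIdx.x/blockDim.x, so every block redundantly repeats the same additions; combined
// with the outer "load" loop this simply inflates the arithmetic work for profiling runs.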
int main(int argc, char **argv) {
size_t NSAMPLES = atoi(argv[1]);
float *a, *b, *out;
float *d_a, *d_b, *d_out;
// Allocate memory
a = (float *) malloc(sizeof(float) * NSAMPLES);
b = (float *) malloc(sizeof(float) * NSAMPLES);
out = (float *) malloc(sizeof(float) * NSAMPLES);
hipMalloc((void **) &d_a, sizeof(float) * NSAMPLES);
hipMalloc((void **) &d_b, sizeof(float) * NSAMPLES);
hipMalloc((void **) &d_out, sizeof(float) * NSAMPLES);
// Initialize array
for (long i = 0; i < NSAMPLES; i++) {
a[i] = 1.0f;
b[i] = 2.0f;
}
hipMemcpy(d_a, a, sizeof(float) * NSAMPLES, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(float) * NSAMPLES, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( vector_add), dim3(256), dim3(256), 0, 0, d_out, d_a, d_b, NSAMPLES);
hipError_t err = hipGetLastError(); // add
if (err != hipSuccess)
std::cout << "CUDA error: " << hipGetErrorString(err) << std::endl; // add
hipProfilerStop();
hipMemcpy(out, d_out, sizeof(float) * NSAMPLES, hipMemcpyDeviceToHost);
hipFree(d_a);
hipFree(d_b);
hipFree(d_out);
free(a);
free(b);
free(out);
}
|
3daee48cc4a146e262c3ed0a67203d71af5cbcbb.cu
|
//
// Created by goforbroke on 26.12.2020.
//
#include <iostream>
#include <cstdlib>
#include <cuda_profiler_api.h>
__global__ void vector_add(float *out, float *a, float *b, int n) {
size_t index = threadIdx.x;
size_t stride = blockDim.x;
for (int load = index; load < n; load += stride) { // fake addition load
for (int i = index; i < n; i += stride) {
out[i] = a[i] + b[i];
}
}
}
int main(int argc, char **argv) {
size_t NSAMPLES = atoi(argv[1]);
float *a, *b, *out;
float *d_a, *d_b, *d_out;
// Allocate memory
a = (float *) malloc(sizeof(float) * NSAMPLES);
b = (float *) malloc(sizeof(float) * NSAMPLES);
out = (float *) malloc(sizeof(float) * NSAMPLES);
cudaMalloc((void **) &d_a, sizeof(float) * NSAMPLES);
cudaMalloc((void **) &d_b, sizeof(float) * NSAMPLES);
cudaMalloc((void **) &d_out, sizeof(float) * NSAMPLES);
// Initialize array
for (long i = 0; i < NSAMPLES; i++) {
a[i] = 1.0f;
b[i] = 2.0f;
}
cudaMemcpy(d_a, a, sizeof(float) * NSAMPLES, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(float) * NSAMPLES, cudaMemcpyHostToDevice);
vector_add<<<256, 256>>>(d_out, d_a, d_b, NSAMPLES);
cudaError_t err = cudaGetLastError(); // add
if (err != cudaSuccess)
std::cout << "CUDA error: " << cudaGetErrorString(err) << std::endl; // add
cudaProfilerStop();
cudaMemcpy(out, d_out, sizeof(float) * NSAMPLES, cudaMemcpyDeviceToHost);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_out);
free(a);
free(b);
free(out);
}
|
4b39e059696ca6543e62ff7918de85e396b832ab.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* A CUDA program that demonstrates how to compute a stereo disparity map using
* SIMD SAD (Sum of Absolute Difference) intrinsics
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, kernels
#include <hip/hip_runtime.h>
#include "stereoDisparity_kernel.cuh"
// includes, project
#include <helper_functions.h> // helper for shared functions that are common to CUDA Samples
#include <helper_cuda.h> // helper for checking cuda initialization and error checking
#include <helper_string.h> // helper functions for string parsing
static const char *sSDKsample = "[stereoDisparity]\0";
int iDivUp(int a, int b)
{
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
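// Illustrative values (not from the original sample): iDivUp(640, 32) == 20 and
// iDivUp(533, 32) == 17, i.e. integer division rounded up to cover all pixels.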
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char **argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char **argv)
{
runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! CUDA Sample for calculating depth maps
////////////////////////////////////////////////////////////////////////////////
void
runTest(int argc, char **argv)
{
hipDeviceProp_t deviceProp;
deviceProp.major = 0;
deviceProp.minor = 0;
int dev = 0;
// This will pick the best possible CUDA capable device
dev = findCudaDevice(argc, (const char **)argv);
checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x20)
{
printf("%s: requires a minimum CUDA compute 2.0 capability\n", sSDKsample);
exit(EXIT_SUCCESS);
}
StopWatchInterface *timer;
sdkCreateTimer(&timer);
// Search parameters
int minDisp = -16;
int maxDisp = 0;
// Load image data
//allocate mem for the images on host side
//initialize pointers to NULL to request lib call to allocate as needed
// PPM images are loaded into 4 byte/pixel memory (RGBX)
unsigned char *h_img0 = NULL;
unsigned char *h_img1 = NULL;
unsigned int w, h;
char *fname0 = sdkFindFilePath("stereo.im0.640x533.ppm", argv[0]);
char *fname1 = sdkFindFilePath("stereo.im1.640x533.ppm", argv[0]);
printf("Loaded <%s> as image 0\n", fname0);
if (!sdkLoadPPM4ub(fname0, &h_img0, &w, &h))
{
fprintf(stderr, "Failed to load <%s>\n", fname0);
}
printf("Loaded <%s> as image 1\n", fname1);
if (!sdkLoadPPM4ub(fname1, &h_img1, &w, &h))
{
fprintf(stderr, "Failed to load <%s>\n", fname1);
}
dim3 numThreads = dim3(blockSize_x, blockSize_y, 1);
dim3 numBlocks = dim3(iDivUp(w, numThreads.x), iDivUp(h, numThreads.y));
unsigned int numData = w*h;
unsigned int memSize = sizeof(int) * numData;
//allocate mem for the result on host side
unsigned int *h_odata = (unsigned int *)malloc(memSize);
//initialize the memory
for (unsigned int i = 0; i < numData; i++)
h_odata[i] = 0;
// allocate device memory for result
unsigned int *d_odata, *d_img0, *d_img1;
checkCudaErrors(hipMalloc((void **) &d_odata, memSize));
checkCudaErrors(hipMalloc((void **) &d_img0, memSize));
checkCudaErrors(hipMalloc((void **) &d_img1, memSize));
// copy host memory to device to initialize to zeros
checkCudaErrors(hipMemcpy(d_img0, h_img0, memSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_img1, h_img1, memSize, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_odata, h_odata, memSize, hipMemcpyHostToDevice));
size_t offset = 0;
hipChannelFormatDesc ca_desc0 = hipCreateChannelDesc<unsigned int>();
hipChannelFormatDesc ca_desc1 = hipCreateChannelDesc<unsigned int>();
tex2Dleft.addressMode[0] = hipAddressModeClamp;
tex2Dleft.addressMode[1] = hipAddressModeClamp;
tex2Dleft.filterMode = hipFilterModePoint;
tex2Dleft.normalized = false;
tex2Dright.addressMode[0] = hipAddressModeClamp;
tex2Dright.addressMode[1] = hipAddressModeClamp;
tex2Dright.filterMode = hipFilterModePoint;
tex2Dright.normalized = false;
checkCudaErrors(hipBindTexture2D(&offset, tex2Dleft, d_img0, ca_desc0, w, h, w*4));
assert(offset == 0);
checkCudaErrors(hipBindTexture2D(&offset, tex2Dright, d_img1, ca_desc1, w, h, w*4));
assert(offset == 0);
// First run the warmup kernel (which we'll use to get the GPU in the correct max power state)
hipLaunchKernelGGL(( stereoDisparityKernel), dim3(numBlocks), dim3(numThreads), 0, 0, d_img0, d_img1, d_odata, w, h, minDisp, maxDisp);
hipDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
printf("Launching CUDA stereoDisparityKernel()\n");
// Record the start event
checkCudaErrors(hipEventRecord(start, NULL));
// launch the stereoDisparity kernel
hipLaunchKernelGGL(( stereoDisparityKernel), dim3(numBlocks), dim3(numThreads), 0, 0, d_img0, d_img1, d_odata, w, h, minDisp, maxDisp);
// Record the stop event
checkCudaErrors(hipEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
// Check to make sure the kernel didn't fail
getLastCudaError("Kernel execution failed");
float msecTotal = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
//Copy result from device to host for verification
checkCudaErrors(hipMemcpy(h_odata, d_odata, memSize, hipMemcpyDeviceToHost));
printf("Input Size [%dx%d], ", w, h);
printf("Kernel size [%dx%d], ", (2*RAD+1), (2*RAD+1));
printf("Disparities [%d:%d]\n", minDisp, maxDisp);
printf("GPU processing time : %.4f (ms)\n", msecTotal);
printf("Pixel throughput : %.3f Mpixels/sec\n", ((float)(w *h*1000.f)/msecTotal)/1000000);
// calculate sum of resultant GPU image
unsigned int checkSum = 0;
for (unsigned int i=0 ; i<w *h ; i++)
{
checkSum += h_odata[i];
}
printf("GPU Checksum = %u, ", checkSum);
// write out the resulting disparity image.
unsigned char *dispOut = (unsigned char *)malloc(numData);
int mult = 20;
const char *fnameOut = "output_GPU.pgm";
for (unsigned int i=0; i<numData; i++)
{
dispOut[i] = (int)h_odata[i]*mult;
}
printf("GPU image: <%s>\n", fnameOut);
sdkSavePGM(fnameOut, dispOut, w, h);
//compute reference solution
printf("Computing CPU reference...\n");
cpu_gold_stereo((unsigned int *)h_img0, (unsigned int *)h_img1, (unsigned int *)h_odata, w, h, minDisp, maxDisp);
unsigned int cpuCheckSum = 0;
for (unsigned int i=0 ; i<w *h ; i++)
{
cpuCheckSum += h_odata[i];
}
printf("CPU Checksum = %u, ", cpuCheckSum);
const char *cpuFnameOut = "output_CPU.pgm";
for (unsigned int i=0; i<numData; i++)
{
dispOut[i] = (int)h_odata[i]*mult;
}
printf("CPU image: <%s>\n", cpuFnameOut);
sdkSavePGM(cpuFnameOut, dispOut, w, h);
// cleanup memory
checkCudaErrors(hipFree(d_odata));
checkCudaErrors(hipFree(d_img0));
checkCudaErrors(hipFree(d_img1));
if (h_odata != NULL) free(h_odata);
if (h_img0 != NULL) free(h_img0);
if (h_img1 != NULL) free(h_img1);
if (dispOut != NULL) free(dispOut);
sdkDeleteTimer(&timer);
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
exit((checkSum == cpuCheckSum) ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
4b39e059696ca6543e62ff7918de85e396b832ab.cu
|
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* A CUDA program that demonstrates how to compute a stereo disparity map using
* SIMD SAD (Sum of Absolute Difference) intrinsics
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, kernels
#include <cuda_runtime.h>
#include "stereoDisparity_kernel.cuh"
// includes, project
#include <helper_functions.h> // helper for shared functions that are common to CUDA Samples
#include <helper_cuda.h> // helper for checking cuda initialization and error checking
#include <helper_string.h> // helper functions for string parsing
static const char *sSDKsample = "[stereoDisparity]\0";
int iDivUp(int a, int b)
{
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char **argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char **argv)
{
runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! CUDA Sample for calculating depth maps
////////////////////////////////////////////////////////////////////////////////
void
runTest(int argc, char **argv)
{
cudaDeviceProp deviceProp;
deviceProp.major = 0;
deviceProp.minor = 0;
int dev = 0;
// This will pick the best possible CUDA capable device
dev = findCudaDevice(argc, (const char **)argv);
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x20)
{
printf("%s: requires a minimum CUDA compute 2.0 capability\n", sSDKsample);
exit(EXIT_SUCCESS);
}
StopWatchInterface *timer;
sdkCreateTimer(&timer);
// Search parameters
int minDisp = -16;
int maxDisp = 0;
// Load image data
//allocate mem for the images on host side
//initialize pointers to NULL to request lib call to allocate as needed
// PPM images are loaded into 4 byte/pixel memory (RGBX)
unsigned char *h_img0 = NULL;
unsigned char *h_img1 = NULL;
unsigned int w, h;
char *fname0 = sdkFindFilePath("stereo.im0.640x533.ppm", argv[0]);
char *fname1 = sdkFindFilePath("stereo.im1.640x533.ppm", argv[0]);
printf("Loaded <%s> as image 0\n", fname0);
if (!sdkLoadPPM4ub(fname0, &h_img0, &w, &h))
{
fprintf(stderr, "Failed to load <%s>\n", fname0);
}
printf("Loaded <%s> as image 1\n", fname1);
if (!sdkLoadPPM4ub(fname1, &h_img1, &w, &h))
{
fprintf(stderr, "Failed to load <%s>\n", fname1);
}
dim3 numThreads = dim3(blockSize_x, blockSize_y, 1);
dim3 numBlocks = dim3(iDivUp(w, numThreads.x), iDivUp(h, numThreads.y));
unsigned int numData = w*h;
unsigned int memSize = sizeof(int) * numData;
//allocate mem for the result on host side
unsigned int *h_odata = (unsigned int *)malloc(memSize);
//initialize the memory
for (unsigned int i = 0; i < numData; i++)
h_odata[i] = 0;
// allocate device memory for result
unsigned int *d_odata, *d_img0, *d_img1;
checkCudaErrors(cudaMalloc((void **) &d_odata, memSize));
checkCudaErrors(cudaMalloc((void **) &d_img0, memSize));
checkCudaErrors(cudaMalloc((void **) &d_img1, memSize));
// copy host memory to device to initialize to zeros
checkCudaErrors(cudaMemcpy(d_img0, h_img0, memSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_img1, h_img1, memSize, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_odata, h_odata, memSize, cudaMemcpyHostToDevice));
size_t offset = 0;
cudaChannelFormatDesc ca_desc0 = cudaCreateChannelDesc<unsigned int>();
cudaChannelFormatDesc ca_desc1 = cudaCreateChannelDesc<unsigned int>();
tex2Dleft.addressMode[0] = cudaAddressModeClamp;
tex2Dleft.addressMode[1] = cudaAddressModeClamp;
tex2Dleft.filterMode = cudaFilterModePoint;
tex2Dleft.normalized = false;
tex2Dright.addressMode[0] = cudaAddressModeClamp;
tex2Dright.addressMode[1] = cudaAddressModeClamp;
tex2Dright.filterMode = cudaFilterModePoint;
tex2Dright.normalized = false;
checkCudaErrors(cudaBindTexture2D(&offset, tex2Dleft, d_img0, ca_desc0, w, h, w*4));
assert(offset == 0);
checkCudaErrors(cudaBindTexture2D(&offset, tex2Dright, d_img1, ca_desc1, w, h, w*4));
assert(offset == 0);
// First run the warmup kernel (which we'll use to get the GPU in the correct max power state)
stereoDisparityKernel<<<numBlocks, numThreads>>>(d_img0, d_img1, d_odata, w, h, minDisp, maxDisp);
cudaDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
printf("Launching CUDA stereoDisparityKernel()\n");
// Record the start event
checkCudaErrors(cudaEventRecord(start, NULL));
// launch the stereoDisparity kernel
stereoDisparityKernel<<<numBlocks, numThreads>>>(d_img0, d_img1, d_odata, w, h, minDisp, maxDisp);
// Record the stop event
checkCudaErrors(cudaEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
// Check to make sure the kernel didn't fail
getLastCudaError("Kernel execution failed");
float msecTotal = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
//Copy result from device to host for verification
checkCudaErrors(cudaMemcpy(h_odata, d_odata, memSize, cudaMemcpyDeviceToHost));
printf("Input Size [%dx%d], ", w, h);
printf("Kernel size [%dx%d], ", (2*RAD+1), (2*RAD+1));
printf("Disparities [%d:%d]\n", minDisp, maxDisp);
printf("GPU processing time : %.4f (ms)\n", msecTotal);
printf("Pixel throughput : %.3f Mpixels/sec\n", ((float)(w *h*1000.f)/msecTotal)/1000000);
// calculate sum of resultant GPU image
unsigned int checkSum = 0;
for (unsigned int i=0 ; i<w *h ; i++)
{
checkSum += h_odata[i];
}
printf("GPU Checksum = %u, ", checkSum);
// write out the resulting disparity image.
unsigned char *dispOut = (unsigned char *)malloc(numData);
int mult = 20;
const char *fnameOut = "output_GPU.pgm";
for (unsigned int i=0; i<numData; i++)
{
dispOut[i] = (int)h_odata[i]*mult;
}
printf("GPU image: <%s>\n", fnameOut);
sdkSavePGM(fnameOut, dispOut, w, h);
//compute reference solution
printf("Computing CPU reference...\n");
cpu_gold_stereo((unsigned int *)h_img0, (unsigned int *)h_img1, (unsigned int *)h_odata, w, h, minDisp, maxDisp);
unsigned int cpuCheckSum = 0;
for (unsigned int i=0 ; i<w *h ; i++)
{
cpuCheckSum += h_odata[i];
}
printf("CPU Checksum = %u, ", cpuCheckSum);
const char *cpuFnameOut = "output_CPU.pgm";
for (unsigned int i=0; i<numData; i++)
{
dispOut[i] = (int)h_odata[i]*mult;
}
printf("CPU image: <%s>\n", cpuFnameOut);
sdkSavePGM(cpuFnameOut, dispOut, w, h);
// cleanup memory
checkCudaErrors(cudaFree(d_odata));
checkCudaErrors(cudaFree(d_img0));
checkCudaErrors(cudaFree(d_img1));
if (h_odata != NULL) free(h_odata);
if (h_img0 != NULL) free(h_img0);
if (h_img1 != NULL) free(h_img1);
if (dispOut != NULL) free(dispOut);
sdkDeleteTimer(&timer);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
exit((checkSum == cpuCheckSum) ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
8a078ac2c06b2f776533f50bafb50d2943891ae9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/nn/nms.h"
#include "cudakernel/nn/topk.h"
#include "cudakernel/math/math.h"
#include "cudakernel/common/common.h"
#include "ppl/nn/common/tensor_shape.h"
#include <hip/hip_fp16.h>
#include <float.h>
#include <memory>
#define CUDA_ALIGNMENT 128
constexpr int N_BOX_DIM = 4;
constexpr int NMS_BLOCK_SIZE = 8 * sizeof(int64_t);
template <typename T>
__device__ __inline__ void maxMin(T a, T b, T& min_val, T& max_val)
{
if (Math<T, T, T>::gt(a, b)) {
min_val = b;
max_val = a;
} else {
min_val = a;
max_val = b;
}
}
template <typename T>
__device__ __inline__ T Max(T a, T b)
{
if (Math<T, T, T>::gt(a, b)) {
return a;
} else {
return b;
}
}
template <typename T>
__device__ __inline__ T Min(T a, T b)
{
if (Math<T, T, T>::lt(a, b)) {
return a;
} else {
return b;
}
}
template <typename T>
__device__ bool devIoU(const T* box_i, const T* box_j, T iou_threshold, int center_point_box)
{
// use math helper
typedef Math<T, T, T> OpMath;
T ix_min, ix_max, iy_min, iy_max;
T jx_min, jx_max, jy_min, jy_max;
if (center_point_box == 0) {
maxMin(box_i[0], box_i[2], iy_min, iy_max);
maxMin(box_i[1], box_i[3], ix_min, ix_max);
maxMin(box_j[0], box_j[2], jy_min, jy_max);
maxMin(box_j[1], box_j[3], jx_min, jx_max);
} else {
T iw_half = OpMath::div(box_i[2], (T)2);
T ih_half = OpMath::div(box_i[3], (T)2);
ix_min = OpMath::sub(box_i[0], iw_half);
ix_max = OpMath::add(box_i[0], iw_half);
iy_min = OpMath::sub(box_i[1], ih_half);
iy_max = OpMath::add(box_i[1], ih_half);
T jw_half = OpMath::div(box_j[2], (T)2);
T jh_half = OpMath::div(box_j[3], (T)2);
jx_min = OpMath::sub(box_j[0], jw_half);
jx_max = OpMath::add(box_j[0], jw_half);
jy_min = OpMath::sub(box_j[1], jh_half);
jy_max = OpMath::add(box_j[1], jh_half);
}
T interx_min, interx_max, intery_min, intery_max;
interx_min = Max(ix_min, jx_min);
intery_min = Max(iy_min, jy_min);
interx_max = Min(ix_max, jx_max);
intery_max = Min(iy_max, jy_max);
T inter_area = OpMath::mul(Max(OpMath::sub(interx_max, interx_min), (T)0),
Max(OpMath::sub(intery_max, intery_min), (T)0));
if (OpMath::le(inter_area, (T)0))
return false;
T i_area = OpMath::mul(OpMath::sub(ix_max, ix_min), OpMath::sub(iy_max, iy_min));
T j_area = OpMath::mul(OpMath::sub(jx_max, jx_min), OpMath::sub(jy_max, jy_min));
T union_area = OpMath::sub(OpMath::add(i_area, j_area), inter_area);
if (OpMath::le(i_area, (T)0) || OpMath::le(j_area, (T)0) ||
OpMath::le(union_area, (T)0))
return false;
T iou_ratio = OpMath::div(inter_area, union_area);
return OpMath::gt(iou_ratio, iou_threshold);
}
template <typename T>
__global__ __launch_bounds__(NMS_BLOCK_SIZE) void nms_one_one_kernel(
int num_boxes,
int num_blocks,
const T* boxes,
float iou_threshold,
int center_point_box,
uint64_t* out_mask)
{
T t_iou_threshold = (T)iou_threshold;
__shared__ T s_boxes[NMS_BLOCK_SIZE * N_BOX_DIM];
// step 1: load col boxes to shared memory
int tid = threadIdx.x;
int col_boxes_start = blockIdx.x * NMS_BLOCK_SIZE;
int row_boxes_start = blockIdx.y * NMS_BLOCK_SIZE;
// no need to compute (redundant)
if (col_boxes_start < row_boxes_start)
return;
// last thread block may overflow
int col_size = min(num_boxes - col_boxes_start, NMS_BLOCK_SIZE);
if (tid < col_size) {
s_boxes[tid * N_BOX_DIM + 0] = boxes[(col_boxes_start + tid) * N_BOX_DIM + 0];
s_boxes[tid * N_BOX_DIM + 1] = boxes[(col_boxes_start + tid) * N_BOX_DIM + 1];
s_boxes[tid * N_BOX_DIM + 2] = boxes[(col_boxes_start + tid) * N_BOX_DIM + 2];
s_boxes[tid * N_BOX_DIM + 3] = boxes[(col_boxes_start + tid) * N_BOX_DIM + 3];
}
__syncthreads();
// step 2: iou mask with #NMS_BLOCK_SIZE boxes in smem
int row_size = min(num_boxes - row_boxes_start, NMS_BLOCK_SIZE);
if (tid < row_size) {
uint64_t mask = 0;
int cur_box = row_boxes_start + tid;
int start = (row_boxes_start == col_boxes_start) ? tid + 1 : 0;
const T *box_row = boxes + cur_box * N_BOX_DIM;
for (int it = start; it < col_size; ++it) {
if (devIoU(box_row, s_boxes + it * N_BOX_DIM, t_iou_threshold, center_point_box)) {
mask |= (1ULL << it);
}
}
int out_idx = cur_box * num_blocks + blockIdx.x;
out_mask[out_idx] = mask;
}
}
__device__ __inline__ bool isBitSet(uint64_t *mask, int pos)
{
constexpr int num_bits = 6; // int64_t
constexpr int mod_num = 63;
int mask_pos = pos >> num_bits; // div(64)
int rem_pos = pos & mod_num; // %(64)
return (mask[mask_pos] >> rem_pos) & 1;
}
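// Worked example (illustrative only): for pos = 130, mask_pos = 130 >> 6 = 2 and
// rem_pos = 130 & 63 = 2, so the call tests bit 2 of mask[2].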
// only launch one thread block
__global__ void nms_reduce_mask_kernel(
int num_blocks,
int num_boxes,
int max_boxes,
const uint64_t* in_mask,
bool* reduced_mask)
{
extern __shared__ uint64_t s_reduced_mask[];
int tid = threadIdx.x;
for (int it = tid; it < num_blocks; it += blockDim.x) {
s_reduced_mask[it] = 0xFFFFFFFFFFFFFFFF;
}
__syncthreads();
int accepted_boxes = 0;
// no need to deal with last box's mask: num_boxes - 1
for (int b_idx = 0; b_idx < num_boxes - 1; ++b_idx) {
if (!isBitSet(s_reduced_mask, b_idx))
continue;
++accepted_boxes;
const uint64_t *cur_mask = in_mask + b_idx * num_blocks;
for (int it = tid; it < num_blocks; it += blockDim.x) {
s_reduced_mask[it] &= ~cur_mask[it];
}
__syncthreads();
if (accepted_boxes >= max_boxes)
break;
}
for (int it = tid; it < num_boxes; it += blockDim.x) {
reduced_mask[it] = isBitSet(s_reduced_mask, it);
}
}
__global__ void nms_reduce_mask_kernel_global(
int num_blocks,
int num_boxes,
int max_boxes,
const uint64_t* in_mask,
uint64_t* g_reduce_mask,
bool* reduced_mask)
{
int tid = threadIdx.x;
for (int it = tid; it < num_blocks; it += blockDim.x) {
g_reduce_mask[it] = 0xFFFFFFFFFFFFFFFF;
}
__syncthreads();
int accepted_boxes = 0;
// no need to deal with last box's mask: num_boxes - 1
for (int b_idx = 0; b_idx < num_boxes - 1; ++b_idx) {
if (!isBitSet(g_reduce_mask, b_idx))
continue;
++accepted_boxes;
const uint64_t *cur_mask = in_mask + b_idx * num_blocks;
for (int it = tid; it < num_blocks; it += blockDim.x) {
g_reduce_mask[it] &= ~cur_mask[it];
}
__syncthreads();
if (accepted_boxes >= max_boxes)
break;
}
for (int it = tid; it < num_boxes; it += blockDim.x) {
reduced_mask[it] = isBitSet(g_reduce_mask, it);
}
}
template <typename T>
__global__ void indexSelectBoxes(
int num_filtered_boxes,
int32_t* sorted_indices,
const T* boxes,
T* sorted_boxes)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_filtered_boxes)
return;
int in_index = sorted_indices[index] * N_BOX_DIM;
int out_index = index * N_BOX_DIM;
#pragma unroll
for (int it = 0; it < N_BOX_DIM; ++it) {
sorted_boxes[out_index + it] = boxes[in_index + it];
}
}
// only launch 1 thread block (256 threads)
template <typename T>
__global__ void countScoresBoxes(
int num_boxes,
float score_threshold,
const T* sorted_boxes,
int* d_num_filter_scores)
{
int tid = threadIdx.x;
T t_score_threshold = (T)score_threshold;
__shared__ int acc_num[256];
acc_num[tid] = 0;
__syncthreads();
for (int it = tid; it < num_boxes; it += blockDim.x) {
acc_num[tid] += Math<T, T, T>::gt(sorted_boxes[it], t_score_threshold) ? 1 : 0;
}
for (int it = 128; it > 0; it = it >> 1) {
if (tid < it)
acc_num[tid] += acc_num[tid + it];
__syncthreads();
}
if (tid == 0)
d_num_filter_scores[0] = acc_num[0];
}
template <typename T>
void NMSGpuImpl(
hipStream_t stream,
const T* sorted_boxes,
float iou_threshold,
int num_filtered_boxes,
int max_output_boxes_per_class,
int center_point_box,
int max_shared_mem,
uint64_t* g_reduce_mask,
uint64_t* dev_mask,
bool* result_mask)
{
// step 1: calculate all iou
constexpr int block_size = NMS_BLOCK_SIZE;
int num_blocks = DivUp(num_filtered_boxes, block_size);
dim3 grid_size(num_blocks, num_blocks);
hipLaunchKernelGGL(( nms_one_one_kernel), dim3(grid_size), dim3(block_size), 0, stream, num_filtered_boxes,
num_blocks,
sorted_boxes,
iou_threshold,
center_point_box,
dev_mask);
// step 2: mask reduce
int32_t reduced_mask_size = num_blocks * block_size;
if (max_shared_mem > reduced_mask_size) {
// #boxes should not exceed #bits in shared memory
hipLaunchKernelGGL(( nms_reduce_mask_kernel), dim3(1), dim3(1024), reduced_mask_size, stream, num_blocks,
num_filtered_boxes,
max_output_boxes_per_class,
dev_mask,
result_mask);
} else {
// use global memory
hipLaunchKernelGGL(( nms_reduce_mask_kernel_global), dim3(1), dim3(1024), 0, stream, num_blocks,
num_filtered_boxes,
max_output_boxes_per_class,
dev_mask,
g_reduce_mask,
result_mask);
}
}
int64_t PPLNMSGetTempBufferSize(const ppl::nn::TensorShape* scores_shape)
{
int64_t total_size = 0;
int elem_size = ppl::common::GetSizeOfDataType(scores_shape->GetDataType());
int num_class = scores_shape->GetDim(1);
int num_boxes = scores_shape->GetDim(2);
ppl::nn::TensorShape indices_shape(*scores_shape);
indices_shape.Reshape({num_class, num_boxes});
indices_shape.SetDataType(ppl::common::DATATYPE_INT64); // max int64
int axis = 1;
total_size += Align(PPLTopKGetTempBufferSize(&indices_shape, num_boxes, axis), CUDA_ALIGNMENT);
total_size += Align(elem_size * num_class * num_boxes, CUDA_ALIGNMENT); // sorted scores;
total_size += Align(sizeof(int64_t) * num_class * num_boxes, CUDA_ALIGNMENT); // sorted indices;
total_size += Align(elem_size * N_BOX_DIM * num_boxes, CUDA_ALIGNMENT); // sorted boxes;
int blocks = DivUp(num_boxes, NMS_BLOCK_SIZE);
total_size += Align(sizeof(uint64_t) * blocks * num_boxes, CUDA_ALIGNMENT); // one-one mapping mask;
total_size += Align(sizeof(bool) * num_boxes, CUDA_ALIGNMENT); // reduced mask;
total_size += Align(sizeof(int), CUDA_ALIGNMENT); // count filtered boxes number
// reduce needed
int num_blocks = DivUp(num_boxes, NMS_BLOCK_SIZE);
total_size += Align(num_blocks * NMS_BLOCK_SIZE, CUDA_ALIGNMENT); // count filtered boxes number
return total_size;
}
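// Temp-buffer layout sketch (derived from the sizes accumulated above; each
// region is aligned to CUDA_ALIGNMENT and carved out in this order by
// PPLCUDANMSForwardImp below):
//   [topk workspace][sorted scores][sorted indices][sorted boxes]
//   [pairwise IoU masks][reduced bool mask][filtered-box count][global reduce mask]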
ppl::common::RetCode PPLCUDANMSForwardImp(
hipStream_t stream,
ppl::nn::TensorShape* boxes_shape,
const void* boxes,
ppl::nn::TensorShape* scores_shape,
const void* scores,
ppl::nn::TensorShape* output_shape,
int64_t* output,
void* temp_buffer,
int64_t temp_buffer_bytes,
int device_id,
int center_point_box,
int max_output_boxes_per_class,
float iou_threshold,
float score_threshold)
{
int num_selected_indices = 0;
int num_batch = boxes_shape->GetDim(0);
int num_boxes = boxes_shape->GetDim(1);
int num_class = scores_shape->GetDim(1);
// shape for top-k use
int elem_size = ppl::common::GetSizeOfDataType(scores_shape->GetDataType());
hipDeviceProp_t gpu_prob;
hipGetDeviceProperties(&gpu_prob, device_id);
int max_shared_mem = gpu_prob.sharedMemPerBlock;
// temp buffer for sort & nms & construct
ppl::nn::TensorShape topk_shape(*scores_shape);
topk_shape.Reshape({num_class, num_boxes});
topk_shape.SetDataType(scores_shape->GetDataType());
ppl::nn::TensorShape indices_shape(*scores_shape);
indices_shape.Reshape({num_class, num_boxes});
indices_shape.SetDataType(ppl::common::DATATYPE_INT32);
int axis = 1;
int topk_buffer_size = PPLTopKGetTempBufferSize(&indices_shape, num_boxes, axis);
void *topk_buffer = temp_buffer;
void *sorted_scores_tot = static_cast<void *>(static_cast<char *>(topk_buffer) + Align(topk_buffer_size, CUDA_ALIGNMENT));
int32_t *sorted_indices_tot = reinterpret_cast<int32_t *>(
static_cast<char *>(sorted_scores_tot) + Align(elem_size * num_class * num_boxes, CUDA_ALIGNMENT));
void *sorted_boxes = static_cast<void *>((char *)sorted_indices_tot + Align(num_class * num_boxes * sizeof(int32_t), CUDA_ALIGNMENT));
uint64_t *dev_mask = reinterpret_cast<uint64_t *>(
static_cast<char *>(sorted_boxes) + Align(elem_size * N_BOX_DIM * num_boxes, CUDA_ALIGNMENT));
// each bit in the int64_t mask represents one IoU result
int blocks = DivUp(num_boxes, NMS_BLOCK_SIZE);
bool *result_mask = reinterpret_cast<bool *>((char *)dev_mask + Align(blocks * num_boxes * sizeof(uint64_t), CUDA_ALIGNMENT));
int *d_num_filter_scores = reinterpret_cast<int *>(result_mask + Align(num_boxes * sizeof(bool), CUDA_ALIGNMENT));
uint64_t *g_reduce_mask = reinterpret_cast<uint64_t *>((char *)d_num_filter_scores + Align(sizeof(int), CUDA_ALIGNMENT));
// process one class at a time
for (int b = 0; b < num_batch; ++b) {
// step 1: sort scores and index select boxes
hipMemset(temp_buffer, 0, temp_buffer_bytes);
PPLCUDATopKForwardImp(stream,
&topk_shape,
static_cast<const float *>(scores) + b * scores_shape->GetElementsFromDimensionIncludingPadding(1),
&topk_shape,
sorted_scores_tot,
&indices_shape,
sorted_indices_tot,
topk_buffer,
topk_buffer_size,
num_boxes,
axis);
// int nms_buffer_size = temp_buffer_bytes - Align(topk_buffer_size, CUDA_ALIGNMENT) -
// Align(elem_size * num_class * num_boxes, CUDA_ALIGNMENT) -
// Align(num_class * num_boxes * sizeof(int32_t), CUDA_ALIGNMENT);
for (int c = 0; c < num_class; ++c) {
// reset to zero each iteration (Not necessary)
// hipMemset(sorted_boxes, 0, nms_buffer_size);
float *sorted_scores = static_cast<float*>(sorted_scores_tot) + c * num_boxes;//+ b * num_class * num_boxes + c * num_boxes;
int32_t *sorted_indices = sorted_indices_tot + c * num_boxes;// + b * num_class * num_boxes + c * num_boxes;
int num_selected_indices_per_class = 0;
int num_filtered_boxes = 0;
// count scores above score_threshold
{
int block_size = 256;
if (boxes_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) {
hipLaunchKernelGGL(( countScoresBoxes), dim3(1), dim3(block_size), 0, stream,
num_boxes, score_threshold, (const float *)sorted_scores, d_num_filter_scores);
} else if (boxes_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
hipLaunchKernelGGL(( countScoresBoxes), dim3(1), dim3(block_size), 0, stream,
num_boxes, score_threshold, (const half *)sorted_scores, d_num_filter_scores);
} else {
return ppl::common::RC_UNSUPPORTED;
}
hipMemcpyAsync((void *)(&num_filtered_boxes), d_num_filter_scores, sizeof(int), hipMemcpyDeviceToHost, stream);
}
hipStreamSynchronize(stream);
// index select
if (num_filtered_boxes > 0) {
{
int block_size = 256;
int grid_size = DivUp(num_filtered_boxes, block_size);
if (boxes_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) {
hipLaunchKernelGGL(( indexSelectBoxes), dim3(grid_size), dim3(block_size), 0, stream,
num_filtered_boxes, sorted_indices, static_cast<const float *>(boxes) + b * num_boxes * 4, (float *)sorted_boxes);
} else if (boxes_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
hipLaunchKernelGGL(( indexSelectBoxes), dim3(grid_size), dim3(block_size), 0, stream,
num_filtered_boxes, sorted_indices, static_cast<const half *>(boxes) + b * num_boxes * 4, (half *)sorted_boxes);
} else {
return ppl::common::RC_UNSUPPORTED;
}
}
// step 2: nms operations (type related)
if (boxes_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) {
NMSGpuImpl<float>(stream, (const float *)sorted_boxes, iou_threshold, num_filtered_boxes, max_output_boxes_per_class, center_point_box, max_shared_mem, g_reduce_mask, dev_mask, result_mask);
} else if (boxes_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
NMSGpuImpl<half>(stream, (const half *)sorted_boxes, iou_threshold, num_filtered_boxes, max_output_boxes_per_class, center_point_box, max_shared_mem, g_reduce_mask, dev_mask, result_mask);
} else {
return ppl::common::RC_UNSUPPORTED;
}
// step 3: mapping back to origin index on cpu
std::unique_ptr<int32_t[]> h_sorted_indices(new int32_t[num_boxes]);
std::unique_ptr<bool[]> h_result_mask(new bool[num_boxes]);
// construct output on cpu
std::unique_ptr<int64_t[]> h_constructed_indices(new int64_t[num_boxes * 3]);
hipMemcpyAsync(h_sorted_indices.get(), sorted_indices, sizeof(int32_t) * num_boxes, hipMemcpyDeviceToHost, stream);
hipMemcpyAsync(h_result_mask.get(), result_mask, sizeof(bool) * num_boxes, hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
// int num_eval_boxes = ::min(num_filtered_boxes, max_output_boxes_per_class);
for (int it = 0; it < num_filtered_boxes; ++it) {
if (h_result_mask.get()[it]) {
h_constructed_indices.get()[num_selected_indices_per_class * 3 + 0] = b;
h_constructed_indices.get()[num_selected_indices_per_class * 3 + 1] = c;
h_constructed_indices.get()[num_selected_indices_per_class * 3 + 2] =
h_sorted_indices.get()[it];
++num_selected_indices_per_class;
if (num_selected_indices_per_class >= max_output_boxes_per_class) break;
}
}
// step 4: gather one class output to totals
hipMemcpyAsync(output + num_selected_indices * 3, h_constructed_indices.get(),
// 3 means [batch_index, class_index, box_index]
sizeof(int64_t) * num_selected_indices_per_class * 3,
hipMemcpyHostToDevice,
stream);
num_selected_indices += num_selected_indices_per_class;
}
}
}
output_shape->SetDim(0, num_selected_indices);
return ppl::common::RC_SUCCESS;
}
|
8a078ac2c06b2f776533f50bafb50d2943891ae9.cu
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/nn/nms.h"
#include "cudakernel/nn/topk.h"
#include "cudakernel/math/math.h"
#include "cudakernel/common/common.h"
#include "ppl/nn/common/tensor_shape.h"
#include <cuda_fp16.h>
#include <float.h>
#include <memory>
#define CUDA_ALIGNMENT 128
constexpr int N_BOX_DIM = 4;
constexpr int NMS_BLOCK_SIZE = 8 * sizeof(int64_t);
template <typename T>
__device__ __inline__ void maxMin(T a, T b, T& min_val, T& max_val)
{
if (Math<T, T, T>::gt(a, b)) {
min_val = b;
max_val = a;
} else {
min_val = a;
max_val = b;
}
}
template <typename T>
__device__ __inline__ T Max(T a, T b)
{
if (Math<T, T, T>::gt(a, b)) {
return a;
} else {
return b;
}
}
template <typename T>
__device__ __inline__ T Min(T a, T b)
{
if (Math<T, T, T>::lt(a, b)) {
return a;
} else {
return b;
}
}
template <typename T>
__device__ bool devIoU(const T* box_i, const T* box_j, T iou_threshold, int center_point_box)
{
// use math helper
typedef Math<T, T, T> OpMath;
T ix_min, ix_max, iy_min, iy_max;
T jx_min, jx_max, jy_min, jy_max;
if (center_point_box == 0) {
maxMin(box_i[0], box_i[2], iy_min, iy_max);
maxMin(box_i[1], box_i[3], ix_min, ix_max);
maxMin(box_j[0], box_j[2], jy_min, jy_max);
maxMin(box_j[1], box_j[3], jx_min, jx_max);
} else {
T iw_half = OpMath::div(box_i[2], (T)2);
T ih_half = OpMath::div(box_i[3], (T)2);
ix_min = OpMath::sub(box_i[0], iw_half);
ix_max = OpMath::add(box_i[0], iw_half);
iy_min = OpMath::sub(box_i[1], ih_half);
iy_max = OpMath::add(box_i[1], ih_half);
T jw_half = OpMath::div(box_j[2], (T)2);
T jh_half = OpMath::div(box_j[3], (T)2);
jx_min = OpMath::sub(box_j[0], jw_half);
jx_max = OpMath::add(box_j[0], jw_half);
jy_min = OpMath::sub(box_j[1], jh_half);
jy_max = OpMath::add(box_j[1], jh_half);
}
T interx_min, interx_max, intery_min, intery_max;
interx_min = Max(ix_min, jx_min);
intery_min = Max(iy_min, jy_min);
interx_max = Min(ix_max, jx_max);
intery_max = Min(iy_max, jy_max);
T inter_area = OpMath::mul(Max(OpMath::sub(interx_max, interx_min), (T)0),
Max(OpMath::sub(intery_max, intery_min), (T)0));
if (OpMath::le(inter_area, (T)0))
return false;
T i_area = OpMath::mul(OpMath::sub(ix_max, ix_min), OpMath::sub(iy_max, iy_min));
T j_area = OpMath::mul(OpMath::sub(jx_max, jx_min), OpMath::sub(jy_max, jy_min));
T union_area = OpMath::sub(OpMath::add(i_area, j_area), inter_area);
if (OpMath::le(i_area, (T)0) || OpMath::le(j_area, (T)0) ||
OpMath::le(union_area, (T)0))
return false;
T iou_ratio = OpMath::div(inter_area, union_area);
return OpMath::gt(iou_ratio, iou_threshold);
}
template <typename T>
__global__ __launch_bounds__(NMS_BLOCK_SIZE) void nms_one_one_kernel(
int num_boxes,
int num_blocks,
const T* boxes,
float iou_threshold,
int center_point_box,
uint64_t* out_mask)
{
T t_iou_threshold = (T)iou_threshold;
__shared__ T s_boxes[NMS_BLOCK_SIZE * N_BOX_DIM];
// step 1: load col boxes to shared memory
int tid = threadIdx.x;
int col_boxes_start = blockIdx.x * NMS_BLOCK_SIZE;
int row_boxes_start = blockIdx.y * NMS_BLOCK_SIZE;
// no need to compute (redundant)
if (col_boxes_start < row_boxes_start)
return;
// last thread block may overflow
int col_size = min(num_boxes - col_boxes_start, NMS_BLOCK_SIZE);
if (tid < col_size) {
s_boxes[tid * N_BOX_DIM + 0] = boxes[(col_boxes_start + tid) * N_BOX_DIM + 0];
s_boxes[tid * N_BOX_DIM + 1] = boxes[(col_boxes_start + tid) * N_BOX_DIM + 1];
s_boxes[tid * N_BOX_DIM + 2] = boxes[(col_boxes_start + tid) * N_BOX_DIM + 2];
s_boxes[tid * N_BOX_DIM + 3] = boxes[(col_boxes_start + tid) * N_BOX_DIM + 3];
}
__syncthreads();
// step 2: iou mask with #NMS_BLOCK_SIZE boxes in smem
int row_size = min(num_boxes - row_boxes_start, NMS_BLOCK_SIZE);
if (tid < row_size) {
uint64_t mask = 0;
int cur_box = row_boxes_start + tid;
int start = (row_boxes_start == col_boxes_start) ? tid + 1 : 0;
const T *box_row = boxes + cur_box * N_BOX_DIM;
for (int it = start; it < col_size; ++it) {
if (devIoU(box_row, s_boxes + it * N_BOX_DIM, t_iou_threshold, center_point_box)) {
mask |= (1ULL << it);
}
}
int out_idx = cur_box * num_blocks + blockIdx.x;
out_mask[out_idx] = mask;
}
}
__device__ __inline__ bool isBitSet(uint64_t *mask, int pos)
{
constexpr int num_bits = 6; // int64_t
constexpr int mod_num = 63;
int mask_pos = pos >> num_bits; // div(64)
int rem_pos = pos & mod_num; // %(64)
return (mask[mask_pos] >> rem_pos) & 1;
}
// only launch one thread block
__global__ void nms_reduce_mask_kernel(
int num_blocks,
int num_boxes,
int max_boxes,
const uint64_t* in_mask,
bool* reduced_mask)
{
extern __shared__ uint64_t s_reduced_mask[];
int tid = threadIdx.x;
for (int it = tid; it < num_blocks; it += blockDim.x) {
s_reduced_mask[it] = 0xFFFFFFFFFFFFFFFF;
}
__syncthreads();
int accepted_boxes = 0;
// no need to deal with last box's mask: num_boxes - 1
for (int b_idx = 0; b_idx < num_boxes - 1; ++b_idx) {
if (!isBitSet(s_reduced_mask, b_idx))
continue;
++accepted_boxes;
const uint64_t *cur_mask = in_mask + b_idx * num_blocks;
for (int it = tid; it < num_blocks; it += blockDim.x) {
s_reduced_mask[it] &= ~cur_mask[it];
}
__syncthreads();
if (accepted_boxes >= max_boxes)
break;
}
for (int it = tid; it < num_boxes; it += blockDim.x) {
reduced_mask[it] = isBitSet(s_reduced_mask, it);
}
}
__global__ void nms_reduce_mask_kernel_global(
int num_blocks,
int num_boxes,
int max_boxes,
const uint64_t* in_mask,
uint64_t* g_reduce_mask,
bool* reduced_mask)
{
int tid = threadIdx.x;
for (int it = tid; it < num_blocks; it += blockDim.x) {
g_reduce_mask[it] = 0xFFFFFFFFFFFFFFFF;
}
__syncthreads();
int accepted_boxes = 0;
// no need to deal with last box's mask: num_boxes - 1
for (int b_idx = 0; b_idx < num_boxes - 1; ++b_idx) {
if (!isBitSet(g_reduce_mask, b_idx))
continue;
++accepted_boxes;
const uint64_t *cur_mask = in_mask + b_idx * num_blocks;
for (int it = tid; it < num_blocks; it += blockDim.x) {
g_reduce_mask[it] &= ~cur_mask[it];
}
__syncthreads();
if (accepted_boxes >= max_boxes)
break;
}
for (int it = tid; it < num_boxes; it += blockDim.x) {
reduced_mask[it] = isBitSet(g_reduce_mask, it);
}
}
template <typename T>
__global__ void indexSelectBoxes(
int num_filtered_boxes,
int32_t* sorted_indices,
const T* boxes,
T* sorted_boxes)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= num_filtered_boxes)
return;
int in_index = sorted_indices[index] * N_BOX_DIM;
int out_index = index * N_BOX_DIM;
#pragma unroll
for (int it = 0; it < N_BOX_DIM; ++it) {
sorted_boxes[out_index + it] = boxes[in_index + it];
}
}
// only launch 1 thread block (256 threads)
template <typename T>
__global__ void countScoresBoxes(
int num_boxes,
float score_threshold,
const T* sorted_boxes,
int* d_num_filter_scores)
{
int tid = threadIdx.x;
T t_score_threshold = (T)score_threshold;
__shared__ int acc_num[256];
acc_num[tid] = 0;
__syncthreads();
for (int it = tid; it < num_boxes; it += blockDim.x) {
acc_num[tid] += Math<T, T, T>::gt(sorted_boxes[it], t_score_threshold) ? 1 : 0;
}
for (int it = 128; it > 0; it = it >> 1) {
if (tid < it)
acc_num[tid] += acc_num[tid + it];
__syncthreads();
}
if (tid == 0)
d_num_filter_scores[0] = acc_num[0];
}
template <typename T>
void NMSGpuImpl(
cudaStream_t stream,
const T* sorted_boxes,
float iou_threshold,
int num_filtered_boxes,
int max_output_boxes_per_class,
int center_point_box,
int max_shared_mem,
uint64_t* g_reduce_mask,
uint64_t* dev_mask,
bool* result_mask)
{
// step 1: calculate all iou
constexpr int block_size = NMS_BLOCK_SIZE;
int num_blocks = DivUp(num_filtered_boxes, block_size);
dim3 grid_size(num_blocks, num_blocks);
nms_one_one_kernel<<<grid_size, block_size, 0, stream>>>(num_filtered_boxes,
num_blocks,
sorted_boxes,
iou_threshold,
center_point_box,
dev_mask);
// step 2: mask reduce
int32_t reduced_mask_size = num_blocks * block_size;
if (max_shared_mem > reduced_mask_size) {
// #boxes should not exceed #bits in shared memory
nms_reduce_mask_kernel<<<1, 1024, reduced_mask_size, stream>>>(num_blocks,
num_filtered_boxes,
max_output_boxes_per_class,
dev_mask,
result_mask);
} else {
// use global memory
nms_reduce_mask_kernel_global<<<1, 1024, 0, stream>>>(num_blocks,
num_filtered_boxes,
max_output_boxes_per_class,
dev_mask,
g_reduce_mask,
result_mask);
}
}
int64_t PPLNMSGetTempBufferSize(const ppl::nn::TensorShape* scores_shape)
{
int64_t total_size = 0;
int elem_size = ppl::common::GetSizeOfDataType(scores_shape->GetDataType());
int num_class = scores_shape->GetDim(1);
int num_boxes = scores_shape->GetDim(2);
ppl::nn::TensorShape indices_shape(*scores_shape);
indices_shape.Reshape({num_class, num_boxes});
indices_shape.SetDataType(ppl::common::DATATYPE_INT64); // max int64
int axis = 1;
total_size += Align(PPLTopKGetTempBufferSize(&indices_shape, num_boxes, axis), CUDA_ALIGNMENT);
total_size += Align(elem_size * num_class * num_boxes, CUDA_ALIGNMENT); // sorted scores;
total_size += Align(sizeof(int64_t) * num_class * num_boxes, CUDA_ALIGNMENT); // sorted indices;
total_size += Align(elem_size * N_BOX_DIM * num_boxes, CUDA_ALIGNMENT); // sorted boxes;
int blocks = DivUp(num_boxes, NMS_BLOCK_SIZE);
total_size += Align(sizeof(uint64_t) * blocks * num_boxes, CUDA_ALIGNMENT); // one-one mapping mask;
total_size += Align(sizeof(bool) * num_boxes, CUDA_ALIGNMENT); // reduced mask;
total_size += Align(sizeof(int), CUDA_ALIGNMENT); // count filtered boxes number
// reduce needed
int num_blocks = DivUp(num_boxes, NMS_BLOCK_SIZE);
total_size += Align(num_blocks * NMS_BLOCK_SIZE, CUDA_ALIGNMENT); // count filtered boxes number
return total_size;
}
ppl::common::RetCode PPLCUDANMSForwardImp(
cudaStream_t stream,
ppl::nn::TensorShape* boxes_shape,
const void* boxes,
ppl::nn::TensorShape* scores_shape,
const void* scores,
ppl::nn::TensorShape* output_shape,
int64_t* output,
void* temp_buffer,
int64_t temp_buffer_bytes,
int device_id,
int center_point_box,
int max_output_boxes_per_class,
float iou_threshold,
float score_threshold)
{
int num_selected_indices = 0;
int num_batch = boxes_shape->GetDim(0);
int num_boxes = boxes_shape->GetDim(1);
int num_class = scores_shape->GetDim(1);
// shape for top-k use
int elem_size = ppl::common::GetSizeOfDataType(scores_shape->GetDataType());
cudaDeviceProp gpu_prob;
cudaGetDeviceProperties(&gpu_prob, device_id);
int max_shared_mem = gpu_prob.sharedMemPerBlock;
// temp buffer for sort & nms & construct
ppl::nn::TensorShape topk_shape(*scores_shape);
topk_shape.Reshape({num_class, num_boxes});
topk_shape.SetDataType(scores_shape->GetDataType());
ppl::nn::TensorShape indices_shape(*scores_shape);
indices_shape.Reshape({num_class, num_boxes});
indices_shape.SetDataType(ppl::common::DATATYPE_INT32);
int axis = 1;
int topk_buffer_size = PPLTopKGetTempBufferSize(&indices_shape, num_boxes, axis);
void *topk_buffer = temp_buffer;
void *sorted_scores_tot = static_cast<void *>(static_cast<char *>(topk_buffer) + Align(topk_buffer_size, CUDA_ALIGNMENT));
int32_t *sorted_indices_tot = reinterpret_cast<int32_t *>(
static_cast<char *>(sorted_scores_tot) + Align(elem_size * num_class * num_boxes, CUDA_ALIGNMENT));
void *sorted_boxes = static_cast<void *>((char *)sorted_indices_tot + Align(num_class * num_boxes * sizeof(int32_t), CUDA_ALIGNMENT));
uint64_t *dev_mask = reinterpret_cast<uint64_t *>(
static_cast<char *>(sorted_boxes) + Align(elem_size * N_BOX_DIM * num_boxes, CUDA_ALIGNMENT));
// each bit in the int64_t mask represents one IoU result
int blocks = DivUp(num_boxes, NMS_BLOCK_SIZE);
bool *result_mask = reinterpret_cast<bool *>((char *)dev_mask + Align(blocks * num_boxes * sizeof(uint64_t), CUDA_ALIGNMENT));
int *d_num_filter_scores = reinterpret_cast<int *>(result_mask + Align(num_boxes * sizeof(bool), CUDA_ALIGNMENT));
uint64_t *g_reduce_mask = reinterpret_cast<uint64_t *>((char *)d_num_filter_scores + Align(sizeof(int), CUDA_ALIGNMENT));
// process one class at a time
for (int b = 0; b < num_batch; ++b) {
// step 1: sort scores and index select boxes
cudaMemset(temp_buffer, 0, temp_buffer_bytes);
PPLCUDATopKForwardImp(stream,
&topk_shape,
static_cast<const float *>(scores) + b * scores_shape->GetElementsFromDimensionIncludingPadding(1),
&topk_shape,
sorted_scores_tot,
&indices_shape,
sorted_indices_tot,
topk_buffer,
topk_buffer_size,
num_boxes,
axis);
// int nms_buffer_size = temp_buffer_bytes - Align(topk_buffer_size, CUDA_ALIGNMENT) -
// Align(elem_size * num_class * num_boxes, CUDA_ALIGNMENT) -
// Align(num_class * num_boxes * sizeof(int32_t), CUDA_ALIGNMENT);
for (int c = 0; c < num_class; ++c) {
// reset to zero each iteration (Not necessary)
// cudaMemset(sorted_boxes, 0, nms_buffer_size);
float *sorted_scores = static_cast<float*>(sorted_scores_tot) + c * num_boxes;//+ b * num_class * num_boxes + c * num_boxes;
int32_t *sorted_indices = sorted_indices_tot + c * num_boxes;// + b * num_class * num_boxes + c * num_boxes;
int num_selected_indices_per_class = 0;
int num_filtered_boxes = 0;
// count scores above score_threshold
{
int block_size = 256;
if (boxes_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) {
countScoresBoxes<<<1, block_size, 0, stream>>>(
num_boxes, score_threshold, (const float *)sorted_scores, d_num_filter_scores);
} else if (boxes_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
countScoresBoxes<<<1, block_size, 0, stream>>>(
num_boxes, score_threshold, (const half *)sorted_scores, d_num_filter_scores);
} else {
return ppl::common::RC_UNSUPPORTED;
}
cudaMemcpyAsync((void *)(&num_filtered_boxes), d_num_filter_scores, sizeof(int), cudaMemcpyDeviceToHost, stream);
}
cudaStreamSynchronize(stream);
// index select
if (num_filtered_boxes > 0) {
{
int block_size = 256;
int grid_size = DivUp(num_filtered_boxes, block_size);
if (boxes_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) {
indexSelectBoxes<<<grid_size, block_size, 0, stream>>>(
num_filtered_boxes, sorted_indices, static_cast<const float *>(boxes) + b * num_boxes * 4, (float *)sorted_boxes);
} else if (boxes_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
indexSelectBoxes<<<grid_size, block_size, 0, stream>>>(
num_filtered_boxes, sorted_indices, static_cast<const half *>(boxes) + b * num_boxes * 4, (half *)sorted_boxes);
} else {
return ppl::common::RC_UNSUPPORTED;
}
}
// step 2: nms operations (type related)
if (boxes_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) {
NMSGpuImpl<float>(stream, (const float *)sorted_boxes, iou_threshold, num_filtered_boxes, max_output_boxes_per_class, center_point_box, max_shared_mem, g_reduce_mask, dev_mask, result_mask);
} else if (boxes_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
NMSGpuImpl<half>(stream, (const half *)sorted_boxes, iou_threshold, num_filtered_boxes, max_output_boxes_per_class, center_point_box, max_shared_mem, g_reduce_mask, dev_mask, result_mask);
} else {
return ppl::common::RC_UNSUPPORTED;
}
// step 3: mapping back to origin index on cpu
std::unique_ptr<int32_t[]> h_sorted_indices(new int32_t[num_boxes]);
std::unique_ptr<bool[]> h_result_mask(new bool[num_boxes]);
// construct output on cpu
std::unique_ptr<int64_t[]> h_constructed_indices(new int64_t[num_boxes * 3]);
cudaMemcpyAsync(h_sorted_indices.get(), sorted_indices, sizeof(int32_t) * num_boxes, cudaMemcpyDeviceToHost, stream);
cudaMemcpyAsync(h_result_mask.get(), result_mask, sizeof(bool) * num_boxes, cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
// int num_eval_boxes = std::min(num_filtered_boxes, max_output_boxes_per_class);
for (int it = 0; it < num_filtered_boxes; ++it) {
if (h_result_mask.get()[it]) {
h_constructed_indices.get()[num_selected_indices_per_class * 3 + 0] = b;
h_constructed_indices.get()[num_selected_indices_per_class * 3 + 1] = c;
h_constructed_indices.get()[num_selected_indices_per_class * 3 + 2] =
h_sorted_indices.get()[it];
++num_selected_indices_per_class;
if (num_selected_indices_per_class >= max_output_boxes_per_class) break;
}
}
// step 4: gather one class output to totals
cudaMemcpyAsync(output + num_selected_indices * 3, h_constructed_indices.get(),
// 3 means [batch_index, class_index, box_index]
sizeof(int64_t) * num_selected_indices_per_class * 3,
cudaMemcpyHostToDevice,
stream);
num_selected_indices += num_selected_indices_per_class;
}
}
}
output_shape->SetDim(0, num_selected_indices);
return ppl::common::RC_SUCCESS;
}
|
15f1bcb6dbf8b5e9103dc9bb0d9fa26c5dbb6c5d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void lshift_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == ScalarType::Float ||
iter.dtype() == ScalarType::Double ||
iter.dtype() == ScalarType::Half ||
iter.dtype() == ScalarType::BFloat16) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "lshift_cuda", [&]() {
gpu_kernel_with_scalars(
iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a * ::pow(static_cast<scalar_t>(2), b);
});
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "lshift_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return static_cast<std::make_unsigned_t<scalar_t>>(a) << b;
});
});
}
}
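// Illustrative semantics (not taken from the source): for floating dtypes the
// shift is emulated arithmetically, e.g. lshift(1.5, 3) == 1.5 * 2^3 == 12.0,
// while integral dtypes use the hardware << on the unsigned representation.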
void rshift_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == ScalarType::Float ||
iter.dtype() == ScalarType::Double ||
iter.dtype() == ScalarType::Half ||
iter.dtype() == ScalarType::BFloat16) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "rshift_cuda", [&]() {
gpu_kernel_with_scalars(
iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a / ::pow(static_cast<scalar_t>(2), b);
});
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "rshift_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a >> b;
});
});
}
}
REGISTER_DISPATCH(lshift_stub, &lshift_kernel_cuda);
REGISTER_DISPATCH(rshift_stub, &rshift_kernel_cuda);
}} // namespace at::native
|
15f1bcb6dbf8b5e9103dc9bb0d9fa26c5dbb6c5d.cu
|
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void lshift_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == ScalarType::Float ||
iter.dtype() == ScalarType::Double ||
iter.dtype() == ScalarType::Half ||
iter.dtype() == ScalarType::BFloat16) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "lshift_cuda", [&]() {
gpu_kernel_with_scalars(
iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a * std::pow(static_cast<scalar_t>(2), b);
});
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "lshift_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return static_cast<std::make_unsigned_t<scalar_t>>(a) << b;
});
});
}
}
void rshift_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == ScalarType::Float ||
iter.dtype() == ScalarType::Double ||
iter.dtype() == ScalarType::Half ||
iter.dtype() == ScalarType::BFloat16) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "rshift_cuda", [&]() {
gpu_kernel_with_scalars(
iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a / std::pow(static_cast<scalar_t>(2), b);
});
});
} else {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "rshift_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a >> b;
});
});
}
}
REGISTER_DISPATCH(lshift_stub, &lshift_kernel_cuda);
REGISTER_DISPATCH(rshift_stub, &rshift_kernel_cuda);
}} // namespace at::native
|
910c955f34b5aed4656dfd8e0eacdeafd98236cf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
constexpr int N = 10;
/*
* NOTE:
* Come back to this function when directed to.
*
* - - - - - - - - - - - - - - - - - - - - - - -
*
* Hopefully by now you've seen how we allocate memory
* on the host and the device. Now that we've copied our
* data from the host to the device and called our kernel,
* we can take a look at our kernel.
*
* Please note how similar this is to our `1-threads` example.
* Each thread sets one index of c. This time, we don't manually
* pass the thread index to the thread because it's taken care of by
* CUDA, the library we are using.
*
 * After examining the kernel, you may return to where we called the kernel.
*/
__global__
void add_vectors(
double* a,
double* b,
double* c)
{
const int thread_id = blockIdx.x;
c[thread_id] = a[thread_id] + b[thread_id];
}
/*
*
* - - - - - - - - - - - - - - - - - - - - - - -
*
*/
int main(int, char**)
{
/*
* We are setting up the arrays in the same way
* as before
*/
std::cout << "Setting a=3, b=5, c=0\n";
auto a = new double[N];
auto b = new double[N];
auto c = new double[N];
for (int i=0; i<N; i++)
{
a[i] = 3.0;
b[i] = 5.0;
c[i] = 0.0;
}
/*
* This time, we also have to allocate
     * memory on the 'device', which is our graphics card.
     * Our 'host' is the CPU, where this main function will run.
*/
double* device_a;
double* device_b;
double* device_c;
/*
     * When we call `auto c = new double[N];`, we are telling the CPU
     * to allocate enough memory to fit N doubles. Now that we're also
     * using a GPU, we have to tell the GPU to allocate enough memory
     * for N doubles as well. We accomplish this with a CUDA function:
*/
hipMalloc(&device_a, N * sizeof(double));
hipMalloc(&device_b, N * sizeof(double));
hipMalloc(&device_c, N * sizeof(double));
/*
* Now we have a, b, and c allocated and set to 3, 5, and 0
* on the host. On the device however, we have only allocated
* the memory. The memory is uninitialized.
*
* To fix this, we will copy the values from a on the host
* into the memory allocated for a on the device, and same
* goes for b and c.
*/
hipMemcpy(device_a, a, N * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(device_b, b, N * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(device_c, c, N * sizeof(double), hipMemcpyHostToDevice);
/*
* Now that we have our memory copied from the host (cpu) to the
* device (gpu) we can call our cuda kernel. The kernel can *only*
* operate on memory allocated on the GPU.
*
* After examining the function call below, you may return to the
* top of the file to take a look at the kernel.
*
* Calling the function with function_name<<< , >>>(parameters);
     * is how we tell CUDA how it should configure our kernel.
     *
     * The first parameter in the triple angle brackets is the number of blocks
* per grid that should be allocated, and the second parameter is the number
* of threads per block that should be allocated.
* The grid is the largest unit of computation when calling a kernel.
*
     * Note: grids and blocks are entirely defined in software. Threads and
* warps are determined by the hardware. By aligning the number of
* blocks and threads in software with the threads in the physical
* hardware, we can achieve very large increases in performance.
*
* For example, calling `add_vectors<<<10, 1>>>(a, b, c)` would tell cuda
     * to allocate 10 blocks per grid, and 1 thread per block.
* Alternatively, calling `add_vectors<<<4, 10>>>(a, b, c)` would tell
* cuda to allocate 4 blocks, each with 10 threads per block totalling
* 40 threads.
*/
hipLaunchKernelGGL(( add_vectors), dim3(N), dim3(1), 0, 0, device_a, device_b, device_c);
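    /*
     * A kernel launch does not return an error code directly, so if you want
     * to confirm that the launch itself succeeded, one small optional check
     * looks like this:
     */
    hipError_t launch_err = hipGetLastError();
    if (launch_err != hipSuccess)
    {
        std::cout << "kernel launch failed: " << hipGetErrorString(launch_err) << "\n";
    }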
/*
* Hopefully by now you have some understanding of the calling conventions
* for cuda kernels and the nature of the grid, blocks, and threads.
*
* Now let us copy the data back from the device to the host, and see if
* we still get what we expect.
*/
hipMemcpy(c, device_c, N * sizeof(double), hipMemcpyDeviceToHost);
for (int i=0; i<N; i++)
{
std::cout << "c["<<i<<"] = " << c[i] << "\n";
}
delete[] a;
delete[] b;
delete[] c;
/*
* We also have to free memory on the device since we allocated
* it in two places.
*/
hipFree(device_a);
hipFree(device_b);
hipFree(device_c);
return 0;
}
|
910c955f34b5aed4656dfd8e0eacdeafd98236cf.cu
|
#include <iostream>
#include <cuda.h>
constexpr int N = 10;
/*
* NOTE:
* Come back to this function when directed to.
*
* - - - - - - - - - - - - - - - - - - - - - - -
*
* Hopefully by now you've seen how we allocate memory
* on the host and the device. Now that we've copied our
* data from the host to the device and called our kernel,
* we can take a look at our kernel.
*
* Please note how similar this is to our `1-threads` example.
* Each thread sets one index of c. This time, we don't manually
* pass the thread index to the thread because it's taken care of by
* CUDA, the library we are using.
*
 * After examining the kernel, you may return to where we called the kernel.
*/
__global__
void add_vectors(
double* a,
double* b,
double* c)
{
const int thread_id = blockIdx.x;
c[thread_id] = a[thread_id] + b[thread_id];
}
/*
*
* - - - - - - - - - - - - - - - - - - - - - - -
*
*/
int main(int, char**)
{
/*
* We are setting up the arrays in the same way
* as before
*/
std::cout << "Setting a=3, b=5, c=0\n";
auto a = new double[N];
auto b = new double[N];
auto c = new double[N];
for (int i=0; i<N; i++)
{
a[i] = 3.0;
b[i] = 5.0;
c[i] = 0.0;
}
/*
* This time, we also have to allocate
     * memory on the 'device', which is our graphics card.
     * Our 'host' is the CPU, where this main function will run.
*/
double* device_a;
double* device_b;
double* device_c;
/*
     * When we call `auto c = new double[N];`, we are telling the CPU
     * to allocate enough memory to fit N doubles. Now that we're also
     * using a GPU, we have to tell the GPU to allocate enough memory
     * for N doubles as well. We accomplish this with a CUDA function:
*/
cudaMalloc(&device_a, N * sizeof(double));
cudaMalloc(&device_b, N * sizeof(double));
cudaMalloc(&device_c, N * sizeof(double));
/*
* Now we have a, b, and c allocated and set to 3, 5, and 0
* on the host. On the device however, we have only allocated
* the memory. The memory is uninitialized.
*
* To fix this, we will copy the values from a on the host
* into the memory allocated for a on the device, and same
* goes for b and c.
*/
cudaMemcpy(device_a, a, N * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_b, b, N * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_c, c, N * sizeof(double), cudaMemcpyHostToDevice);
/*
* Now that we have our memory copied from the host (cpu) to the
* device (gpu) we can call our cuda kernel. The kernel can *only*
* operate on memory allocated on the GPU.
*
* After examining the function call below, you may return to the
* top of the file to take a look at the kernel.
*
* Calling the function with function_name<<< , >>>(parameters);
     * is how we tell CUDA how it should configure our kernel.
     *
     * The first parameter in the triple angle brackets is the number of blocks
* per grid that should be allocated, and the second parameter is the number
* of threads per block that should be allocated.
* The grid is the largest unit of computation when calling a kernel.
*
     * Note: grids and blocks are entirely defined in software. Threads and
* warps are determined by the hardware. By aligning the number of
* blocks and threads in software with the threads in the physical
* hardware, we can achieve very large increases in performance.
*
* For example, calling `add_vectors<<<10, 1>>>(a, b, c)` would tell cuda
     * to allocate 10 blocks per grid, and 1 thread per block.
* Alternatively, calling `add_vectors<<<4, 10>>>(a, b, c)` would tell
* cuda to allocate 4 blocks, each with 10 threads per block totalling
* 40 threads.
*/
add_vectors<<<N, 1>>>(device_a, device_b, device_c);
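    /*
     * A kernel launch does not return an error code directly, so if you want
     * to confirm that the launch itself succeeded, one small optional check
     * looks like this:
     */
    cudaError_t launch_err = cudaGetLastError();
    if (launch_err != cudaSuccess)
    {
        std::cout << "kernel launch failed: " << cudaGetErrorString(launch_err) << "\n";
    }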
/*
* Hopefully by now you have some understanding of the calling conventions
* for cuda kernels and the nature of the grid, blocks, and threads.
*
* Now let us copy the data back from the device to the host, and see if
* we still get what we expect.
*/
cudaMemcpy(c, device_c, N * sizeof(double), cudaMemcpyDeviceToHost);
for (int i=0; i<N; i++)
{
std::cout << "c["<<i<<"] = " << c[i] << "\n";
}
delete[] a;
delete[] b;
delete[] c;
/*
* We also have to free memory on the device since we allocated
* it in two places.
*/
cudaFree(device_a);
cudaFree(device_b);
cudaFree(device_c);
return 0;
}
|
5fd8e1a071993ce16fc392156a36075af092710a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "FillWithProbabilityMask_V.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE*sizeof(float)); // hipMalloc takes a size in bytes, so account for sizeof(float)
const float probability = 1;
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
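// Rounding up with the while loops above is equivalent to integer ceiling
// division; the same grid could be computed directly as, for example:
//   dim3 gridBlock((XSIZE + BLOCKX - 1) / BLOCKX, (YSIZE + BLOCKY - 1) / BLOCKY);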
hipFree(0);
hipLaunchKernelGGL(FillWithProbabilityMask_V, dim3(gridBlock), dim3(threadBlock), 0, 0, a, probability, n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(FillWithProbabilityMask_V, dim3(gridBlock), dim3(threadBlock), 0, 0, a, probability, n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(FillWithProbabilityMask_V, dim3(gridBlock), dim3(threadBlock), 0, 0, a, probability, n);
}
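// Kernel launches are asynchronous, so without a synchronization point the
// interval below mostly measures launch overhead. Waiting for the device here
// (an optional addition) makes the timing reflect the kernels themselves.
hipDeviceSynchronize();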
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
5fd8e1a071993ce16fc392156a36075af092710a.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "FillWithProbabilityMask_V.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE*sizeof(float)); // cudaMalloc takes a size in bytes, so account for sizeof(float)
const float probability = 1;
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
FillWithProbabilityMask_V<<<gridBlock,threadBlock>>>(a,probability,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
FillWithProbabilityMask_V<<<gridBlock,threadBlock>>>(a,probability,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
FillWithProbabilityMask_V<<<gridBlock,threadBlock>>>(a,probability,n);
}
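// Kernel launches are asynchronous, so without a synchronization point the
// interval below mostly measures launch overhead. Waiting for the device here
// (an optional addition) makes the timing reflect the kernels themselves.
cudaDeviceSynchronize();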
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
857770a2058fc2e6172f0860660d39bb281d9bee.hip
|
// !!! This is a file automatically generated by hipify!!!
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cxx11_tensor_scan_cuda
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
#include <hip/hip_fp16.h>
#endif
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::Tensor;
typedef Tensor<float, 1>::DimensionPair DimPair;
template<int DataLayout>
void test_cuda_cumsum(int m_size, int k_size, int n_size)
{
std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size << ")" << std::endl;
Tensor<float, 3, DataLayout> t_input(m_size, k_size, n_size);
Tensor<float, 3, DataLayout> t_result(m_size, k_size, n_size);
Tensor<float, 3, DataLayout> t_result_gpu(m_size, k_size, n_size);
t_input.setRandom();
std::size_t t_input_bytes = t_input.size() * sizeof(float);
std::size_t t_result_bytes = t_result.size() * sizeof(float);
float* d_t_input;
float* d_t_result;
hipMalloc((void**)(&d_t_input), t_input_bytes);
hipMalloc((void**)(&d_t_result), t_result_bytes);
hipMemcpy(d_t_input, t_input.data(), t_input_bytes, hipMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> >
gpu_t_input(d_t_input, Eigen::array<int, 3>(m_size, k_size, n_size));
Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> >
gpu_t_result(d_t_result, Eigen::array<int, 3>(m_size, k_size, n_size));
gpu_t_result.device(gpu_device) = gpu_t_input.cumsum(1);
t_result = t_input.cumsum(1);
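  // cumsum(1) is an inclusive scan along the middle (k) dimension, i.e. for each (i, j):
  //   result(i, k, j) = input(i, 0, j) + input(i, 1, j) + ... + input(i, k, j)
  // The host-side scan above is the reference the GPU result is checked against below.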
hipMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, hipMemcpyDeviceToHost);
for (DenseIndex i = 0; i < t_result.size(); i++) {
if (fabs(t_result(i) - t_result_gpu(i)) < 1e-4f) {
continue;
}
if (Eigen::internal::isApprox(t_result(i), t_result_gpu(i), 1e-4f)) {
continue;
}
std::cout << "mismatch detected at index " << i << ": " << t_result(i)
<< " vs " << t_result_gpu(i) << std::endl;
assert(false);
}
hipFree((void*)d_t_input);
hipFree((void*)d_t_result);
}
void test_cxx11_tensor_scan_cuda()
{
CALL_SUBTEST_1(test_cuda_cumsum<ColMajor>(128, 128, 128));
CALL_SUBTEST_2(test_cuda_cumsum<RowMajor>(128, 128, 128));
}
|
857770a2058fc2e6172f0860660d39bb281d9bee.cu
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cxx11_tensor_scan_cuda
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
#include <cuda_fp16.h>
#endif
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::Tensor;
typedef Tensor<float, 1>::DimensionPair DimPair;
template<int DataLayout>
void test_cuda_cumsum(int m_size, int k_size, int n_size)
{
std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size << ")" << std::endl;
Tensor<float, 3, DataLayout> t_input(m_size, k_size, n_size);
Tensor<float, 3, DataLayout> t_result(m_size, k_size, n_size);
Tensor<float, 3, DataLayout> t_result_gpu(m_size, k_size, n_size);
t_input.setRandom();
std::size_t t_input_bytes = t_input.size() * sizeof(float);
std::size_t t_result_bytes = t_result.size() * sizeof(float);
float* d_t_input;
float* d_t_result;
cudaMalloc((void**)(&d_t_input), t_input_bytes);
cudaMalloc((void**)(&d_t_result), t_result_bytes);
cudaMemcpy(d_t_input, t_input.data(), t_input_bytes, cudaMemcpyHostToDevice);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> >
gpu_t_input(d_t_input, Eigen::array<int, 3>(m_size, k_size, n_size));
Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> >
gpu_t_result(d_t_result, Eigen::array<int, 3>(m_size, k_size, n_size));
gpu_t_result.device(gpu_device) = gpu_t_input.cumsum(1);
t_result = t_input.cumsum(1);
cudaMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, cudaMemcpyDeviceToHost);
for (DenseIndex i = 0; i < t_result.size(); i++) {
if (fabs(t_result(i) - t_result_gpu(i)) < 1e-4f) {
continue;
}
if (Eigen::internal::isApprox(t_result(i), t_result_gpu(i), 1e-4f)) {
continue;
}
std::cout << "mismatch detected at index " << i << ": " << t_result(i)
<< " vs " << t_result_gpu(i) << std::endl;
assert(false);
}
cudaFree((void*)d_t_input);
cudaFree((void*)d_t_result);
}
void test_cxx11_tensor_scan_cuda()
{
CALL_SUBTEST_1(test_cuda_cumsum<ColMajor>(128, 128, 128));
CALL_SUBTEST_2(test_cuda_cumsum<RowMajor>(128, 128, 128));
}
|
728b5d9c8b9a36db5d78332b1d5121f125cf6c12.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#include "Error.h"
#define N 500
__global__ void additionMatricesKernel(int *d_a, int *d_b, int *d_c){
int i = threadIdx.x+blockIdx.x*blockDim.x;
int j = threadIdx.y+blockIdx.y*blockDim.y;
while( i < N ){
j = threadIdx.y + blockIdx.y * blockDim.y;
while( j < N){
d_c[ i*N+j ] = d_a[i*N+j] + d_b[i*N+j];
j+= blockDim.y * gridDim.y;
}
i+=blockDim.x*gridDim.x;
}
}
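// The nested while loops above form a grid-stride loop: with the 4x4 grid of
// 8x8 blocks launched below (32x32 threads in total), each thread strides
// across the 500x500 matrices in steps of blockDim * gridDim, so every
// element is still covered.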
void onDevice(int h_a[][N], int h_b[][N], int h_c[][N] ){
// declare GPU memory pointers
int *d_a, *d_b, *d_c;
const int ARRAY_BYTES = N * N * sizeof(int);
// allocate memory on the GPU
HANDLER_ERROR_ERR(hipMalloc((void**)&d_a,ARRAY_BYTES));
HANDLER_ERROR_ERR(hipMalloc((void**)&d_b,ARRAY_BYTES));
HANDLER_ERROR_ERR(hipMalloc((void**)&d_c,ARRAY_BYTES));
// copy data from CPU the GPU
HANDLER_ERROR_ERR(hipMemcpy(d_a, h_a, ARRAY_BYTES, hipMemcpyHostToDevice));
HANDLER_ERROR_ERR(hipMemcpy(d_b, h_b, ARRAY_BYTES, hipMemcpyHostToDevice));
HANDLER_ERROR_ERR(hipMemcpy(d_c, h_c, ARRAY_BYTES, hipMemcpyHostToDevice));
//execution configuration
dim3 GridBlocks( 4,4 );
dim3 ThreadsBlocks( 8,8 );
//run the kernel
hipLaunchKernelGGL(( additionMatricesKernel), dim3(GridBlocks),dim3(ThreadsBlocks), 0, 0, d_a, d_b, d_c );
HANDLER_ERROR_MSG("kernel panic!!!");
// copy data back from the GPU to the CPU
HANDLER_ERROR_ERR(hipMemcpy(h_c, d_c, ARRAY_BYTES, hipMemcpyDeviceToHost));
// free GPU memory
HANDLER_ERROR_ERR(hipFree(d_a));
HANDLER_ERROR_ERR(hipFree(d_b));
HANDLER_ERROR_ERR(hipFree(d_c));
}
void test(int h_a[][N], int h_b[][N], int h_c[][N] ){
for(int i=0; i < N; i++){
for(int j = 0; j < N; j++){
assert(h_a[i][j] + h_b[i][j] == h_c[i][j]);
}
}
printf("-: successful execution :-\n");
}
void onHost(){
int i,j;
int h_a[N][N], h_b[N][N], h_c[N][N];
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
h_a[i][j] = h_b[i][j] = i+j;
h_c[i][j] = 0;
}
}
// call device configuration
onDevice(h_a,h_b,h_c);
test(h_a,h_b,h_c);
}
int main(){
onHost();
}
|
728b5d9c8b9a36db5d78332b1d5121f125cf6c12.cu
|
#include <stdio.h>
#include <assert.h>
#include "Error.h"
#define N 500
__global__ void additionMatricesKernel(int *d_a, int *d_b, int *d_c){
int i = threadIdx.x+blockIdx.x*blockDim.x;
int j = threadIdx.y+blockIdx.y*blockDim.y;
while( i < N ){
j = threadIdx.y + blockIdx.y * blockDim.y;
while( j < N){
d_c[ i*N+j ] = d_a[i*N+j] + d_b[i*N+j];
j+= blockDim.y * gridDim.y;
}
i+=blockDim.x*gridDim.x;
}
}
void onDevice(int h_a[][N], int h_b[][N], int h_c[][N] ){
// declare GPU memory pointers
int *d_a, *d_b, *d_c;
const int ARRAY_BYTES = N * N * sizeof(int);
// allocate memory on the GPU
HANDLER_ERROR_ERR(cudaMalloc((void**)&d_a,ARRAY_BYTES));
HANDLER_ERROR_ERR(cudaMalloc((void**)&d_b,ARRAY_BYTES));
HANDLER_ERROR_ERR(cudaMalloc((void**)&d_c,ARRAY_BYTES));
// copy data from CPU the GPU
HANDLER_ERROR_ERR(cudaMemcpy(d_a, h_a, ARRAY_BYTES, cudaMemcpyHostToDevice));
HANDLER_ERROR_ERR(cudaMemcpy(d_b, h_b, ARRAY_BYTES, cudaMemcpyHostToDevice));
HANDLER_ERROR_ERR(cudaMemcpy(d_c, h_c, ARRAY_BYTES, cudaMemcpyHostToDevice));
//execution configuration
dim3 GridBlocks( 4,4 );
dim3 ThreadsBlocks( 8,8 );
//run the kernel
additionMatricesKernel<<<GridBlocks,ThreadsBlocks>>>( d_a, d_b, d_c );
HANDLER_ERROR_MSG("kernel panic!!!");
// copy data back from the GPU to the CPU
HANDLER_ERROR_ERR(cudaMemcpy(h_c, d_c, ARRAY_BYTES, cudaMemcpyDeviceToHost));
// free GPU memory
HANDLER_ERROR_ERR(cudaFree(d_a));
HANDLER_ERROR_ERR(cudaFree(d_b));
HANDLER_ERROR_ERR(cudaFree(d_c));
}
void test(int h_a[][N], int h_b[][N], int h_c[][N] ){
for(int i=0; i < N; i++){
for(int j = 0; j < N; j++){
assert(h_a[i][j] + h_b[i][j] == h_c[i][j]);
}
}
printf("-: successful execution :-\n");
}
void onHost(){
int i,j;
int h_a[N][N], h_b[N][N], h_c[N][N];
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
h_a[i][j] = h_b[i][j] = i+j;
h_c[i][j] = 0;
}
}
// call device configuration
onDevice(h_a,h_b,h_c);
test(h_a,h_b,h_c);
}
int main(){
onHost();
}
|
46d0512300dd468a0d509855d171b9439f6ad9a4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "dcn_v2_im2col_cuda.h"
#include <cstdio>
#include <algorithm>
#include <cstring>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
inline int GET_BLOCKS(const int N)
{
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
__device__ float dmcn_im2col_bilinear(const float *bottom_data, const int data_width,
const int height, const int width, float h, float w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
float lh = h - h_low;
float lw = w - w_low;
float hh = 1 - lh, hw = 1 - lw;
float v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
float v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
float v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
float v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
__device__ float dmcn_get_gradient_weight(float argmax_h, float argmax_w,
const int h, const int w, const int height, const int width)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
float weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
__device__ float dmcn_get_coordinate_weight(float argmax_h, float argmax_w,
const int height, const int width, const float *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
float weight = 0;
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
const float *data_im, const float *data_offset, const float *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
float *data_col)
{
  // launch channels * batch_size * height_col * width_col threads (one per output element)
CUDA_KERNEL_LOOP(index, n)
{
// NOTE(CharlesShang): different from Dai Jifeng's MXNet implementation, col_buffer is of shape (c*kw*kh, N, oh, ow)
// here columns is of shape (N, c*kw*kh, oh * ow), need to adapt axis
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
// const int b_col = (index / width_col / height_col) % batch_size;
const int b_col = (index / width_col / height_col / num_channels) % batch_size;
// const int c_im = (index / width_col / height_col) / batch_size;
const int c_im = (index / width_col / height_col) % num_channels;
// const int c_col = c_im * kernel_h * kernel_w;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
// float *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
float *data_col_ptr = data_col + ((b_col * num_channels * kernel_w * kernel_h + c_col) * height_col + h_col) * width_col + w_col;
//const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const float *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const float *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const float *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const float offset_h = data_offset_ptr[data_offset_h_ptr];
const float offset_w = data_offset_ptr[data_offset_w_ptr];
const float mask = data_mask_ptr[data_mask_hw_ptr];
float val = static_cast<float>(0);
const float h_im = h_in + i * dilation_h + offset_h;
const float w_im = w_in + j * dilation_w + offset_w;
//if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const float map_h = i * dilation_h + offset_h;
//const float map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
// data_col_ptr += batch_size * height_col * width_col;
data_col_ptr += height_col * width_col;
}
}
}
}
__global__ void modulated_deformable_col2im_gpu_kernel(const int n,
const float *data_col, const float *data_offset, const float *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
float *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const float *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const float *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const float offset_h = data_offset_ptr[data_offset_h_ptr];
const float offset_w = data_offset_ptr[data_offset_w_ptr];
const float mask = data_mask_ptr[data_mask_hw_ptr];
const float cur_inv_h_data = h_in + i * dilation_h + offset_h;
const float cur_inv_w_data = w_in + j * dilation_w + offset_w;
const float cur_top_grad = data_col[index] * mask;
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
float weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n,
const float *data_col, const float *data_im,
const float *data_offset, const float *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col,
float *grad_offset, float *grad_mask)
{
CUDA_KERNEL_LOOP(index, n)
{
float val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const float *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
const float *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width;
const float *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const float *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const float offset_h = data_offset_ptr[data_offset_h_ptr];
const float offset_w = data_offset_ptr[data_offset_w_ptr];
const float mask = data_mask_ptr[data_mask_hw_ptr];
float inv_h = h_in + i * dilation_h + offset_h;
float inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
inv_h = inv_w = -2;
}
else
{
mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
}
const float weight = dmcn_get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
grad_offset[index] = val;
if (offset_c % 2 == 0)
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval;
}
}
void modulated_deformable_im2col_cuda(hipStream_t stream,
const float* data_im, const float* data_offset, const float* data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, float* data_col) {
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
  hipLaunchKernelGGL(modulated_deformable_im2col_gpu_kernel,
      dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS),
      0, stream,
num_kernels, data_im, data_offset, data_mask, height_im, width_im, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in modulated_deformable_im2col_cuda: %s\n", hipGetErrorString(err));
}
}
void modulated_deformable_col2im_cuda(hipStream_t stream,
const float* data_col, const float* data_offset, const float* data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, float* grad_im){
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col;
  hipLaunchKernelGGL(modulated_deformable_col2im_gpu_kernel,
      dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS),
      0, stream,
num_kernels, data_col, data_offset, data_mask, channels, height_im, width_im,
      kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, deformable_group, height_col, width_col, grad_im);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in modulated_deformable_col2im_cuda: %s\n", hipGetErrorString(err));
}
}
void modulated_deformable_col2im_coord_cuda(hipStream_t stream,
const float* data_col, const float* data_im, const float* data_offset, const float* data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group,
float* grad_offset, float* grad_mask) {
const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group;
const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group;
  hipLaunchKernelGGL(modulated_deformable_col2im_coord_gpu_kernel,
      dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS),
      0, stream,
num_kernels, data_col, data_im, data_offset, data_mask, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col,
grad_offset, grad_mask);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in modulated_deformable_col2im_coord_cuda: %s\n", hipGetErrorString(err));
}
}
|
46d0512300dd468a0d509855d171b9439f6ad9a4.cu
|
#include "dcn_v2_im2col_cuda.h"
#include <cstdio>
#include <algorithm>
#include <cstring>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
inline int GET_BLOCKS(const int N)
{
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
__device__ float dmcn_im2col_bilinear(const float *bottom_data, const int data_width,
const int height, const int width, float h, float w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
float lh = h - h_low;
float lw = w - w_low;
float hh = 1 - lh, hw = 1 - lw;
float v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
float v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
float v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
float v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
__device__ float dmcn_get_gradient_weight(float argmax_h, float argmax_w,
const int h, const int w, const int height, const int width)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
float weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
__device__ float dmcn_get_coordinate_weight(float argmax_h, float argmax_w,
const int height, const int width, const float *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
float weight = 0;
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
const float *data_im, const float *data_offset, const float *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
float *data_col)
{
  // launch channels * batch_size * height_col * width_col threads (one per output element)
CUDA_KERNEL_LOOP(index, n)
{
// NOTE(CharlesShang): different from Dai Jifeng's MXNet implementation, col_buffer is of shape (c*kw*kh, N, oh, ow)
// here columns is of shape (N, c*kw*kh, oh * ow), need to adapt axis
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
// const int b_col = (index / width_col / height_col) % batch_size;
const int b_col = (index / width_col / height_col / num_channels) % batch_size;
// const int c_im = (index / width_col / height_col) / batch_size;
const int c_im = (index / width_col / height_col) % num_channels;
// const int c_col = c_im * kernel_h * kernel_w;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
// float *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
float *data_col_ptr = data_col + ((b_col * num_channels * kernel_w * kernel_h + c_col) * height_col + h_col) * width_col + w_col;
//const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const float *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const float *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const float *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const float offset_h = data_offset_ptr[data_offset_h_ptr];
const float offset_w = data_offset_ptr[data_offset_w_ptr];
const float mask = data_mask_ptr[data_mask_hw_ptr];
float val = static_cast<float>(0);
const float h_im = h_in + i * dilation_h + offset_h;
const float w_im = w_in + j * dilation_w + offset_w;
//if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const float map_h = i * dilation_h + offset_h;
//const float map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
// data_col_ptr += batch_size * height_col * width_col;
data_col_ptr += height_col * width_col;
}
}
}
}
__global__ void modulated_deformable_col2im_gpu_kernel(const int n,
const float *data_col, const float *data_offset, const float *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
float *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const float *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const float *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const float offset_h = data_offset_ptr[data_offset_h_ptr];
const float offset_w = data_offset_ptr[data_offset_w_ptr];
const float mask = data_mask_ptr[data_mask_hw_ptr];
const float cur_inv_h_data = h_in + i * dilation_h + offset_h;
const float cur_inv_w_data = w_in + j * dilation_w + offset_w;
const float cur_top_grad = data_col[index] * mask;
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
float weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n,
const float *data_col, const float *data_im,
const float *data_offset, const float *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col,
float *grad_offset, float *grad_mask)
{
CUDA_KERNEL_LOOP(index, n)
{
float val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const float *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
const float *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width;
const float *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const float *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const float offset_h = data_offset_ptr[data_offset_h_ptr];
const float offset_w = data_offset_ptr[data_offset_w_ptr];
const float mask = data_mask_ptr[data_mask_hw_ptr];
float inv_h = h_in + i * dilation_h + offset_h;
float inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
inv_h = inv_w = -2;
}
else
{
mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
}
const float weight = dmcn_get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
grad_offset[index] = val;
if (offset_c % 2 == 0)
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval;
}
}
void modulated_deformable_im2col_cuda(cudaStream_t stream,
const float* data_im, const float* data_offset, const float* data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, float* data_col) {
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
modulated_deformable_im2col_gpu_kernel
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS,
0, stream>>>(
num_kernels, data_im, data_offset, data_mask, height_im, width_im, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
}
}
void modulated_deformable_col2im_cuda(cudaStream_t stream,
const float* data_col, const float* data_offset, const float* data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, float* grad_im){
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col;
modulated_deformable_col2im_gpu_kernel
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS,
0, stream>>>(
num_kernels, data_col, data_offset, data_mask, channels, height_im, width_im,
      kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, deformable_group, height_col, width_col, grad_im);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err));
}
}
void modulated_deformable_col2im_coord_cuda(cudaStream_t stream,
const float* data_col, const float* data_im, const float* data_offset, const float* data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group,
float* grad_offset, float* grad_mask) {
const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group;
const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group;
modulated_deformable_col2im_coord_gpu_kernel
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS,
0, stream>>>(
num_kernels, data_col, data_im, data_offset, data_mask, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col,
grad_offset, grad_mask);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err));
}
}
|
f44cda49e92a909ec272264f108d75a6f6001179.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__constant__ float kernel_c[2 * HALF_WIDTH + 1];
__global__ void convolve_rows_kernel_naive(float *result, float *input, float *kernel, int num_cols, int num_rows)
{
int i1;
int j1, j2;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
j1 = x - HALF_WIDTH;
j2 = x + HALF_WIDTH;
// Clamp at the edges of the matrix
if (j1 < 0)
j1 = 0;
if (j2 >= num_cols)
j2 = num_cols - 1;
// Obtain relative position of starting element from element being convolved
i1 = j1 - x;
// Obtain operating width of the kernel
j1 = j1 - x + HALF_WIDTH;
j2 = j2 - x + HALF_WIDTH;
// Convolve along row
result[y * num_cols + x] = 0.0f;
for(int i = i1, j = j1; j <= j2; j++, i++)
result[y * num_cols + x] += kernel[j] * input[y * num_cols + x + i];
return;
}
__global__ void convolve_columns_kernel_naive(float *result, float *input, float *kernel, int num_cols, int num_rows)
{
int i1;
int j1, j2;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
j1 = y - HALF_WIDTH;
j2 = y + HALF_WIDTH;
// Clamp at the edges of the matrix
if (j1 < 0)
j1 = 0;
if (j2 >= num_rows)
j2 = num_rows - 1;
// Obtain relative position of starting element from element being convolved
i1 = j1 - y;
// Obtain the operating width of the kernel
j1 = j1 - y + HALF_WIDTH;
j2 = j2 - y + HALF_WIDTH;
// Convolve along column
result[y * num_cols + x] = 0.0f;
for(int i = i1, j = j1; j <= j2; j++, i++)
result[y * num_cols + x] += kernel[j] * input[y * num_cols + x + (i * num_cols)];
return;
}
__global__ void convolve_rows_kernel_optimized(float *result, float *input, int num_cols, int num_rows)
{
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int col_start = x - HALF_WIDTH;
int col_end = x + HALF_WIDTH;
// Clamp at the edges of the matrix
if(col_start < 0)
col_start = 0;
if(col_end >= num_cols)
col_end = num_cols - 1;
// Obtain relative position of starting element from element being convolved
int row = col_start - x;
// Obtain operating width of the kernel
col_start = col_start - x + HALF_WIDTH;
col_end = col_end - x + HALF_WIDTH;
// Convolve along row
float res = 0.0f;
for(int j = col_start; j <= col_end; j++, row++)
res += kernel_c[j] * input[y * num_cols + x + row];
result[y * num_cols + x] = res;
return;
}
__global__ void convolve_columns_kernel_optimized(float *result, float *input, int num_cols, int num_rows)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int row_start = y - HALF_WIDTH;
int row_end = y + HALF_WIDTH;
// Clamp at the edges of the matrix
if(row_start < 0)
row_start = 0;
if(row_end >= num_rows)
row_end = num_rows - 1;
// Obtain relative position of starting element from element being convolved
int col = row_start - y;
// Obtain the operating width of the kernel
row_start = row_start - y + HALF_WIDTH;
row_end = row_end - y + HALF_WIDTH;
// Convolve along column
float res = 0.0f;
for(int j = row_start; j <= row_end; j++, col++)
res += kernel_c[j] * input[y * num_cols + x + (col * num_cols)];
result[y * num_cols + x] = res;
return;
}
|
f44cda49e92a909ec272264f108d75a6f6001179.cu
|
__constant__ float kernel_c[2 * HALF_WIDTH + 1];
__global__ void convolve_rows_kernel_naive(float *result, float *input, float *kernel, int num_cols, int num_rows)
{
int i1;
int j1, j2;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
j1 = x - HALF_WIDTH;
j2 = x + HALF_WIDTH;
// Clamp at the edges of the matrix
if (j1 < 0)
j1 = 0;
if (j2 >= num_cols)
j2 = num_cols - 1;
// Obtain relative position of starting element from element being convolved
i1 = j1 - x;
// Obtain operating width of the kernel
j1 = j1 - x + HALF_WIDTH;
j2 = j2 - x + HALF_WIDTH;
// Convolve along row
result[y * num_cols + x] = 0.0f;
for(int i = i1, j = j1; j <= j2; j++, i++)
result[y * num_cols + x] += kernel[j] * input[y * num_cols + x + i];
return;
}
__global__ void convolve_columns_kernel_naive(float *result, float *input, float *kernel, int num_cols, int num_rows)
{
int i1;
int j1, j2;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
j1 = y - HALF_WIDTH;
j2 = y + HALF_WIDTH;
// Clamp at the edges of the matrix
if (j1 < 0)
j1 = 0;
if (j2 >= num_rows)
j2 = num_rows - 1;
// Obtain relative position of starting element from element being convolved
i1 = j1 - y;
// Obtain the operating width of the kernel
j1 = j1 - y + HALF_WIDTH;
j2 = j2 - y + HALF_WIDTH;
// Convolve along column
result[y * num_cols + x] = 0.0f;
for(int i = i1, j = j1; j <= j2; j++, i++)
result[y * num_cols + x] += kernel[j] * input[y * num_cols + x + (i * num_cols)];
return;
}
__global__ void convolve_rows_kernel_optimized(float *result, float *input, int num_cols, int num_rows)
{
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int col_start = x - HALF_WIDTH;
int col_end = x + HALF_WIDTH;
// Clamp at the edges of the matrix
if(col_start < 0)
col_start = 0;
if(col_end >= num_cols)
col_end = num_cols - 1;
// Obtain relative position of starting element from element being convolved
int row = col_start - x;
// Obtain operating width of the kernel
col_start = col_start - x + HALF_WIDTH;
col_end = col_end - x + HALF_WIDTH;
// Convolve along row
float res = 0.0f;
for(int j = col_start; j <= col_end; j++, row++)
res += kernel_c[j] * input[y * num_cols + x + row];
result[y * num_cols + x] = res;
return;
}
__global__ void convolve_columns_kernel_optimized(float *result, float *input, int num_cols, int num_rows)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int row_start = y - HALF_WIDTH;
int row_end = y + HALF_WIDTH;
// Clamp at the edges of the matrix
if(row_start < 0)
row_start = 0;
if(row_end >= num_rows)
row_end = num_rows - 1;
// Obtain relative position of starting element from element being convolved
int col = row_start - y;
// Obtain the operating width of the kernel
row_start = row_start - y + HALF_WIDTH;
row_end = row_end - y + HALF_WIDTH;
// Convolve along column
float res = 0.0f;
for(int j = row_start; j <= row_end; j++, col++)
res += kernel_c[j] * input[y * num_cols + x + (col * num_cols)];
result[y * num_cols + x] = res;
return;
}
|
a65d40d39332bfd2b380e3d2d9615eccd4a8a0c7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reduce_by_key.cuh"
#define TB_SIZE 256
#define MAX_K 4
using std::cout;
using std::endl;
using std::vector;
void print(float3 point) {
cout << "float3(" << point.x << ", " << point.y << ", " << point.z << ") ";
}
void println(float3 point) {
cout << "float3(" << point.x << ", " << point.y << ", " << point.z << ")" << endl;
}
void print(int a) {
cout << a << " ";
}
__device__ __host__ float3 operator+(const float3 &a, const float3 &b) {
return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
}
inline __host__ __device__ void operator+=(float3 &a, float3 b)
{
a.x += b.x; a.y += b.y; a.z += b.z;
}
inline __host__ __device__ float3 operator/(const float3 &a, const int b) {
return make_float3(a.x / b, a.y / b, a.z / b);
}
inline __host__ __device__ void operator/=(float3 &a, const int b) {
if (b != 0) {
a.x /= b; a.y /= b; a.z /= b;
}
else {
printf("Zero division!\n");
}
}
/****************************FLOAT3 VERSION*********************************/
__global__ void my_reduce_by_key_kernel(int n, int k, int *keys, float3 *values, float3 *almost_reduced_values) {
__shared__ float3 partial_sum[TB_SIZE][MAX_K];
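// Each thread owns one row of partial_sum and scatters its value into the column given by its key
// (k must not exceed MAX_K); a tree reduction over the rows then yields per-key sums for the block.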
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Initialize shared memory to zero!
for (int i = 0; i < k; ++i) {
partial_sum[threadIdx.x][i] = make_float3(0, 0, 0);
}
if (tid >= n) {
return;
}
__syncthreads();
const int key = keys[tid]; // value from 0 to k-1
// Load elements into shared memory
partial_sum[threadIdx.x][key] = values[tid];
__syncthreads();
for (int s = 1; s < blockDim.x; s <<= 1) {
if (threadIdx.x % (2*s) == 0) {
for (int i = 0; i < k; ++i) {
partial_sum[threadIdx.x][i] += partial_sum[threadIdx.x + s][i];
}
}
__syncthreads();
}
// First thread in a block writes to main memory
if (threadIdx.x == 0) {
for (int i = 0; i < k; ++i) {
const int pos = blockIdx.x * k + i;
almost_reduced_values[pos] = partial_sum[0][i];
}
}
}
// run at the end of the reduce by key with only one block
__global__ void sum_reduce(int n, int k, float3 *d_almost_reduces_values, float3 *output) {
__shared__ float3 partial_sum[TB_SIZE][MAX_K];
const int tid = threadIdx.x;
for (int i = 0; i < k; ++i) {
partial_sum[tid][i] = make_float3(0, 0, 0);
}
__syncthreads();
if (tid < n) {
for (int i = 0; i < k; ++i) {
const int pos = tid * k + i;
partial_sum[tid][i] = d_almost_reduces_values[pos];
}
}
__syncthreads();
for (int s = 1; s < blockDim.x; s <<= 1) {
if (tid % (2*s) == 0) {
for (int i = 0; i < k; ++i) {
partial_sum[tid][i] += partial_sum[threadIdx.x + s][i];
}
}
__syncthreads();
}
if (tid == 0) {
for (int i = 0; i < k; ++i) {
output[i] = partial_sum[0][i];
}
}
}
void my_reduce_by_key(int n, int k, int *d_keys,
float3* d_values,
float3 *d_almost_reduced_values,
float3 *d_output) {
const int N_BLOCKS = (n + TB_SIZE - 1) / TB_SIZE;
hipLaunchKernelGGL(( my_reduce_by_key_kernel), dim3(N_BLOCKS), dim3(TB_SIZE), 0, 0, n, k, d_keys, d_values, d_almost_reduced_values);
// if (n > TB_SIZE)
hipDeviceSynchronize();
hipLaunchKernelGGL(( sum_reduce), dim3(1), dim3(TB_SIZE), 0, 0, N_BLOCKS, k, d_almost_reduced_values, d_output);
hipDeviceSynchronize();
}
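// Usage sketch (illustrative only; the buffer names below are placeholders, not part of this file):
// const int N_BLOCKS = (n + TB_SIZE - 1) / TB_SIZE;
// int *d_keys; float3 *d_values, *d_partial, *d_out;
// hipMalloc(&d_keys, n * sizeof(int));
// hipMalloc(&d_values, n * sizeof(float3));
// hipMalloc(&d_partial, N_BLOCKS * k * sizeof(float3)); // k partial sums per block, k <= MAX_K
// hipMalloc(&d_out, k * sizeof(float3));
// ... fill d_keys (values in [0, k)) and d_values, then:
// my_reduce_by_key(n, k, d_keys, d_values, d_partial, d_out);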
/****************************INT VERSION*********************************/
__global__ void my_reduce_by_key_kernel(int n, int k, int *keys, int *almost_reduced_values) {
__shared__ int partial_sum[TB_SIZE][MAX_K];
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Initialize shared memory to zero before the early exit so that
// partially filled blocks do not reduce over uninitialized values.
for (int i = 0; i < k; ++i) {
partial_sum[threadIdx.x][i] = 0;
}
if (tid >= n) {
return;
}
const int key = keys[tid]; // value from 0 to k-1
// Load elements into shared memory
partial_sum[threadIdx.x][key] = 1; // 1 because we want to count here
__syncthreads();
for (int s = 1; s < blockDim.x; s <<= 1) {
if (threadIdx.x % (2*s) == 0) {
for (int i = 0; i < k; ++i) {
partial_sum[threadIdx.x][i] += partial_sum[threadIdx.x + s][i];
}
}
__syncthreads();
}
// First thread in a block writes to main memory
if (threadIdx.x == 0) {
for (int i = 0; i < k; ++i) {
const int pos = blockIdx.x * k + i;
almost_reduced_values[pos] = partial_sum[0][i];
}
}
}
// run at the end of the reduce by key with only one block
__global__ void sum_reduce(int n, int k, int *d_almost_reduces_values, float3 *output) {
__shared__ int partial_sum[TB_SIZE][MAX_K];
const int tid = threadIdx.x; // there are exactly N_BLOCKS threads and only one block at this point
// Initializing shared memory to zero!
for (int i = 0; i < k; ++i) {
partial_sum[tid][i] = 0;
}
__syncthreads();
if (tid < n) {
for (int i = 0; i < k; ++i) {
const int pos = tid * k + i;
partial_sum[tid][i] = d_almost_reduces_values[pos];
}
}
__syncthreads();
for (int s = 1; s < blockDim.x; s <<= 1) {
if (tid % (2*s) == 0 && tid + s < n) {
for (int i = 0; i < k; ++i) {
partial_sum[tid][i] += partial_sum[tid + s][i];
}
}
__syncthreads();
}
if (tid == 0) {
for (int i = 0; i < k; ++i) {
output[i] /= partial_sum[0][i];
// output[i] = make_float3(partial_sum[0][i], partial_sum[0][i], partial_sum[0][i]);
}
}
}
void my_reduce_by_key(int n, int k, int *d_keys,
int *d_almost_reduced_values,
float3 *d_output) {
const int N_BLOCKS = (n + TB_SIZE - 1) / TB_SIZE;
hipLaunchKernelGGL(( my_reduce_by_key_kernel), dim3(N_BLOCKS), dim3(TB_SIZE), 0, 0, n, k, d_keys, d_almost_reduced_values);
// if (n > TB_SIZE)
hipDeviceSynchronize();
hipLaunchKernelGGL(( sum_reduce), dim3(1), dim3(TB_SIZE), 0, 0, N_BLOCKS, k, d_almost_reduced_values, d_output);
hipDeviceSynchronize();
}
|
a65d40d39332bfd2b380e3d2d9615eccd4a8a0c7.cu
|
#include "reduce_by_key.cuh"
#define TB_SIZE 256
#define MAX_K 4
using std::cout;
using std::endl;
using std::vector;
void print(float3 point) {
cout << "float3(" << point.x << ", " << point.y << ", " << point.z << ") ";
}
void println(float3 point) {
cout << "float3(" << point.x << ", " << point.y << ", " << point.z << ")" << endl;
}
void print(int a) {
cout << a << " ";
}
__device__ __host__ float3 operator+(const float3 &a, const float3 &b) {
return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
}
inline __host__ __device__ void operator+=(float3 &a, float3 b)
{
a.x += b.x; a.y += b.y; a.z += b.z;
}
inline __host__ __device__ float3 operator/(const float3 &a, const int b) {
return make_float3(a.x / b, a.y / b, a.z / b);
}
inline __host__ __device__ void operator/=(float3 &a, const int b) {
if (b != 0) {
a.x /= b; a.y /= b; a.z /= b;
}
else {
printf("Zero division!\n");
}
}
/****************************FLOAT3 VERSION*********************************/
__global__ void my_reduce_by_key_kernel(int n, int k, int *keys, float3 *values, float3 *almost_reduced_values) {
__shared__ float3 partial_sum[TB_SIZE][MAX_K];
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Initialize shared memory to zero!
for (int i = 0; i < k; ++i) {
partial_sum[threadIdx.x][i] = make_float3(0, 0, 0);
}
if (tid >= n) {
return;
}
__syncthreads();
const int key = keys[tid]; // value from 0 to k-1
// Load elements into shared memory
partial_sum[threadIdx.x][key] = values[tid];
__syncthreads();
for (int s = 1; s < blockDim.x; s <<= 1) {
if (threadIdx.x % (2*s) == 0) {
for (int i = 0; i < k; ++i) {
partial_sum[threadIdx.x][i] += partial_sum[threadIdx.x + s][i];
}
}
__syncthreads();
}
// First thread in a block writes to main memory
if (threadIdx.x == 0) {
for (int i = 0; i < k; ++i) {
const int pos = blockIdx.x * k + i;
almost_reduced_values[pos] = partial_sum[0][i];
}
}
}
// run at the end of the reduce by key with only one block
__global__ void sum_reduce(int n, int k, float3 *d_almost_reduces_values, float3 *output) {
__shared__ float3 partial_sum[TB_SIZE][MAX_K];
const int tid = threadIdx.x;
for (int i = 0; i < k; ++i) {
partial_sum[tid][i] = make_float3(0, 0, 0);
}
__syncthreads();
if (tid < n) {
for (int i = 0; i < k; ++i) {
const int pos = tid * k + i;
partial_sum[tid][i] = d_almost_reduces_values[pos];
}
}
__syncthreads();
for (int s = 1; s < blockDim.x; s <<= 1) {
if (tid % (2*s) == 0) {
for (int i = 0; i < k; ++i) {
partial_sum[tid][i] += partial_sum[threadIdx.x + s][i];
}
}
__syncthreads();
}
if (tid == 0) {
for (int i = 0; i < k; ++i) {
output[i] = partial_sum[0][i];
}
}
}
void my_reduce_by_key(int n, int k, int *d_keys,
float3* d_values,
float3 *d_almost_reduced_values,
float3 *d_output) {
const int N_BLOCKS = (n + TB_SIZE - 1) / TB_SIZE;
my_reduce_by_key_kernel<<<N_BLOCKS, TB_SIZE>>> (n, k, d_keys, d_values, d_almost_reduced_values);
// if (n > TB_SIZE)
cudaDeviceSynchronize();
sum_reduce<<<1, TB_SIZE>>> (N_BLOCKS, k, d_almost_reduced_values, d_output);
cudaDeviceSynchronize();
}
/****************************INT VERSION*********************************/
__global__ void my_reduce_by_key_kernel(int n, int k, int *keys, int *almost_reduced_values) {
__shared__ int partial_sum[TB_SIZE][MAX_K];
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Initialize shared memory to zero before the early exit so that
// partially filled blocks do not reduce over uninitialized values.
for (int i = 0; i < k; ++i) {
partial_sum[threadIdx.x][i] = 0;
}
if (tid >= n) {
return;
}
const int key = keys[tid]; // value from 0 to k-1
// Load elements into shared memory
partial_sum[threadIdx.x][key] = 1; // 1 because we want to count here
__syncthreads();
for (int s = 1; s < blockDim.x; s <<= 1) {
if (threadIdx.x % (2*s) == 0) {
for (int i = 0; i < k; ++i) {
partial_sum[threadIdx.x][i] += partial_sum[threadIdx.x + s][i];
}
}
__syncthreads();
}
// First thread in a block writes to main memory
if (threadIdx.x == 0) {
for (int i = 0; i < k; ++i) {
const int pos = blockIdx.x * k + i;
almost_reduced_values[pos] = partial_sum[0][i];
}
}
}
// run at the end of the reduce by key with only one block
__global__ void sum_reduce(int n, int k, int *d_almost_reduces_values, float3 *output) {
__shared__ int partial_sum[TB_SIZE][MAX_K];
const int tid = threadIdx.x; // there are exactly N_BLOCKS threads and only one block at this point
// Initializing shared memory to zero!
for (int i = 0; i < k; ++i) {
partial_sum[tid][i] = 0;
}
__syncthreads();
if (tid < n) {
for (int i = 0; i < k; ++i) {
const int pos = tid * k + i;
partial_sum[tid][i] = d_almost_reduces_values[pos];
}
}
__syncthreads();
for (int s = 1; s < blockDim.x; s <<= 1) {
if (tid % (2*s) == 0 && tid + s < n) {
for (int i = 0; i < k; ++i) {
partial_sum[tid][i] += partial_sum[tid + s][i];
}
}
__syncthreads();
}
if (tid == 0) {
for (int i = 0; i < k; ++i) {
output[i] /= partial_sum[0][i];
// output[i] = make_float3(partial_sum[0][i], partial_sum[0][i], partial_sum[0][i]);
}
}
}
void my_reduce_by_key(int n, int k, int *d_keys,
int *d_almost_reduced_values,
float3 *d_output) {
const int N_BLOCKS = (n + TB_SIZE - 1) / TB_SIZE;
my_reduce_by_key_kernel<<<N_BLOCKS, TB_SIZE>>> (n, k, d_keys, d_almost_reduced_values);
// if (n > TB_SIZE)
cudaDeviceSynchronize();
sum_reduce<<<1, TB_SIZE>>> (N_BLOCKS, k, d_almost_reduced_values, d_output);
cudaDeviceSynchronize();
}
|
1c8fe934e4547a53ec7355dee85f4f875c3b2a17.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define KERNELS_H
#include "funobj.h"
#include "kernels.h"
LossFunction::LossFunction() {}
float LossFunction::TrainingAccuracy(tensor* networkOutput, tensor* labels) {
float* TrainAccuracy;
float* dAccuracy;
TrainAccuracy = (float*) calloc(1, sizeof(float));
hipMalloc((void**)&dAccuracy, sizeof(float));
hipMemcpy(dAccuracy, TrainAccuracy, sizeof(float), hipMemcpyHostToDevice);
dim3 dimBlock(32);
dim3 dimGrid((networkOutput->col + dimBlock.x)/dimBlock.x);
hipLaunchKernelGGL(( kSoftMaxAccuracy), dim3(dimGrid), dim3(dimBlock), 0, 0, networkOutput->d_data, networkOutput->row, networkOutput->col, labels->d_data, dAccuracy);
hipMemcpy(TrainAccuracy, dAccuracy, sizeof(float), hipMemcpyDeviceToHost);
hipFree(dAccuracy);
return *TrainAccuracy / networkOutput->col;
}
tensor* LossFunction::calculate(tensor* networkOutput, tensor* labels, tensor* output) {
dim3 dimBlock(1,1);
dim3 dimGrid(1,1);
hipLaunchKernelGGL(( kSoftMax), dim3(dimGrid), dim3(dimBlock), 0, 0, networkOutput->d_data, networkOutput->row, networkOutput->col, labels->d_data, output->d_data);
return output;
}
float LossFunction::TestAccuracy(tensor* OutputVector, tensor* labels) {
float TestAccuracy = 0;
float* dev;
hipMalloc((void**)&dev, sizeof(float));
hipMemcpy(dev, &TestAccuracy, sizeof(float), hipMemcpyHostToDevice);
dim3 dimBlock(32);
dim3 dimGrid((OutputVector->col + dimBlock.x)/dimBlock.x);
hipLaunchKernelGGL(( kSoftMaxAccuracy), dim3(dimGrid), dim3(dimBlock), 0, 0, OutputVector->d_data, OutputVector->row, OutputVector->col, labels->d_data, dev);
hipMemcpy(&TestAccuracy, dev, sizeof(float), hipMemcpyDeviceToHost);
hipFree(dev);
return 100.0 * TestAccuracy / OutputVector->col;
}
|
1c8fe934e4547a53ec7355dee85f4f875c3b2a17.cu
|
#define KERNELS_H
#include "funobj.h"
#include "kernels.h"
LossFunction::LossFunction() {}
float LossFunction::TrainingAccuracy(tensor* networkOutput, tensor* labels) {
float* TrainAccuracy;
float* dAccuracy;
TrainAccuracy = (float*) calloc(1, sizeof(float));
cudaMalloc((void**)&dAccuracy, sizeof(float));
cudaMemcpy(dAccuracy, TrainAccuracy, sizeof(float), cudaMemcpyHostToDevice);
dim3 dimBlock(32);
dim3 dimGrid((networkOutput->col + dimBlock.x)/dimBlock.x);
kSoftMaxAccuracy<<<dimGrid, dimBlock>>>(networkOutput->d_data, networkOutput->row, networkOutput->col, labels->d_data, dAccuracy);
cudaMemcpy(TrainAccuracy, dAccuracy, sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(dAccuracy);
return *TrainAccuracy / networkOutput->col;
}
tensor* LossFunction::calculate(tensor* networkOutput, tensor* labels, tensor* output) {
dim3 dimBlock(1,1);
dim3 dimGrid(1,1);
kSoftMax<<<dimGrid, dimBlock>>>(networkOutput->d_data, networkOutput->row, networkOutput->col, labels->d_data, output->d_data);
return output;
}
float LossFunction::TestAccuracy(tensor* OutputVector, tensor* labels) {
float TestAccuracy = 0;
float* dev;
cudaMalloc((void**)&dev, sizeof(float));
cudaMemcpy(dev, &TestAccuracy, sizeof(float), cudaMemcpyHostToDevice);
dim3 dimBlock(32);
dim3 dimGrid((OutputVector->col + dimBlock.x)/dimBlock.x);
kSoftMaxAccuracy<<<dimGrid, dimBlock>>>(OutputVector->d_data, OutputVector->row, OutputVector->col, labels->d_data, dev);
cudaMemcpy(&TestAccuracy, dev, sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(dev);
return 100.0 * TestAccuracy / OutputVector->col;
}
|
8eb73e316567bf9768786bd32fd699db71f9e07d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Oleh Semeniv ([email protected])
//
#include <system/op_boilerplate.h>
#include <ops/declarable/helpers/updatersHelpers.h>
#include <helpers/PointersManager.h>
#include <math/platformmath.h>
#include <math/templatemath.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ void adaMaxUpdaterCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vinv, const Nd4jLong* invShapeInfo,
const void* vinm, const Nd4jLong* inmShapeInfo, void* vz, const Nd4jLong* zShapeInfo,
void* vstV, const Nd4jLong* stvShapeInfo, void* vstM, const Nd4jLong* stmShapeInfo,
const T lr, const T beta1, const T beta2, const T epsilon, const T iteration) {
const auto grad = reinterpret_cast<const T*>(vx);
const auto initU = reinterpret_cast<const T*>(vinv);
const auto initM = reinterpret_cast<const T*>(vinm);
auto up = reinterpret_cast<T*>(vz);
auto stU = reinterpret_cast<T*>(vstV);
auto stM = reinterpret_cast<T*>(vstM);
__shared__ Nd4jLong xLen;
__shared__ T beta1T, epsilonT;
__shared__ bool bEWS, bOrdering, bXZsame, bXInUSame, bXStUSame, bXInMSame, bXStMSame;
if (threadIdx.x == 0) {
xLen = shape::length(xShapeInfo);
beta1T = sd::math::nd4j_pow<T,T,T>(beta1, (iteration + 1) );
epsilonT = lr / (1.0 - beta1T);
if (sd::math::nd4j_isnan(epsilonT) || 0 == epsilonT || sd::math::nd4j_isinf(epsilonT))
epsilonT = epsilon;
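// bias-corrected step size: lr_t = lr / (1 - beta1^(t+1)); fall back to epsilon if the correction degenerates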
bEWS = 1 == shape::elementWiseStride(xShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo) &&
1 == shape::elementWiseStride(stmShapeInfo) && 1 == shape::elementWiseStride(inmShapeInfo) &&
1 == shape::elementWiseStride(stvShapeInfo) && 1 == shape::elementWiseStride(invShapeInfo);
bOrdering = shape::order(xShapeInfo) == shape::order(zShapeInfo) && shape::order(xShapeInfo) == shape::order(stmShapeInfo) &&
shape::order(xShapeInfo) == shape::order(inmShapeInfo) && shape::order(xShapeInfo) == shape::order(invShapeInfo) &&
shape::order(xShapeInfo) == shape::order(stvShapeInfo);
bXZsame = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
bXInUSame = shape::haveSameShapeAndStrides(xShapeInfo, invShapeInfo);
bXStUSame = shape::haveSameShapeAndStrides(xShapeInfo, stvShapeInfo);
bXInMSame = shape::haveSameShapeAndStrides(xShapeInfo, inmShapeInfo);
bXStMSame = shape::haveSameShapeAndStrides(xShapeInfo, stmShapeInfo);
}
__syncthreads();
int coords[MAX_RANK];
for (Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; i < xLen; i += gridDim.x * blockDim.x) {
auto xOffset = i, zOffset = i, initMOffset = i, initUOffset = i, stMOffset = i, stUOffset = i;
if (!bEWS || !bOrdering) {
shape::index2coords(i, xShapeInfo, coords);
xOffset = shape::getOffset(xShapeInfo, coords);
zOffset = bXZsame ? xOffset : shape::getOffset(zShapeInfo, coords);
initUOffset = bXInUSame ? xOffset : shape::getOffset(invShapeInfo, coords);
stUOffset = bXStUSame ? xOffset : shape::getOffset(stvShapeInfo, coords);
initMOffset = bXInMSame ? xOffset : shape::getOffset(inmShapeInfo, coords);
stMOffset = bXStMSame ? xOffset : shape::getOffset(stmShapeInfo, coords);
}
//m = B_1 * m + (1-B_1)*grad
stM[stMOffset] = beta1 * initM[initMOffset] + grad[xOffset] * (1 - beta1);
//u = max(B_2 * u, |grad|)
stU[stUOffset] = sd::math::nd4j_max( (beta2* initU[initUOffset]), sd::math::nd4j_abs(grad[xOffset])) + 1e-32;
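// the small 1e-32 term keeps the denominator of the update below strictly positive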
up[zOffset] = (stM[stMOffset] * epsilonT) / stU[stUOffset];
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
linkage void adaMaxUpdaterCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t* stream, const void* vx, const Nd4jLong* xShapeInfo,
const void* vinv, const Nd4jLong* invShapeInfo, const void* vinm, const Nd4jLong* inmShapeInfo,
void* vz, const Nd4jLong* zShapeInfo, void* vstV, const Nd4jLong* stvShapeInfo,
void* vstM, const Nd4jLong* stmShapeInfo, const double dLr,
const double dBeta1, const double dBeta2, const double dEpsilon, const int nIteration) {
const T lr = static_cast<T>(dLr);
const T beta1 = static_cast<T>(dBeta1);
const T beta2 = static_cast<T>(dBeta2);
const T epsilon = static_cast<T>(dEpsilon);
const T iteration = static_cast<T>(nIteration);
hipLaunchKernelGGL(( adaMaxUpdaterCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 256, * stream, vx, xShapeInfo, vinv, invShapeInfo, vinm, inmShapeInfo, vz,
zShapeInfo, vstV, stvShapeInfo, vstM, stmShapeInfo, lr, beta1, beta2, epsilon, iteration);
}
///////////////////////////////////////////////////////////////////
void updaterAdaMax(sd::LaunchContext* context, const NDArray& gradient, const NDArray& initStateU, const NDArray& initStateM,
NDArray& update, NDArray& stateU, NDArray& stateM, const double dLr, const double dBeta1,
const double dBeta2, const double dEpsilon, const int nIteration) {
PointersManager manager(context, "adaMaxUpdater");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (gradient.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
NDArray::prepareSpecialUse({ &update, &stateU, &stateM }, { &gradient, &initStateU, &initStateM });
BUILD_SINGLE_SELECTOR(gradient.dataType(), adaMaxUpdaterCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(),
gradient.specialBuffer(), gradient.specialShapeInfo(), initStateU.specialBuffer(),
initStateU.specialShapeInfo(), initStateM.specialBuffer(), initStateM.specialShapeInfo(),
update.specialBuffer(), update.specialShapeInfo(), stateU.specialBuffer(),
stateU.specialShapeInfo(), stateM.specialBuffer(), stateM.specialShapeInfo(),
dLr, dBeta1, dBeta2, dEpsilon, nIteration ), FLOAT_TYPES);
NDArray::registerSpecialUse({ &update, &stateU, &stateM }, { &gradient, &initStateU, &initStateM });
manager.synchronize();
}
}
}
}
|
8eb73e316567bf9768786bd32fd699db71f9e07d.cu
|
/*******************************************************************************
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Oleh Semeniv ([email protected])
//
#include <system/op_boilerplate.h>
#include <ops/declarable/helpers/updatersHelpers.h>
#include <helpers/PointersManager.h>
#include <math/platformmath.h>
#include <math/templatemath.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ void adaMaxUpdaterCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vinv, const Nd4jLong* invShapeInfo,
const void* vinm, const Nd4jLong* inmShapeInfo, void* vz, const Nd4jLong* zShapeInfo,
void* vstV, const Nd4jLong* stvShapeInfo, void* vstM, const Nd4jLong* stmShapeInfo,
const T lr, const T beta1, const T beta2, const T epsilon, const T iteration) {
const auto grad = reinterpret_cast<const T*>(vx);
const auto initU = reinterpret_cast<const T*>(vinv);
const auto initM = reinterpret_cast<const T*>(vinm);
auto up = reinterpret_cast<T*>(vz);
auto stU = reinterpret_cast<T*>(vstV);
auto stM = reinterpret_cast<T*>(vstM);
__shared__ Nd4jLong xLen;
__shared__ T beta1T, epsilonT;
__shared__ bool bEWS, bOrdering, bXZsame, bXInUSame, bXStUSame, bXInMSame, bXStMSame;
if (threadIdx.x == 0) {
xLen = shape::length(xShapeInfo);
beta1T = sd::math::nd4j_pow<T,T,T>(beta1, (iteration + 1) );
epsilonT = lr / (1.0 - beta1T);
if (sd::math::nd4j_isnan(epsilonT) || 0 == epsilonT || sd::math::nd4j_isinf(epsilonT))
epsilonT = epsilon;
bEWS = 1 == shape::elementWiseStride(xShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo) &&
1 == shape::elementWiseStride(stmShapeInfo) && 1 == shape::elementWiseStride(inmShapeInfo) &&
1 == shape::elementWiseStride(stvShapeInfo) && 1 == shape::elementWiseStride(invShapeInfo);
bOrdering = shape::order(xShapeInfo) == shape::order(zShapeInfo) && shape::order(xShapeInfo) == shape::order(stmShapeInfo) &&
shape::order(xShapeInfo) == shape::order(inmShapeInfo) && shape::order(xShapeInfo) == shape::order(invShapeInfo) &&
shape::order(xShapeInfo) == shape::order(stvShapeInfo);
bXZsame = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
bXInUSame = shape::haveSameShapeAndStrides(xShapeInfo, invShapeInfo);
bXStUSame = shape::haveSameShapeAndStrides(xShapeInfo, stvShapeInfo);
bXInMSame = shape::haveSameShapeAndStrides(xShapeInfo, inmShapeInfo);
bXStMSame = shape::haveSameShapeAndStrides(xShapeInfo, stmShapeInfo);
}
__syncthreads();
int coords[MAX_RANK];
for (Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; i < xLen; i += gridDim.x * blockDim.x) {
auto xOffset = i, zOffset = i, initMOffset = i, initUOffset = i, stMOffset = i, stUOffset = i;
if (!bEWS || !bOrdering) {
shape::index2coords(i, xShapeInfo, coords);
xOffset = shape::getOffset(xShapeInfo, coords);
zOffset = bXZsame ? xOffset : shape::getOffset(zShapeInfo, coords);
initUOffset = bXInUSame ? xOffset : shape::getOffset(invShapeInfo, coords);
stUOffset = bXStUSame ? xOffset : shape::getOffset(stvShapeInfo, coords);
initMOffset = bXInMSame ? xOffset : shape::getOffset(inmShapeInfo, coords);
stMOffset = bXStMSame ? xOffset : shape::getOffset(stmShapeInfo, coords);
}
//m = B_1 * m + (1-B_1)*grad
stM[stMOffset] = beta1 * initM[initMOffset] + grad[xOffset] * (1 - beta1);
//u = max(B_2 * u, |grad|)
stU[stUOffset] = sd::math::nd4j_max( (beta2* initU[initUOffset]), sd::math::nd4j_abs(grad[xOffset])) + 1e-32;
up[zOffset] = (stM[stMOffset] * epsilonT) / stU[stUOffset];
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
linkage void adaMaxUpdaterCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t* stream, const void* vx, const Nd4jLong* xShapeInfo,
const void* vinv, const Nd4jLong* invShapeInfo, const void* vinm, const Nd4jLong* inmShapeInfo,
void* vz, const Nd4jLong* zShapeInfo, void* vstV, const Nd4jLong* stvShapeInfo,
void* vstM, const Nd4jLong* stmShapeInfo, const double dLr,
const double dBeta1, const double dBeta2, const double dEpsilon, const int nIteration) {
const T lr = static_cast<T>(dLr);
const T beta1 = static_cast<T>(dBeta1);
const T beta2 = static_cast<T>(dBeta2);
const T epsilon = static_cast<T>(dEpsilon);
const T iteration = static_cast<T>(nIteration);
adaMaxUpdaterCuda<T><<<blocksPerGrid, threadsPerBlock, 256, * stream>>>(vx, xShapeInfo, vinv, invShapeInfo, vinm, inmShapeInfo, vz,
zShapeInfo, vstV, stvShapeInfo, vstM, stmShapeInfo, lr, beta1, beta2, epsilon, iteration);
}
///////////////////////////////////////////////////////////////////
void updaterAdaMax(sd::LaunchContext* context, const NDArray& gradient, const NDArray& initStateU, const NDArray& initStateM,
NDArray& update, NDArray& stateU, NDArray& stateM, const double dLr, const double dBeta1,
const double dBeta2, const double dEpsilon, const int nIteration) {
PointersManager manager(context, "adaMaxUpdater");
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = (gradient.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
NDArray::prepareSpecialUse({ &update, &stateU, &stateM }, { &gradient, &initStateU, &initStateM });
BUILD_SINGLE_SELECTOR(gradient.dataType(), adaMaxUpdaterCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(),
gradient.specialBuffer(), gradient.specialShapeInfo(), initStateU.specialBuffer(),
initStateU.specialShapeInfo(), initStateM.specialBuffer(), initStateM.specialShapeInfo(),
update.specialBuffer(), update.specialShapeInfo(), stateU.specialBuffer(),
stateU.specialShapeInfo(), stateM.specialBuffer(), stateM.specialShapeInfo(),
dLr, dBeta1, dBeta2, dEpsilon, nIteration ), FLOAT_TYPES);
NDArray::registerSpecialUse({ &update, &stateU, &stateM }, { &gradient, &initStateU, &initStateM });
manager.synchronize();
}
}
}
}
|
128dffe5f92d64813360b37fa2b9977d801b8cc3.hip
|
// !!! This is a file automatically generated by hipify!!!
//dgemm testing cublas
//Stefan H
//5.12.12
#include <rocblas.h>
#include <hip/hip_runtime_api.h>
#include <iostream>
#include <matrix.hpp>
#include <fillMatrix.hpp>
#include <Timer.hpp>
#include <Measurement.hpp>
int main() {
typedef hpc12::matrix<double,hpc12::column_major> matrix_type;
for (int N = 512;N < 15000;N+=512) {
matrix_type A(N,N);
matrix_type B(N,N);
matrix_type C(N,N);
double * d_A, * d_B, *d_C;
hipMalloc((void**) &d_A, N*N*sizeof(double));
hipMalloc((void**) &d_B, N*N*sizeof(double));
hipMalloc((void**) &d_C, N*N*sizeof(double));
fillMatrix(A);
fillMatrix(B);
//see whether it is faster to use pinned (page locked) memory for matrices on host
double * h_A, *h_B, *h_C;
hipHostMalloc((void**) &h_A,N*N*sizeof(double));
hipHostMalloc((void**) &h_B,N*N*sizeof(double));
hipHostMalloc((void**) &h_C,N*N*sizeof(double));
//transfer data into pinned memory
hipMemcpy(h_A, A.data(), N*N*sizeof(double),hipMemcpyHostToHost);
hipMemcpy(h_B, B.data(), N*N*sizeof(double),hipMemcpyHostToHost);
// include time to copy to /from device
Timer _t(1);
hipMemcpy (d_A, h_A,N*N*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy (d_B, h_B ,N*N*sizeof(double),hipMemcpyHostToDevice);
hipMemset (d_C, 0, N*N*sizeof(double));
char trans = 'N';
double alpha = 1.;
double beta = 0.;
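// legacy cuBLAS-style call: C = alpha * A * B + beta * C on column-major data, no transposition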
hipblasDgemm(trans,trans,N,N,N,alpha,d_A,N,d_B,N,beta,d_C,N);
hipMemcpy(h_C,d_C,N*N*sizeof(double),hipMemcpyDeviceToHost);
_t.stop();
Measurement m("cublasDgemm_MallocHost",N,N,_t.elapsed_s());
std::cout << m;
//free memory on device
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
//free pinned memory
hipHostFree(h_A);hipHostFree(h_B);hipHostFree(h_C);
}
return 0;
}
|
128dffe5f92d64813360b37fa2b9977d801b8cc3.cu
|
//dgemm testing cublas
//Stefan H
//5.12.12
#include <cublas.h>
#include <cuda_runtime_api.h>
#include <iostream>
#include <matrix.hpp>
#include <fillMatrix.hpp>
#include <Timer.hpp>
#include <Measurement.hpp>
int main() {
typedef hpc12::matrix<double,hpc12::column_major> matrix_type;
for (int N = 512;N < 15000;N+=512) {
matrix_type A(N,N);
matrix_type B(N,N);
matrix_type C(N,N);
double * d_A, * d_B, *d_C;
cudaMalloc((void**) &d_A, N*N*sizeof(double));
cudaMalloc((void**) &d_B, N*N*sizeof(double));
cudaMalloc((void**) &d_C, N*N*sizeof(double));
fillMatrix(A);
fillMatrix(B);
//see whether it is faster to use pinned (page locked) memory for matrices on host
double * h_A, *h_B, *h_C;
cudaMallocHost((void**) &h_A,N*N*sizeof(double));
cudaMallocHost((void**) &h_B,N*N*sizeof(double));
cudaMallocHost((void**) &h_C,N*N*sizeof(double));
//transfer data into pinned memory
cudaMemcpy(h_A, A.data(), N*N*sizeof(double),cudaMemcpyHostToHost);
cudaMemcpy(h_B, B.data(), N*N*sizeof(double),cudaMemcpyHostToHost);
// include time to copy to /from device
Timer _t(1);
cudaMemcpy (d_A, h_A,N*N*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy (d_B, h_B ,N*N*sizeof(double),cudaMemcpyHostToDevice);
cudaMemset (d_C, 0, N*N*sizeof(double));
char trans = 'N';
double alpha = 1.;
double beta = 0.;
cublasDgemm(trans,trans,N,N,N,alpha,d_A,N,d_B,N,beta,d_C,N);
cudaMemcpy(h_C,d_C,N*N*sizeof(double),cudaMemcpyDeviceToHost);
_t.stop();
Measurement m("cublasDgemm_MallocHost",N,N,_t.elapsed_s());
std::cout << m;
//free memory on device
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
//free pinned memory
cudaFreeHost(h_A);cudaFreeHost(h_B);cudaFreeHost(h_C);
}
return 0;
}
|
6794b0636b39a33f2afe3a89b6f62ff8afa1a673.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "transposeNoBankConflicts.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
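// benchmark sweep: every (block shape, matrix size) pair from the two tables above is timed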
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *odata = NULL;
hipMalloc(&odata, XSIZE*YSIZE*sizeof(float));
const float *idata = NULL;
hipMalloc(&idata, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
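// round the problem size up to a multiple of the block dimensions so the grid covers the whole matrix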
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((transposeNoBankConflicts), dim3(gridBlock), dim3(threadBlock), 0, 0, odata, idata);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((transposeNoBankConflicts), dim3(gridBlock), dim3(threadBlock), 0, 0, odata, idata);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((transposeNoBankConflicts), dim3(gridBlock), dim3(threadBlock), 0, 0, odata, idata);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
6794b0636b39a33f2afe3a89b6f62ff8afa1a673.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "transposeNoBankConflicts.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *odata = NULL;
cudaMalloc(&odata, XSIZE*YSIZE*sizeof(float));
const float *idata = NULL;
cudaMalloc(&idata, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
transposeNoBankConflicts<<<gridBlock,threadBlock>>>(odata,idata);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
transposeNoBankConflicts<<<gridBlock,threadBlock>>>(odata,idata);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
transposeNoBankConflicts<<<gridBlock,threadBlock>>>(odata,idata);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
971d211343105d36ef17cb77489404ddb3492a41.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <iomanip>
#include <iostream>
#include <vector>
#define C 4
#define THREADS 1024 // 2^10
#define MAX 85
#define MAX_S MAX* MAX
#define PERM_MAX (MAX * (MAX - 1) * (MAX - 2) * (MAX - 3)) / 24
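// PERM_MAX = C(MAX, 4): upper bound on the number of 4-clique seeds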
#define pb push_back
#define mp make_pair
#define gpuErrChk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
inline void gpuAssert(hipError_t code, const char* file, int line, bool abort = true)
{
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort)
getchar();
}
}
using namespace std;
typedef long long int64;
typedef pair<int, int> ii;
/*
sz ---> Adjacency matrix dimension (1D)
perm ---> Number of permutations of an instance
graph ---> Adjacency matrix itself
seeds ---> Set of seeds
faces ---> Set of triangular faces for the output
*/
struct Node {
int sz, perm;
int graph[MAX_S], seeds[C * PERM_MAX], F_ANS[6 * MAX];
};
/*
faces ---> Number of triangular faces
count ---> Number of remaining vertices
tmpMax ---> Max value obtained for a seed
F ---> Set of triangular faces
F ---> Set of remaining vertices
*/
struct Params {
int *faces, *count, *tmpMax;
int *F, *V;
};
/*
SIZE ---> Number of vertices
BLOCKS ---> Number of blocks
PERM ---> Number of permutations
R ---> Output graph for a possible solution
F ---> Set of triangular faces of an instance
qtd ---> Number of possible 4-cliques
*/
clock_t start, stop;
int SIZE, BLOCKS, PERM, qtd = 0;
int R[MAX_S], F[8 * MAX], bib[MAX];
Node* N;
__device__ void initializeDevice(Params* devP, int sz, int t)
{
devP->faces[t] = 0;
devP->tmpMax[t] = -1;
devP->count[t] = sz - 4;
}
/*
Generates a list containing the vertices which are not on the planar graph
*/
__device__ void generateList(Node* devN, Params* devP, int t)
{
int sz = devN->sz;
int va = devN->seeds[t], vb = devN->seeds[t + devN->perm], vc = devN->seeds[t + 2 * devN->perm], vd = devN->seeds[t + 3 * devN->perm];
for (int i = 0; i < sz; i++) {
if (i == va || i == vb || i == vc || i == vd)
devP->V[t + i * devN->perm] = -1;
else
devP->V[t + i * devN->perm] = i;
}
}
/*
Returns the weight of the planar graph so far
*/
__device__ void generateTriangularFaceList(Node* devN, Params* devP, int graph[], int t)
{
int sz = devN->sz;
int va = devN->seeds[t];
int vb = devN->seeds[t + devN->perm];
int vc = devN->seeds[t + 2 * devN->perm];
int vd = devN->seeds[t + 3 * devN->perm];
//generate first triangle of the output graph
devP->F[t + (devP->faces[t] * 3) * devN->perm] = va;
devP->F[t + (devP->faces[t] * 3 + 1) * devN->perm] = vb;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * devN->perm] = vc;
int resp = graph[va * sz + vb] + graph[va * sz + vc] + graph[vb * sz + vc];
//generate the next 3 possible faces
devP->F[t + (devP->faces[t] * 3) * devN->perm] = va;
devP->F[t + (devP->faces[t] * 3 + 1) * devN->perm] = vb;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * devN->perm] = vd;
devP->F[t + (devP->faces[t] * 3) * devN->perm] = va;
devP->F[t + (devP->faces[t] * 3 + 1) * devN->perm] = vc;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * devN->perm] = vd;
devP->F[t + (devP->faces[t] * 3) * devN->perm] = vb;
devP->F[t + (devP->faces[t] * 3 + 1) * devN->perm] = vc;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * devN->perm] = vd;
resp += graph[va * sz + vd] + graph[vb * sz + vd] + graph[vc * sz + vd];
devP->tmpMax[t] = resp;
}
/*
Insert a new vertex, 3 new triangular faces and removes face 'f' from the set
*/
__device__ int operationT2(Node* devN, Params* devP, int graph[], int new_vertex, int f, int t)
{
int sz = devN->sz, perm = devN->perm;
//remove the chosen face and insert a new one
int va = devP->F[t + (f * 3) * perm];
int vb = devP->F[t + (f * 3 + 1) * perm];
int vc = devP->F[t + (f * 3 + 2) * perm];
devP->F[t + (f * 3) * perm] = new_vertex;
devP->F[t + (f * 3 + 1) * perm] = va;
devP->F[t + (f * 3 + 2) * perm] = vb;
//and insert the other two possible faces
devP->F[t + (devP->faces[t] * 3) * perm] = new_vertex;
devP->F[t + (devP->faces[t] * 3 + 1) * perm] = va;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vc;
devP->F[t + (devP->faces[t] * 3) * perm] = new_vertex;
devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vb;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vc;
int resp = graph[va * sz + new_vertex] + graph[vb * sz + new_vertex] + graph[vc * sz + new_vertex];
return resp;
}
/*
Return the vertex with the maximum gain inserting within a face 'f'
*/
__device__ int maxGain(Node* devN, Params* devP, int graph[], int* f, int t)
{
int sz = devN->sz, perm = devN->perm;
int gain = -1, vertex = -1;
//iterate through the remaining vertices
for (int new_vertex = 0; new_vertex < sz; new_vertex++) {
if (devP->V[t + new_vertex * perm] == -1)
continue;
//and test which has the maximum gain with its insertion
//within all possible faces
int faces = devP->faces[t];
for (int i = 0; i < faces; i++) {
int va = devP->F[t + (i * 3) * perm], vb = devP->F[t + (i * 3 + 1) * perm], vc = devP->F[t + (i * 3 + 2) * perm];
int tmpGain = graph[va * sz + new_vertex] + graph[vb * sz + new_vertex] + graph[vc * sz + new_vertex];
if (tmpGain > gain) {
gain = tmpGain;
*f = i;
vertex = new_vertex;
}
}
}
return vertex;
}
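// Greedy TMFG construction: repeatedly insert the vertex with the largest gain into its best face
// (a T2 operation) until no vertices remain.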
__device__ void tmfg(Node* devN, Params* devP, int graph[], int t)
{
while (devP->count[t]) {
int f = -1;
int vertex = maxGain(devN, devP, graph, &f, t);
devP->V[t + vertex * devN->perm] = -1;
devP->tmpMax[t] += operationT2(devN, devP, graph, vertex, f, t);
devP->count[t]--;
}
}
__global__ void tmfgParallel(Node* devN, Params devP, int* respMax, int* idx)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int sz = devN->sz, perm = devN->perm;
extern __shared__ int graph[];
for (int i = threadIdx.x; i < sz * sz; i += blockDim.x) {
graph[i] = devN->graph[i];
}
__syncthreads();
if (x < perm) {
initializeDevice(&devP, sz, x);
generateList(devN, &devP, x);
generateTriangularFaceList(devN, &devP, graph, x);
tmfg(devN, &devP, graph, x);
__syncthreads();
atomicMax(respMax, devP.tmpMax[x]);
if (devP.tmpMax[x] == *respMax)
*idx = x;
__syncthreads();
}
}
int tmfgPrepare()
{
int resp = 0, idx = 0, *tmpResp, *tmpIdx;
gpuErrChk(hipMalloc((void**)&tmpResp, sizeof(int)));
gpuErrChk(hipMalloc((void**)&tmpIdx, sizeof(int)));
gpuErrChk(hipMemcpy(tmpResp, &resp, sizeof(int), hipMemcpyHostToDevice));
gpuErrChk(hipMemcpy(tmpIdx, &idx, sizeof(int), hipMemcpyHostToDevice));
Node* devN;
Params devP;
cout << "Amount of memory: " << (3 * PERM + PERM * SIZE + 6 * SIZE * PERM * sizeof(int)) / 1000000 << "MB" << endl;
gpuErrChk(hipMalloc((void**)&devN, sizeof(Node)));
gpuErrChk(hipMemcpy(devN, N, sizeof(Node), hipMemcpyHostToDevice));
cout << "1 done." << endl;
gpuErrChk(hipMalloc((void**)&devP.faces, PERM * sizeof(int)));
gpuErrChk(hipMalloc((void**)&devP.count, PERM * sizeof(int)));
gpuErrChk(hipMalloc((void**)&devP.tmpMax, PERM * sizeof(int)));
gpuErrChk(hipMalloc((void**)&devP.F, PERM * 6 * SIZE * sizeof(int)));
gpuErrChk(hipMalloc((void**)&devP.V, PERM * SIZE * sizeof(int)));
cout << "2 done." << endl;
dim3 blocks(BLOCKS, 1);
dim3 threads(THREADS, 1);
cout << "Launching kernel..." << endl;
hipLaunchKernelGGL(( tmfgParallel), dim3(blocks), dim3(threads), SIZE * SIZE * sizeof(int), 0, devN, devP, tmpResp, tmpIdx);
gpuErrChk(hipDeviceSynchronize());
cout << "Kernel finished." << endl;
//copy back the maximum weight and the index of the graph
//which gave this result
gpuErrChk(hipMemcpy(&resp, tmpResp, sizeof(int), hipMemcpyDeviceToHost));
cout << "1 done." << endl;
gpuErrChk(hipMemcpy(&idx, tmpIdx, sizeof(int), hipMemcpyDeviceToHost));
cout << "2 done." << endl;
//gpuErrChk(hipMemcpy(&F, devP.F[idx + ], (6*MAX)*sizeof(int), hipMemcpyDeviceToHost));
cout << "3 done." << endl;
gpuErrChk(hipFree(devN));
gpuErrChk(hipFree(devP.faces));
gpuErrChk(hipFree(devP.count));
gpuErrChk(hipFree(devP.tmpMax));
gpuErrChk(hipFree(devP.F));
gpuErrChk(hipFree(devP.V));
cout << "Completed." << endl;
return resp;
}
void printElapsedTime(clock_t start, clock_t stop)
{
double elapsed = ((double)(stop - start)) / CLOCKS_PER_SEC;
cout << fixed << setprecision(3) << "Elapsed time: " << elapsed << "s\n";
}
/*
C ---> Size of the combination
index ---> Current index in data[]
data[] ---> Temporary array to store a current combination
i ---> Index of current element in vertices[]
*/
void combineUntil(int index, vector<int>& data, int i)
{
// Current combination is ready; store it as a seed
if (index == C) {
for (int j = 0; j < C; j++) {
N->seeds[qtd + j * PERM] = data[j];
}
qtd++;
return;
}
// When there are no more elements to put in data[]
if (i >= SIZE)
return;
//current is inserted; put next at a next location
data[index] = i;
combineUntil(index + 1, data, i + 1);
//current is deleted; replace it with next
combineUntil(index, data, i + 1);
}
/*
Generate all combinations of size 'C' using a temporary array 'data'
*/
void combine()
{
vector<int> data(C);
combineUntil(0, data, 0);
}
void initialize()
{
for (int i = 0; i < SIZE; i++) {
for (int j = i + 1; j < SIZE; j++) {
R[i * SIZE + j] = R[j * SIZE + i] = -1;
}
}
}
void readInput()
{
int x;
cin >> SIZE;
PERM = bib[SIZE - 1];
BLOCKS = PERM / THREADS + 1;
N = (Node*)malloc(sizeof(Node));
N->sz = SIZE;
N->perm = PERM;
for (int i = 0; i < SIZE; i++) {
for (int j = i + 1; j < SIZE; j++) {
cin >> x;
N->graph[i * SIZE + j] = x;
N->graph[j * SIZE + i] = x;
}
}
}
/*
Define the number of permutations and blocks
*/
void sizeDefinitions()
{
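// bib[i - 1] = C(i, 4): number of distinct 4-vertex seeds for a graph with i vertices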
for (int i = 6; i <= MAX; i++) {
int resp = 1;
for (int j = i - 3; j <= i; j++)
resp *= j;
resp /= 24;
bib[i - 1] = resp;
}
}
int main(int argc, char** argv)
{
ios::sync_with_stdio(false);
sizeDefinitions();
//read the input, which is given by a size of a graph and its weighted edges.
//the graph given is dense.
readInput();
initialize();
//generate multiple 4-clique seeds, given the number of vertices
combine();
hipSetDevice(3);
start = clock();
int respMax = tmfgPrepare();
stop = clock();
//reconstruct the graph given the regions of the graph
// for ( int i = 0; i < 2*SIZE; i++ ){
// int va = F[i*3], vb = F[i*3 + 1], vc = F[i*3 + 2];
// if ( va == vb && vb == vc ) continue;
// R[va*SIZE + vb] = R[vb*SIZE + va] = N->graph[va*SIZE + vb];
// R[va*SIZE + vc] = R[vc*SIZE + va] = N->graph[va*SIZE + vc];
// R[vb*SIZE + vc] = R[vc*SIZE + vb] = N->graph[vb*SIZE + vc];
// }
// cout << "Printing generated graph: " << endl;
// for ( int i = 0; i < SIZE; i++ ){
// for ( int j = i+1; j < SIZE; j++ ){
// cout << R[i*SIZE + j] << " ";
// }
// cout << endl;
// }
printElapsedTime(start, stop);
cout << "Maximum weight found: " << respMax << endl;
free(N);
gpuErrChk(hipDeviceReset());
return 0;
}
|
971d211343105d36ef17cb77489404ddb3492a41.cu
|
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <iomanip>
#include <iostream>
#include <vector>
#define C 4
#define THREADS 1024 // 2^10
#define MAX 85
#define MAX_S MAX* MAX
#define PERM_MAX (MAX * (MAX - 1) * (MAX - 2) * (MAX - 3)) / 24
#define pb push_back
#define mp make_pair
#define gpuErrChk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
getchar();
}
}
using namespace std;
typedef long long int64;
typedef pair<int, int> ii;
/*
sz ---> Adjacency matrix dimension (1D)
perm ---> Number of permutations of an instance
graph ---> Adjacency matrix itself
seeds ---> Set of seeds
faces ---> Set of triangular faces for the output
*/
struct Node {
int sz, perm;
int graph[MAX_S], seeds[C * PERM_MAX], F_ANS[6 * MAX];
};
/*
faces ---> Number of triangular faces
count ---> Number of remaining vertices
tmpMax ---> Max value obtained for a seed
F ---> Set of triangular faces
V ---> Set of remaining vertices
*/
struct Params {
int *faces, *count, *tmpMax;
int *F, *V;
};
/*
SIZE ---> Number of vertices
BLOCKS ---> Number of blocks
PERM ---> Number of permutations
R ---> Output graph for a possible solution
F ---> Set of triangular faces of an instance
qtd ---> Number of possible 4-cliques
*/
clock_t start, stop;
int SIZE, BLOCKS, PERM, qtd = 0;
int R[MAX_S], F[8 * MAX], bib[MAX];
Node* N;
__device__ void initializeDevice(Params* devP, int sz, int t)
{
devP->faces[t] = 0;
devP->tmpMax[t] = -1;
devP->count[t] = sz - 4;
}
/*
Generates a list containing the vertices which are not on the planar graph
*/
__device__ void generateList(Node* devN, Params* devP, int t)
{
int sz = devN->sz;
int va = devN->seeds[t], vb = devN->seeds[t + devN->perm], vc = devN->seeds[t + 2 * devN->perm], vd = devN->seeds[t + 3 * devN->perm];
for (int i = 0; i < sz; i++) {
if (i == va || i == vb || i == vc || i == vd)
devP->V[t + i * devN->perm] = -1;
else
devP->V[t + i * devN->perm] = i;
}
}
/*
Returns the weight of the planar graph so far
*/
__device__ void generateTriangularFaceList(Node* devN, Params* devP, int graph[], int t)
{
int sz = devN->sz;
int va = devN->seeds[t];
int vb = devN->seeds[t + devN->perm];
int vc = devN->seeds[t + 2 * devN->perm];
int vd = devN->seeds[t + 3 * devN->perm];
//generate first triangle of the output graph
devP->F[t + (devP->faces[t] * 3) * devN->perm] = va;
devP->F[t + (devP->faces[t] * 3 + 1) * devN->perm] = vb;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * devN->perm] = vc;
int resp = graph[va * sz + vb] + graph[va * sz + vc] + graph[vb * sz + vc];
//generate the next 3 possible faces
devP->F[t + (devP->faces[t] * 3) * devN->perm] = va;
devP->F[t + (devP->faces[t] * 3 + 1) * devN->perm] = vb;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * devN->perm] = vd;
devP->F[t + (devP->faces[t] * 3) * devN->perm] = va;
devP->F[t + (devP->faces[t] * 3 + 1) * devN->perm] = vc;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * devN->perm] = vd;
devP->F[t + (devP->faces[t] * 3) * devN->perm] = vb;
devP->F[t + (devP->faces[t] * 3 + 1) * devN->perm] = vc;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * devN->perm] = vd;
resp += graph[va * sz + vd] + graph[vb * sz + vd] + graph[vc * sz + vd];
devP->tmpMax[t] = resp;
}
/*
Insert a new vertex, 3 new triangular faces and removes face 'f' from the set
*/
__device__ int operationT2(Node* devN, Params* devP, int graph[], int new_vertex, int f, int t)
{
int sz = devN->sz, perm = devN->perm;
//remove the chosen face and insert a new one
int va = devP->F[t + (f * 3) * perm];
int vb = devP->F[t + (f * 3 + 1) * perm];
int vc = devP->F[t + (f * 3 + 2) * perm];
devP->F[t + (f * 3) * perm] = new_vertex;
devP->F[t + (f * 3 + 1) * perm] = va;
devP->F[t + (f * 3 + 2) * perm] = vb;
//and insert the other two possible faces
devP->F[t + (devP->faces[t] * 3) * perm] = new_vertex;
devP->F[t + (devP->faces[t] * 3 + 1) * perm] = va;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vc;
devP->F[t + (devP->faces[t] * 3) * perm] = new_vertex;
devP->F[t + (devP->faces[t] * 3 + 1) * perm] = vb;
devP->F[t + ((devP->faces[t]++) * 3 + 2) * perm] = vc;
int resp = graph[va * sz + new_vertex] + graph[vb * sz + new_vertex] + graph[vc * sz + new_vertex];
return resp;
}
/*
Return the vertex with the maximum gain inserting within a face 'f'
*/
__device__ int maxGain(Node* devN, Params* devP, int graph[], int* f, int t)
{
int sz = devN->sz, perm = devN->perm;
int gain = -1, vertex = -1;
//iterate through the remaining vertices
for (int new_vertex = 0; new_vertex < sz; new_vertex++) {
if (devP->V[t + new_vertex * perm] == -1)
continue;
//and test which has the maximum gain with its insertion
//within all possible faces
int faces = devP->faces[t];
for (int i = 0; i < faces; i++) {
int va = devP->F[t + (i * 3) * perm], vb = devP->F[t + (i * 3 + 1) * perm], vc = devP->F[t + (i * 3 + 2) * perm];
int tmpGain = graph[va * sz + new_vertex] + graph[vb * sz + new_vertex] + graph[vc * sz + new_vertex];
if (tmpGain > gain) {
gain = tmpGain;
*f = i;
vertex = new_vertex;
}
}
}
return vertex;
}
__device__ void tmfg(Node* devN, Params* devP, int graph[], int t)
{
while (devP->count[t]) {
int f = -1;
int vertex = maxGain(devN, devP, graph, &f, t);
devP->V[t + vertex * devN->perm] = -1;
devP->tmpMax[t] += operationT2(devN, devP, graph, vertex, f, t);
devP->count[t]--;
}
}
__global__ void tmfgParallel(Node* devN, Params devP, int* respMax, int* idx)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int sz = devN->sz, perm = devN->perm;
extern __shared__ int graph[];
for (int i = threadIdx.x; i < sz * sz; i += blockDim.x) {
graph[i] = devN->graph[i];
}
__syncthreads();
if (x < perm) {
initializeDevice(&devP, sz, x);
generateList(devN, &devP, x);
generateTriangularFaceList(devN, &devP, graph, x);
tmfg(devN, &devP, graph, x);
__syncthreads();
atomicMax(respMax, devP.tmpMax[x]);
if (devP.tmpMax[x] == *respMax)
*idx = x;
__syncthreads();
}
}
int tmfgPrepare()
{
int resp = 0, idx = 0, *tmpResp, *tmpIdx;
gpuErrChk(cudaMalloc((void**)&tmpResp, sizeof(int)));
gpuErrChk(cudaMalloc((void**)&tmpIdx, sizeof(int)));
gpuErrChk(cudaMemcpy(tmpResp, &resp, sizeof(int), cudaMemcpyHostToDevice));
gpuErrChk(cudaMemcpy(tmpIdx, &idx, sizeof(int), cudaMemcpyHostToDevice));
Node* devN;
Params devP;
cout << "Amount of memory: " << (3 * PERM + PERM * SIZE + 6 * SIZE * PERM * sizeof(int)) / 1000000 << "MB" << endl;
gpuErrChk(cudaMalloc((void**)&devN, sizeof(Node)));
gpuErrChk(cudaMemcpy(devN, N, sizeof(Node), cudaMemcpyHostToDevice));
cout << "1 done." << endl;
gpuErrChk(cudaMalloc((void**)&devP.faces, PERM * sizeof(int)));
gpuErrChk(cudaMalloc((void**)&devP.count, PERM * sizeof(int)));
gpuErrChk(cudaMalloc((void**)&devP.tmpMax, PERM * sizeof(int)));
gpuErrChk(cudaMalloc((void**)&devP.F, PERM * 6 * SIZE * sizeof(int)));
gpuErrChk(cudaMalloc((void**)&devP.V, PERM * SIZE * sizeof(int)));
cout << "2 done." << endl;
dim3 blocks(BLOCKS, 1);
dim3 threads(THREADS, 1);
cout << "Launching kernel..." << endl;
tmfgParallel<<<blocks, threads, SIZE * SIZE * sizeof(int)>>>(devN, devP, tmpResp, tmpIdx);
gpuErrChk(cudaDeviceSynchronize());
cout << "Kernel finished." << endl;
//copy back the maximum weight and the index of the graph
//which gave this result
gpuErrChk(cudaMemcpy(&resp, tmpResp, sizeof(int), cudaMemcpyDeviceToHost));
cout << "1 done." << endl;
gpuErrChk(cudaMemcpy(&idx, tmpIdx, sizeof(int), cudaMemcpyDeviceToHost));
cout << "2 done." << endl;
//gpuErrChk(cudaMemcpy(&F, devP.F[idx + ], (6*MAX)*sizeof(int), cudaMemcpyDeviceToHost));
cout << "3 done." << endl;
gpuErrChk(cudaFree(devN));
gpuErrChk(cudaFree(devP.faces));
gpuErrChk(cudaFree(devP.count));
gpuErrChk(cudaFree(devP.tmpMax));
gpuErrChk(cudaFree(devP.F));
gpuErrChk(cudaFree(devP.V));
cout << "Completed." << endl;
return resp;
}
void printElapsedTime(clock_t start, clock_t stop)
{
double elapsed = ((double)(stop - start)) / CLOCKS_PER_SEC;
cout << fixed << setprecision(3) << "Elapsed time: " << elapsed << "s\n";
}
/*
C ---> Size of the combination
index ---> Current index in data[]
data[] ---> Temporary array to store a current combination
i ---> Index of current element in vertices[]
*/
void combineUntil(int index, vector<int>& data, int i)
{
// Current combination is ready, store it as a seed
if (index == C) {
for (int j = 0; j < C; j++) {
N->seeds[qtd + j * PERM] = data[j];
}
qtd++;
return;
}
// When there are no more elements to put in data[]
if (i >= SIZE)
return;
//current is inserted; put next at a next location
data[index] = i;
combineUntil(index + 1, data, i + 1);
//current is deleted; replace it with next
combineUntil(index, data, i + 1);
}
/*
Generate all combinations of size 'C' using a temporary array 'data' and store them as seeds
*/
void combine()
{
vector<int> data(C);
combineUntil(0, data, 0);
}
void initialize()
{
for (int i = 0; i < SIZE; i++) {
for (int j = i + 1; j < SIZE; j++) {
R[i * SIZE + j] = R[j * SIZE + i] = -1;
}
}
}
void readInput()
{
int x;
cin >> SIZE;
PERM = bib[SIZE - 1];
BLOCKS = PERM / THREADS + 1;
N = (Node*)malloc(sizeof(Node));
N->sz = SIZE;
N->perm = PERM;
for (int i = 0; i < SIZE; i++) {
for (int j = i + 1; j < SIZE; j++) {
cin >> x;
N->graph[i * SIZE + j] = x;
N->graph[j * SIZE + i] = x;
}
}
}
/*
Precompute bib[n-1] = C(n, 4), the number of distinct 4-clique seeds for a graph of n vertices; this later fixes the number of permutations (and hence blocks)
*/
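// e.g. for a 6-vertex graph: bib[5] = 6*5*4*3 / 24 = C(6,4) = 15 seeds.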
void sizeDefinitions()
{
for (int i = 6; i <= MAX; i++) {
int resp = 1;
for (int j = i - 3; j <= i; j++)
resp *= j;
resp /= 24;
bib[i - 1] = resp;
}
}
int main(int argc, char** argv)
{
ios::sync_with_stdio(false);
sizeDefinitions();
//read the input, which is given by a size of a graph and its weighted edges.
//the graph given is dense.
readInput();
initialize();
//generate multiple 4-clique seeds, given the number of vertices
combine();
cudaSetDevice(3);
start = clock();
int respMax = tmfgPrepare();
stop = clock();
//reconstruct the graph given the regions of the graph
// for ( int i = 0; i < 2*SIZE; i++ ){
// int va = F[i*3], vb = F[i*3 + 1], vc = F[i*3 + 2];
// if ( va == vb && vb == vc ) continue;
// R[va*SIZE + vb] = R[vb*SIZE + va] = N->graph[va*SIZE + vb];
// R[va*SIZE + vc] = R[vc*SIZE + va] = N->graph[va*SIZE + vc];
// R[vb*SIZE + vc] = R[vc*SIZE + vb] = N->graph[vb*SIZE + vc];
// }
// cout << "Printing generated graph: " << endl;
// for ( int i = 0; i < SIZE; i++ ){
// for ( int j = i+1; j < SIZE; j++ ){
// cout << R[i*SIZE + j] << " ";
// }
// cout << endl;
// }
printElapsedTime(start, stop);
cout << "Maximum weight found: " << respMax << endl;
free(N);
gpuErrChk(cudaDeviceReset());
return 0;
}
|
ebe0041c84a8443ce5d543e09e12a7e8305f0491.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/image_resize.h>
#include <cuda_exception.h>
namespace nd4j {
namespace ops {
namespace helpers {
struct BilinearInterpolationData {
Nd4jLong bottomIndex; // Lower source index used in the interpolation
Nd4jLong topIndex; // Upper source index used in the interpolation
// 1-D linear interpolation scale (see:
// https://en.wikipedia.org/wiki/Bilinear_interpolation)
double interpolarValue;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// computeInterpolationWeights kernel
// outSize - output length
// inSize - input size
// scale - input scale
// interpolationData - result
//
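// e.g. upscaling 4 -> 8 pixels (scale = 0.5): output index 3 maps to in = 1.5,
// so bottomIndex = 1, topIndex = 2 and interpolarValue = 0.5; when 'channels'
// is non-zero, both indices are additionally scaled by the channel count.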
static __global__ void computeInterpolationWeights(Nd4jLong outSize,
Nd4jLong inSize,
double scale,
Nd4jLong channels,
BilinearInterpolationData* interpolationData) {
interpolationData[outSize].bottomIndex = 0;
interpolationData[outSize].topIndex = 0;
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (Nd4jLong i = outSize - tid; i >= 0; i -= step) {
double in = i * scale;
interpolationData[i].bottomIndex = static_cast<Nd4jLong>(in);
interpolationData[i].topIndex = nd4j::math::nd4j_min(interpolationData[i].bottomIndex + 1, inSize - 1);
interpolationData[i].interpolarValue = in - interpolationData[i].bottomIndex;
if (channels) {
math::atomics::nd4j_atomicMul(&interpolationData[i].bottomIndex, channels);
math::atomics::nd4j_atomicMul(&interpolationData[i].topIndex, channels);
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with bilinear interpolation algorithm
//
static void resizeImage(nd4j::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight,
Nd4jLong outWidth, Nd4jLong channels,
BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_,
NDArray* output);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with bilinear interpolation algorithm kernel
//
template <typename T>
static __global__ void resizeImageKernel(T const* input, Nd4jLong const* inputShape, T* outputYptr, Nd4jLong* outputShape, Nd4jLong batchSize,
Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, Nd4jLong inRowSize, Nd4jLong outRowSize, Nd4jLong inBatchNumValues,
BilinearInterpolationData* xs_, BilinearInterpolationData* ys_) {
if (blockIdx.x < batchSize) { // blockIdx.x as batch index
auto pX = input + blockIdx.x * inBatchNumValues;
auto channelStart = blockIdx.z * blockDim.z + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (Nd4jLong y = threadIdx.x; y < outHeight; y += blockDim.x) {
const T *ys_input_lower_ptr = pX + ys_[y].bottomIndex * inRowSize;
const T *ys_input_upper_ptr = pX + ys_[y].topIndex * inRowSize;
double yVal = ys_[y].interpolarValue;
auto pZ = outputYptr + y * outRowSize;
for (Nd4jLong x = threadIdx.y; x < outWidth; x += blockDim.y) {
auto xsBottom = xs_[x].bottomIndex;
auto xsTop = xs_[x].topIndex;
auto xVal = xs_[x].interpolarValue;
// process interpolation for all channels
for (int c = channelStart; c < channels; c += step) {
double topLeft(ys_input_lower_ptr[xsBottom + c]);
double topRight(ys_input_lower_ptr[xsTop + c]);
double bottomLeft(ys_input_upper_ptr[xsBottom + c]);
double bottomRight(ys_input_upper_ptr[xsTop + c]);
double top = topLeft + (topRight - topLeft) * xVal;
double bottom = bottomLeft + (bottomRight - bottomLeft) * xVal;
pZ[x * channels + c] = T(top + (bottom - top) * yVal);
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with bilinear interpolation algorithm (host-side launcher)
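// Launch note: the kernel below is started with one block per image in the
// batch and outHeight threads in x, so each thread writes one full output row
// across all channels; the 256 bytes of dynamic shared memory requested in the
// launch appear to be unused by the kernel.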
template <typename T>
static void resizeImage_(nd4j::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight,
Nd4jLong outWidth, Nd4jLong channels,
BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_,
NDArray* output) {
Nd4jLong inRowSize = inWidth * channels;
Nd4jLong inBatchNumValues = inHeight * inRowSize;
Nd4jLong outRowSize = outWidth * channels;
auto stream = context->getCudaStream();
T const *input_b_ptr = reinterpret_cast<T const *>(images->getSpecialBuffer()); // this works only with 'c' direction
T *output_y_ptr = reinterpret_cast<T *>(output->specialBuffer());
hipLaunchKernelGGL(( resizeImageKernel<T>), dim3(batchSize), dim3(outHeight), 256, *stream, input_b_ptr, images->getSpecialShapeInfo(), output_y_ptr, output->specialShapeInfo(), batchSize,
outWidth, outHeight, channels, inRowSize, outRowSize, inBatchNumValues, xs_, ys_);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
static int resizeBilinearFunctor_(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
const Nd4jLong batchSize = images->sizeAt(0);
const Nd4jLong inHeight = images->sizeAt(1);
const Nd4jLong inWidth = images->sizeAt(2);
const Nd4jLong channels = images->sizeAt(3);
const Nd4jLong outHeight = output->sizeAt(1);
const Nd4jLong outWidth = output->sizeAt(2);
// Handle no-op resizes efficiently.
if (outHeight == inHeight && outWidth == inWidth) {
output->assign(images);
return ND4J_STATUS_OK;
}
// Special case for TF compatibility
if((center && inHeight < 2) || (center && inWidth < 2)){
center = false;
}
if ((center && inHeight < 2) || (inHeight < 1) || (outHeight < 1) || (center && outHeight < 2) ||
(center && inWidth < 2) || (inWidth < 1) || (outWidth < 1) || (center && outWidth < 2)) {
// wrong input data
nd4j_printf("image.resize_bilinear: Wrong input or output size to resize\n", "");
return ND4J_STATUS_BAD_ARGUMENTS;
}
float heightScale = center ? (inHeight - 1.f) / double(outHeight - 1.f) : (inHeight / float(outHeight));
float widthScale = center ? (inWidth - 1.f) / double(outWidth - 1.f) : (inWidth / float(outWidth));
BilinearInterpolationData* xs_;// = xs.data();
BilinearInterpolationData* ys_;// = xs.data();
hipError_t err = hipMalloc(&xs_, sizeof(BilinearInterpolationData) * (outWidth + 1));
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for vertical parts rectangulars", err);
}
err = hipMalloc(&ys_, sizeof(BilinearInterpolationData) * (outHeight + 1));
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for horizontal parts rectangulars", err);
}
auto stream = context->getCudaStream();
// Compute the cached interpolation weights on the x and y dimensions.
hipLaunchKernelGGL(( computeInterpolationWeights), dim3(256), dim3(512), 512, *stream, outHeight, inHeight, heightScale, 0, ys_);
hipLaunchKernelGGL(( computeInterpolationWeights), dim3(256), dim3(512), 512, *stream, outWidth, inWidth, widthScale, channels, xs_);
NDArray::prepareSpecialUse({output}, {images});
resizeImage(context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels, xs_, ys_, output);
NDArray::registerSpecialUse({output}, {images});
err = hipFree(xs_);
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for vertical parts rectangulars", err);
}
err = hipFree(ys_);
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for horizontal parts rectangulars", err);
}
return Status::OK();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize by interpolation nearest neighbor algorithm kernel
//
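// e.g. downscaling height 8 -> 4 (heightScale = 2): output row y = 1 reads
// input row floor(1 * 2) = 2 (round(...) instead when 'center' is set),
// clamped to inHeight - 1.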
template <typename T>
static __global__ void resizeNeighborKernel(T const* input, Nd4jLong* inputShape, T* output, Nd4jLong* outputShape,
Nd4jLong batchSize, Nd4jLong inWidth, Nd4jLong inHeight, Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, double widthScale, double heightScale, bool center) {
//for (int b = blockIdx.x; b < batchSize; b += gridDim.x)
if (blockIdx.x < batchSize)
{
auto b = blockIdx.x;
for (int y = threadIdx.x; y < outHeight; y += blockDim.x) {
Nd4jLong inY = nd4j::math::nd4j_min(
(center) ? static_cast<Nd4jLong>(nd4j::math::p_round<float>(y * heightScale)) : static_cast<Nd4jLong>(nd4j::math::p_floor<float>(
y * heightScale)), inHeight - 1);
for (int x = threadIdx.y; x < outWidth; x += blockDim.y) {
Nd4jLong inX = nd4j::math::nd4j_min(
(center) ? static_cast<Nd4jLong>(nd4j::math::p_round<float>(x * widthScale)) : static_cast<Nd4jLong>(nd4j::math::p_floor<float>(
x * widthScale)), inWidth - 1);
auto start = blockIdx.z * blockDim.z + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (Nd4jLong e = start; e < channels; e += step) {
Nd4jLong posX[] = {b, inY, inX, e};
Nd4jLong posZ[] = {b, y, x, e};
auto xIndex = shape::getOffset(inputShape, posX);
auto zIndex = shape::getOffset(outputShape, posZ);
output[zIndex] = input[xIndex];
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resizeNeighborFunctor - main algorithm by nearest neighbor
//
template <typename T>
int resizeNeighborFunctor_(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
const Nd4jLong batchSize = images->sizeAt(0);
const Nd4jLong inHeight = images->sizeAt(1);
const Nd4jLong inWidth = images->sizeAt(2);
const Nd4jLong channels = images->sizeAt(3);
const Nd4jLong outHeight = output->sizeAt(1);
const Nd4jLong outWidth = output->sizeAt(2);
// Handle no-op resizes efficiently.
if (outHeight == inHeight && outWidth == inWidth) {
output->assign(images);
return ND4J_STATUS_OK;
}
if ((center && inHeight < 2) || (inHeight < 1) || (outHeight < 1) || (center && outHeight < 2) ||
(center && inWidth < 2) || (inWidth < 1) || (outWidth < 1) || (center && outWidth < 2)) {
// wrong input data
nd4j_printf("image.resize_nearest_neighbor: Wrong input or output size to resize\n", "");
return ND4J_STATUS_BAD_ARGUMENTS;
}
double heightScale = center ? (inHeight - 1.) / double(outHeight - 1.0) : (inHeight / double(outHeight));
double widthScale = center ? (inWidth - 1.) / double(outWidth - 1.0) : (inWidth / double(outWidth));
auto imagesBuffer = reinterpret_cast<T const*>(images->getSpecialBuffer());
auto outputBuffer = reinterpret_cast<T*>(output->specialBuffer());
auto stream = context->getCudaStream();
//T const* input, Nd4jLong const* inputShape, T* output, Nd4jLong* outputShape,
// Nd4jLong batchSize, Nd4jLong inWidth, Nd4jLong inHeight, Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, double widthScale, double heightScale, bool center
//input, inputShape, output, outputShape,
// batchSize, inWidth, inHeight, outWidth, outHeight, channels, widthScale, heightScale, center
NDArray::prepareSpecialUse({output}, {images});
hipLaunchKernelGGL(( resizeNeighborKernel<T>), dim3(batchSize), dim3(outHeight * outWidth), 512, *stream, imagesBuffer, images->getSpecialShapeInfo(), outputBuffer, output->specialShapeInfo(),
batchSize, inWidth, inHeight, outWidth, outHeight, channels, widthScale, heightScale, center);
NDArray::registerSpecialUse({output}, {images});
return Status::OK();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resizeImage - resize bilinear algorithm caller
//
void resizeImage(nd4j::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight,
Nd4jLong inWidth, Nd4jLong outHeight, Nd4jLong outWidth, Nd4jLong channels, BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_, NDArray* output) {
BUILD_SINGLE_SELECTOR(images->dataType(), resizeImage_, (context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels, xs_, ys_, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void resizeImage_,(nd4j::LaunchContext* context, NDArray const* images,
Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight, Nd4jLong outWidth,
Nd4jLong channels, BilinearInterpolationData* xs_, BilinearInterpolationData* ys_, NDArray* output), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int resizeBilinearFunctor(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
BUILD_SINGLE_SELECTOR(images->dataType(), return resizeBilinearFunctor_, (context, images, width, height, center, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeBilinearFunctor_, (nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int resizeNeighborFunctor(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
BUILD_SINGLE_SELECTOR(images->dataType(), return resizeNeighborFunctor_, (context, images, width, height, center, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeNeighborFunctor_, (nd4j::LaunchContext* context, NDArray const* images,
int width, int height, bool center, NDArray* output), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// --------------------------------------------------------------------------------------------------------------- //
// Crop and Resize helper implementation
// --------------------------------------------------------------------------------------------------------------- //
// cropAndResize kernel
//
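// Sampling note: for a box (y1, x1, y2, x2) in normalized [0, 1] coordinates,
// output row y samples the source at
// inY = y1 * (imageHeight - 1) + y * (y2 - y1) * (imageHeight - 1) / (cropHeight - 1),
// so rows are spaced evenly between y1 and y2 (columns likewise); samples that
// fall outside the image receive extrapolationVal.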
template <typename T, typename Z, typename I>
static __global__ void cropAndResizeKernel(T const *images, Nd4jLong* imagesShape, Z const* boxes, Nd4jLong* boxesShape,
I const* indices, Nd4jLong* indexShape, I const* cropSize, Nd4jLong* cropShape, int method,
double extrapolationVal, Z* output, Nd4jLong* outputShape, int numBoxes, int cropHeight, int cropWidth,
int batchSize, int imageHeight, int imageWidth, int depth) {
for (int b = blockIdx.x; b < numBoxes; b += gridDim.x)
{
Nd4jLong x1Pos[] = {b, 1};
Nd4jLong y1Pos[] = {b, 0};
Nd4jLong y2Pos[] = {b, 2};
Nd4jLong x2Pos[] = {b, 3};
Z y1 = boxes[shape::getOffset(boxesShape, y1Pos)];//->t<T>(b, 0)];
Z x1 = boxes[shape::getOffset(boxesShape, x1Pos)];
Z y2 = boxes[shape::getOffset(boxesShape, y2Pos)];
Z x2 = boxes[shape::getOffset(boxesShape, x2Pos)];
int bIn = indices[b];
if (bIn >= batchSize) {
continue;
}
Z heightScale = (cropHeight > 1) ? (y2 - y1) * (imageHeight - 1) / Z(cropHeight - 1) : Z(0);
Z widthScale = (cropWidth > 1) ? (x2 - x1) * (imageWidth - 1) / Z(cropWidth - 1) : Z(0);
for (int y = threadIdx.x; y < cropHeight; y += blockDim.x) {
const float inY = (cropHeight > 1)
? y1 * (imageHeight - 1) + y * heightScale
: 0.5 * (y1 + y2) * (imageHeight - 1);
if (inY < 0 || inY > imageHeight - 1) {
for (int x = threadIdx.y; x < cropWidth; x += blockDim.y) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
//crops->p(b, y, x, d, extrapolationVal);
}
}
continue;
}
if (method == 0 /* bilinear */) {
const int topYIndex = nd4j::math::p_floor(inY);
const int bottomYIndex = nd4j::math::p_ceil(inY);
const float y_lerp = inY - topYIndex;
for (int x = 0; x < cropWidth; ++x) {
const float in_x = (cropWidth > 1)
? x1 * (imageWidth - 1) + x * widthScale
: 0.5 * (x1 + x2) * (imageWidth - 1);
if (in_x < 0 || in_x > imageWidth - 1) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
// crops->p(b, y, x, d, extrapolationVal);
}
continue;
}
int left_x_index = math::p_floor(in_x);
int right_x_index = math::p_ceil(in_x);
T x_lerp = in_x - left_x_index;
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong topLeftPos[] = {bIn, topYIndex, left_x_index, d};
Nd4jLong topRightPos[] = {bIn, topYIndex, right_x_index, d};
Nd4jLong bottomLeftPos[] = {bIn, bottomYIndex, left_x_index, d};
Nd4jLong bottomRightPos[] = {bIn, bottomYIndex, right_x_index, d};
const T topLeft(images[shape::getOffset(imagesShape, topLeftPos)]); //->e<float>(bIn, topYIndex, left_x_index, d));
const T topRight(images[shape::getOffset(imagesShape, topRightPos)]); //->e<float>(bIn, topYIndex, right_x_index, d));
const T bottomLeft(images[shape::getOffset(imagesShape, bottomLeftPos)]);//->e<float>(bIn, bottomYIndex, left_x_index, d));
const T bottomRight(images[shape::getOffset(imagesShape, bottomRightPos)]); //->e<float>(bIn, bottomYIndex, right_x_index, d));
const T top = topLeft + (topRight - topLeft) * x_lerp;
const T bottom = bottomLeft + (bottomRight - bottomLeft) * x_lerp;
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = Z(top + (bottom - top) * y_lerp);
}
}
} else { // method is "nearest neighbor"
for (int x = 0; x < cropWidth; ++x) {
const float inX = (cropWidth > 1)
? x1 * (imageWidth - 1) + x * widthScale
: 0.5 * (x1 + x2) * (imageWidth - 1);
if (inX < 0 || inX > imageWidth - 1) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
}
continue;
}
const int closestXIndex = roundf(inX);
const int closestYIndex = roundf(inY);
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
Nd4jLong xPos[] = {bIn, closestYIndex, closestXIndex, d};
auto zIndex = shape::getOffset(outputShape, zPos);
auto xIndex = shape::getOffset(imagesShape, xPos);
output[zIndex] = images[xIndex];
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// cropAndResizeFunctor main algorithm
// context - launch context
// images - batch of images (4D tensor - [batch, height, width, channels])
// boxes - 2D tensor with boxes for crop
// indices - 2D int tensor with indices of boxes to crop
// cropSize - 2D int tensor with crop box sizes
// method - (one of 0 - bilinear, 1 - nearest)
// extrapolationVal - double value of extrapolation
// crops - output (4D tensor - [numBoxes, cropHeight, cropWidth, channels])
//
template <typename T, typename Z, typename I>
static void cropAndResizeFunctor_(nd4j::LaunchContext* context, NDArray const *images, NDArray const *boxes, NDArray const *indices,
NDArray const *cropSize, int method, double extrapolationVal, NDArray *crops) {
const int batchSize = images->sizeAt(0);
const int imageHeight = images->sizeAt(1);
const int imageWidth = images->sizeAt(2);
const int numBoxes = crops->sizeAt(0);
const int cropHeight = crops->sizeAt(1);
const int cropWidth = crops->sizeAt(2);
const int depth = crops->sizeAt(3);
auto stream = context->getCudaStream();
T const* imagesBuf = reinterpret_cast<T const*>(images->getSpecialBuffer());
Z const* boxesBuf = reinterpret_cast<Z const*>(boxes->getSpecialBuffer());
I const* indexBuf = reinterpret_cast<I const*>(indices->getSpecialBuffer());
I const* cropSizes = reinterpret_cast<I const*>(cropSize->getSpecialBuffer());
Z* outBuf = reinterpret_cast<Z*>(crops->specialBuffer());
NDArray::prepareSpecialUse({crops}, {images, boxes, indices, cropSize});
hipLaunchKernelGGL(( cropAndResizeKernel<T,Z,I>), dim3(batchSize), dim3(math::nd4j_max(imageHeight * imageWidth, cropHeight * cropWidth)), 512, *stream, imagesBuf, images->getSpecialShapeInfo(), boxesBuf, boxes->getSpecialShapeInfo(), indexBuf, indices->getSpecialShapeInfo(),
cropSizes, cropSize->getSpecialShapeInfo(), method, extrapolationVal, outBuf, crops->specialShapeInfo(), numBoxes, cropHeight, cropWidth, batchSize, imageHeight, imageWidth, depth);
NDArray::registerSpecialUse({crops}, {images, boxes, indices, cropSize});
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void cropAndResizeFunctor(nd4j::LaunchContext * context, NDArray const *images, NDArray const *boxes, NDArray const *indices, NDArray const *cropSize, int method, double extrapolationVal, NDArray *crops) {
BUILD_TRIPLE_SELECTOR(images->dataType(), boxes->dataType(), indices->dataType(), cropAndResizeFunctor_,
(context, images, boxes, indices, cropSize, method, extrapolationVal, crops), NUMERIC_TYPES, FLOAT_TYPES, INTEGER_TYPES);
//
}
BUILD_TRIPLE_TEMPLATE(template void cropAndResizeFunctor_,
(nd4j::LaunchContext * context, NDArray const* images, NDArray const* boxes, NDArray const* indices, NDArray const* cropSize, int method, double extrapolationVal, NDArray* crops),
NUMERIC_TYPES, FLOAT_TYPES, INTEGER_TYPES);
}
}
}
|
ebe0041c84a8443ce5d543e09e12a7e8305f0491.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/image_resize.h>
#include <cuda_exception.h>
namespace nd4j {
namespace ops {
namespace helpers {
struct BilinearInterpolationData {
Nd4jLong bottomIndex; // Lower source index used in the interpolation
Nd4jLong topIndex; // Upper source index used in the interpolation
// 1-D linear interpolation scale (see:
// https://en.wikipedia.org/wiki/Bilinear_interpolation)
double interpolarValue;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// computeInterpolationWeights kernel
// outSize - output length
// inSize - input size
// scale - input scale
// interpolationData - result
//
static __global__ void computeInterpolationWeights(Nd4jLong outSize,
Nd4jLong inSize,
double scale,
Nd4jLong channels,
BilinearInterpolationData* interpolationData) {
interpolationData[outSize].bottomIndex = 0;
interpolationData[outSize].topIndex = 0;
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (Nd4jLong i = outSize - tid; i >= 0; i -= step) {
double in = i * scale;
interpolationData[i].bottomIndex = static_cast<Nd4jLong>(in);
interpolationData[i].topIndex = nd4j::math::nd4j_min(interpolationData[i].bottomIndex + 1, inSize - 1);
interpolationData[i].interpolarValue = in - interpolationData[i].bottomIndex;
if (channels) {
math::atomics::nd4j_atomicMul(&interpolationData[i].bottomIndex, channels);
math::atomics::nd4j_atomicMul(&interpolationData[i].topIndex, channels);
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with bilinear interpolation algorithm
//
static void resizeImage(nd4j::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight,
Nd4jLong outWidth, Nd4jLong channels,
BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_,
NDArray* output);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with bilinear interpolation algorithm kernel
//
template <typename T>
static __global__ void resizeImageKernel(T const* input, Nd4jLong const* inputShape, T* outputYptr, Nd4jLong* outputShape, Nd4jLong batchSize,
Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, Nd4jLong inRowSize, Nd4jLong outRowSize, Nd4jLong inBatchNumValues,
BilinearInterpolationData* xs_, BilinearInterpolationData* ys_) {
if (blockIdx.x < batchSize) { // blockIdx.x as batch index
auto pX = input + blockIdx.x * inBatchNumValues;
auto channelStart = blockIdx.z * blockDim.z + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (Nd4jLong y = threadIdx.x; y < outHeight; y += blockDim.x) {
const T *ys_input_lower_ptr = pX + ys_[y].bottomIndex * inRowSize;
const T *ys_input_upper_ptr = pX + ys_[y].topIndex * inRowSize;
double yVal = ys_[y].interpolarValue;
auto pZ = outputYptr + y * outRowSize;
for (Nd4jLong x = threadIdx.y; x < outWidth; x += blockDim.y) {
auto xsBottom = xs_[x].bottomIndex;
auto xsTop = xs_[x].topIndex;
auto xVal = xs_[x].interpolarValue;
// process interpolation for all channels
for (int c = channelStart; c < channels; c += step) {
double topLeft(ys_input_lower_ptr[xsBottom + c]);
double topRight(ys_input_lower_ptr[xsTop + c]);
double bottomLeft(ys_input_upper_ptr[xsBottom + c]);
double bottomRight(ys_input_upper_ptr[xsTop + c]);
double top = topLeft + (topRight - topLeft) * xVal;
double bottom = bottomLeft + (bottomRight - bottomLeft) * xVal;
pZ[x * channels + c] = T(top + (bottom - top) * yVal);
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with bilinear interpolation algorithm (host-side launcher)
template <typename T>
static void resizeImage_(nd4j::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight,
Nd4jLong outWidth, Nd4jLong channels,
BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_,
NDArray* output) {
Nd4jLong inRowSize = inWidth * channels;
Nd4jLong inBatchNumValues = inHeight * inRowSize;
Nd4jLong outRowSize = outWidth * channels;
auto stream = context->getCudaStream();
T const *input_b_ptr = reinterpret_cast<T const *>(images->getSpecialBuffer()); // this works only with 'c' direction
T *output_y_ptr = reinterpret_cast<T *>(output->specialBuffer());
resizeImageKernel<T><<<batchSize, outHeight, 256, *stream>>>(input_b_ptr, images->getSpecialShapeInfo(), output_y_ptr, output->specialShapeInfo(), batchSize,
outWidth, outHeight, channels, inRowSize, outRowSize, inBatchNumValues, xs_, ys_);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
static int resizeBilinearFunctor_(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
const Nd4jLong batchSize = images->sizeAt(0);
const Nd4jLong inHeight = images->sizeAt(1);
const Nd4jLong inWidth = images->sizeAt(2);
const Nd4jLong channels = images->sizeAt(3);
const Nd4jLong outHeight = output->sizeAt(1);
const Nd4jLong outWidth = output->sizeAt(2);
// Handle no-op resizes efficiently.
if (outHeight == inHeight && outWidth == inWidth) {
output->assign(images);
return ND4J_STATUS_OK;
}
// Special case for TF compatibility
if((center && inHeight < 2) || (center && inWidth < 2)){
center = false;
}
if ((center && inHeight < 2) || (inHeight < 1) || (outHeight < 1) || (center && outHeight < 2) ||
(center && inWidth < 2) || (inWidth < 1) || (outWidth < 1) || (center && outWidth < 2)) {
// wrong input data
nd4j_printf("image.resize_bilinear: Wrong input or output size to resize\n", "");
return ND4J_STATUS_BAD_ARGUMENTS;
}
float heightScale = center ? (inHeight - 1.f) / double(outHeight - 1.f) : (inHeight / float(outHeight));
float widthScale = center ? (inWidth - 1.f) / double(outWidth - 1.f) : (inWidth / float(outWidth));
BilinearInterpolationData* xs_;// = xs.data();
BilinearInterpolationData* ys_;// = xs.data();
cudaError_t err = cudaMalloc(&xs_, sizeof(BilinearInterpolationData) * (outWidth + 1));
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for vertical parts rectangulars", err);
}
err = cudaMalloc(&ys_, sizeof(BilinearInterpolationData) * (outHeight + 1));
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for horizontal parts rectangulars", err);
}
auto stream = context->getCudaStream();
// Compute the cached interpolation weights on the x and y dimensions.
computeInterpolationWeights<<<256, 512, 512, *stream>>>(outHeight, inHeight, heightScale, 0, ys_);
computeInterpolationWeights<<<256, 512, 512, *stream>>>(outWidth, inWidth, widthScale, channels, xs_);
NDArray::prepareSpecialUse({output}, {images});
resizeImage(context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels, xs_, ys_, output);
NDArray::registerSpecialUse({output}, {images});
err = cudaFree(xs_);
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for vertical parts rectangulars", err);
}
err = cudaFree(ys_);
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for horizontal parts rectangulars", err);
}
return Status::OK();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize by interpolation nearest neighbor algorithm kernel
//
template <typename T>
static __global__ void resizeNeighborKernel(T const* input, Nd4jLong* inputShape, T* output, Nd4jLong* outputShape,
Nd4jLong batchSize, Nd4jLong inWidth, Nd4jLong inHeight, Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, double widthScale, double heightScale, bool center) {
//for (int b = blockIdx.x; b < batchSize; b += gridDim.x)
if (blockIdx.x < batchSize)
{
auto b = blockIdx.x;
for (int y = threadIdx.x; y < outHeight; y += blockDim.x) {
Nd4jLong inY = nd4j::math::nd4j_min(
(center) ? static_cast<Nd4jLong>(nd4j::math::p_round<float>(y * heightScale)) : static_cast<Nd4jLong>(nd4j::math::p_floor<float>(
y * heightScale)), inHeight - 1);
for (int x = threadIdx.y; x < outWidth; x += blockDim.y) {
Nd4jLong inX = nd4j::math::nd4j_min(
(center) ? static_cast<Nd4jLong>(nd4j::math::p_round<float>(x * widthScale)) : static_cast<Nd4jLong>(nd4j::math::p_floor<float>(
x * widthScale)), inWidth - 1);
auto start = blockIdx.z * blockDim.z + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (Nd4jLong e = start; e < channels; e += step) {
Nd4jLong posX[] = {b, inY, inX, e};
Nd4jLong posZ[] = {b, y, x, e};
auto xIndex = shape::getOffset(inputShape, posX);
auto zIndex = shape::getOffset(outputShape, posZ);
output[zIndex] = input[xIndex];
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resizeNeighborFunctor - main algorithm by nearest neighbor
//
template <typename T>
int resizeNeighborFunctor_(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
const Nd4jLong batchSize = images->sizeAt(0);
const Nd4jLong inHeight = images->sizeAt(1);
const Nd4jLong inWidth = images->sizeAt(2);
const Nd4jLong channels = images->sizeAt(3);
const Nd4jLong outHeight = output->sizeAt(1);
const Nd4jLong outWidth = output->sizeAt(2);
// Handle no-op resizes efficiently.
if (outHeight == inHeight && outWidth == inWidth) {
output->assign(images);
return ND4J_STATUS_OK;
}
if ((center && inHeight < 2) || (inHeight < 1) || (outHeight < 1) || (center && outHeight < 2) ||
(center && inWidth < 2) || (inWidth < 1) || (outWidth < 1) || (center && outWidth < 2)) {
// wrong input data
nd4j_printf("image.resize_nearest_neighbor: Wrong input or output size to resize\n", "");
return ND4J_STATUS_BAD_ARGUMENTS;
}
double heightScale = center ? (inHeight - 1.) / double(outHeight - 1.0) : (inHeight / double(outHeight));
double widthScale = center ? (inWidth - 1.) / double(outWidth - 1.0) : (inWidth / double(outWidth));
auto imagesBuffer = reinterpret_cast<T const*>(images->getSpecialBuffer());
auto outputBuffer = reinterpret_cast<T*>(output->specialBuffer());
auto stream = context->getCudaStream();
//T const* input, Nd4jLong const* inputShape, T* output, Nd4jLong* outputShape,
// Nd4jLong batchSize, Nd4jLong inWidth, Nd4jLong inHeight, Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, double widthScale, double heightScale, bool center
//input, inputShape, output, outputShape,
// batchSize, inWidth, inHeight, outWidth, outHeight, channels, widthScale, heightScale, center
NDArray::prepareSpecialUse({output}, {images});
resizeNeighborKernel<T><<<batchSize, outHeight * outWidth, 512, *stream>>>(imagesBuffer, images->getSpecialShapeInfo(), outputBuffer, output->specialShapeInfo(),
batchSize, inWidth, inHeight, outWidth, outHeight, channels, widthScale, heightScale, center);
NDArray::registerSpecialUse({output}, {images});
return Status::OK();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resizeImage - resize bilinear algorithm caller
//
void resizeImage(nd4j::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight,
Nd4jLong inWidth, Nd4jLong outHeight, Nd4jLong outWidth, Nd4jLong channels, BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_, NDArray* output) {
BUILD_SINGLE_SELECTOR(images->dataType(), resizeImage_, (context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels, xs_, ys_, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void resizeImage_,(nd4j::LaunchContext* context, NDArray const* images,
Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight, Nd4jLong outWidth,
Nd4jLong channels, BilinearInterpolationData* xs_, BilinearInterpolationData* ys_, NDArray* output), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int resizeBilinearFunctor(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
BUILD_SINGLE_SELECTOR(images->dataType(), return resizeBilinearFunctor_, (context, images, width, height, center, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeBilinearFunctor_, (nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int resizeNeighborFunctor(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
BUILD_SINGLE_SELECTOR(images->dataType(), return resizeNeighborFunctor_, (context, images, width, height, center, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeNeighborFunctor_, (nd4j::LaunchContext* context, NDArray const* images,
int width, int height, bool center, NDArray* output), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// --------------------------------------------------------------------------------------------------------------- //
// Crop and Resize helper implementation
// --------------------------------------------------------------------------------------------------------------- //
// cropAndResize kernel
//
template <typename T, typename Z, typename I>
static __global__ void cropAndResizeKernel(T const *images, Nd4jLong* imagesShape, Z const* boxes, Nd4jLong* boxesShape,
I const* indices, Nd4jLong* indexShape, I const* cropSize, Nd4jLong* cropShape, int method,
double extrapolationVal, Z* output, Nd4jLong* outputShape, int numBoxes, int cropHeight, int cropWidth,
int batchSize, int imageHeight, int imageWidth, int depth) {
for (int b = blockIdx.x; b < numBoxes; b += gridDim.x)
{
Nd4jLong x1Pos[] = {b, 1};
Nd4jLong y1Pos[] = {b, 0};
Nd4jLong y2Pos[] = {b, 2};
Nd4jLong x2Pos[] = {b, 3};
Z y1 = boxes[shape::getOffset(boxesShape, y1Pos)];//->t<T>(b, 0)];
Z x1 = boxes[shape::getOffset(boxesShape, x1Pos)];
Z y2 = boxes[shape::getOffset(boxesShape, y2Pos)];
Z x2 = boxes[shape::getOffset(boxesShape, x2Pos)];
int bIn = indices[b];
if (bIn >= batchSize) {
continue;
}
Z heightScale = (cropHeight > 1) ? (y2 - y1) * (imageHeight - 1) / Z(cropHeight - 1) : Z(0);
Z widthScale = (cropWidth > 1) ? (x2 - x1) * (imageWidth - 1) / Z(cropWidth - 1) : Z(0);
for (int y = threadIdx.x; y < cropHeight; y += blockDim.x) {
const float inY = (cropHeight > 1)
? y1 * (imageHeight - 1) + y * heightScale
: 0.5 * (y1 + y2) * (imageHeight - 1);
if (inY < 0 || inY > imageHeight - 1) {
for (int x = threadIdx.y; x < cropWidth; x += blockDim.y) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
//crops->p(b, y, x, d, extrapolationVal);
}
}
continue;
}
if (method == 0 /* bilinear */) {
const int topYIndex = nd4j::math::p_floor(inY);
const int bottomYIndex = nd4j::math::p_ceil(inY);
const float y_lerp = inY - topYIndex;
for (int x = 0; x < cropWidth; ++x) {
const float in_x = (cropWidth > 1)
? x1 * (imageWidth - 1) + x * widthScale
: 0.5 * (x1 + x2) * (imageWidth - 1);
if (in_x < 0 || in_x > imageWidth - 1) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
// crops->p(b, y, x, d, extrapolationVal);
}
continue;
}
int left_x_index = math::p_floor(in_x);
int right_x_index = math::p_ceil(in_x);
T x_lerp = in_x - left_x_index;
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong topLeftPos[] = {bIn, topYIndex, left_x_index, d};
Nd4jLong topRightPos[] = {bIn, topYIndex, right_x_index, d};
Nd4jLong bottomLeftPos[] = {bIn, bottomYIndex, left_x_index, d};
Nd4jLong bottomRightPos[] = {bIn, bottomYIndex, right_x_index, d};
const T topLeft(images[shape::getOffset(imagesShape, topLeftPos)]); //->e<float>(bIn, topYIndex, left_x_index, d));
const T topRight(images[shape::getOffset(imagesShape, topRightPos)]); //->e<float>(bIn, topYIndex, right_x_index, d));
const T bottomLeft(images[shape::getOffset(imagesShape, bottomLeftPos)]);//->e<float>(bIn, bottomYIndex, left_x_index, d));
const T bottomRight(images[shape::getOffset(imagesShape, bottomRightPos)]); //->e<float>(bIn, bottomYIndex, right_x_index, d));
const T top = topLeft + (topRight - topLeft) * x_lerp;
const T bottom = bottomLeft + (bottomRight - bottomLeft) * x_lerp;
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = Z(top + (bottom - top) * y_lerp);
}
}
} else { // method is "nearest neighbor"
for (int x = 0; x < cropWidth; ++x) {
const float inX = (cropWidth > 1)
? x1 * (imageWidth - 1) + x * widthScale
: 0.5 * (x1 + x2) * (imageWidth - 1);
if (inX < 0 || inX > imageWidth - 1) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
}
continue;
}
const int closestXIndex = roundf(inX);
const int closestYIndex = roundf(inY);
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
Nd4jLong xPos[] = {bIn, closestYIndex, closestXIndex, d};
auto zIndex = shape::getOffset(outputShape, zPos);
auto xIndex = shape::getOffset(imagesShape, xPos);
output[zIndex] = images[xIndex];
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// cropAndResizeFunctor main algorithm
// context - launch context
// images - batch of images (4D tensor - [batch, height, width, channels])
// boxes - 2D tensor with boxes for crop
// indices - 2D int tensor with indices of boxes to crop
// cropSize - 2D int tensor with crop box sizes
// method - (one of 0 - bilinear, 1 - nearest)
// extrapolationVal - double value of extrapolation
// crops - output (4D tensor - [numBoxes, cropHeight, cropWidth, channels])
//
template <typename T, typename Z, typename I>
static void cropAndResizeFunctor_(nd4j::LaunchContext* context, NDArray const *images, NDArray const *boxes, NDArray const *indices,
NDArray const *cropSize, int method, double extrapolationVal, NDArray *crops) {
const int batchSize = images->sizeAt(0);
const int imageHeight = images->sizeAt(1);
const int imageWidth = images->sizeAt(2);
const int numBoxes = crops->sizeAt(0);
const int cropHeight = crops->sizeAt(1);
const int cropWidth = crops->sizeAt(2);
const int depth = crops->sizeAt(3);
auto stream = context->getCudaStream();
T const* imagesBuf = reinterpret_cast<T const*>(images->getSpecialBuffer());
Z const* boxesBuf = reinterpret_cast<Z const*>(boxes->getSpecialBuffer());
I const* indexBuf = reinterpret_cast<I const*>(indices->getSpecialBuffer());
I const* cropSizes = reinterpret_cast<I const*>(cropSize->getSpecialBuffer());
Z* outBuf = reinterpret_cast<Z*>(crops->specialBuffer());
NDArray::prepareSpecialUse({crops}, {images, boxes, indices, cropSize});
cropAndResizeKernel<T,Z,I><<<batchSize, math::nd4j_max(imageHeight * imageWidth, cropHeight * cropWidth), 512, *stream>>>(imagesBuf, images->getSpecialShapeInfo(), boxesBuf, boxes->getSpecialShapeInfo(), indexBuf, indices->getSpecialShapeInfo(),
cropSizes, cropSize->getSpecialShapeInfo(), method, extrapolationVal, outBuf, crops->specialShapeInfo(), numBoxes, cropHeight, cropWidth, batchSize, imageHeight, imageWidth, depth);
NDArray::registerSpecialUse({crops}, {images, boxes, indices, cropSize});
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void cropAndResizeFunctor(nd4j::LaunchContext * context, NDArray const *images, NDArray const *boxes, NDArray const *indices, NDArray const *cropSize, int method, double extrapolationVal, NDArray *crops) {
BUILD_TRIPLE_SELECTOR(images->dataType(), boxes->dataType(), indices->dataType(), cropAndResizeFunctor_,
(context, images, boxes, indices, cropSize, method, extrapolationVal, crops), NUMERIC_TYPES, FLOAT_TYPES, INTEGER_TYPES);
//
}
BUILD_TRIPLE_TEMPLATE(template void cropAndResizeFunctor_,
(nd4j::LaunchContext * context, NDArray const* images, NDArray const* boxes, NDArray const* indices, NDArray const* cropSize, int method, double extrapolationVal, NDArray* crops),
NUMERIC_TYPES, FLOAT_TYPES, INTEGER_TYPES);
}
}
}
|
f22c4f752de6d7487068825e879656307282214f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cinttypes>
#include <iostream>
#include <chrono>
#include <hip/hip_runtime.h>
#define THREAD_COUNT 128
#define TASK_WORK (1ULL << 30)
#define FAST_NEXT_INT
#ifdef BOINC
#include "boinc_api.h"
#if defined _WIN32 || defined _WIN64
#include "boinc_win.h"
#endif
#endif
#ifndef BOINC
#define boinc_fopen(file, mode) fopen(file, mode)
#define boinc_delete_file(file) remove(file)
#define boinc_begin_critical_section()
#define boinc_end_critical_section()
#define boinc_fraction_done(frac)
#define boinc_finish(s) exit(s)
#define boinc_time_to_checkpoint() true
#define boinc_checkpoint_completed()
#endif
namespace Random {
#define RANDOM__MULTIPLIER 25214903917ULL
#define RANDOM__MULTIPLIER_INVERSE 246154705703781ULL
#define RANDOM__ADDEND 11ULL
#define RANDOM__ADDEND_INVERSE 107048004364969ULL
#define RANDOM__MASK ((1ULL << 48) - 1)
__device__ uint64_t setSeed(uint64_t seed) {
return (seed ^ RANDOM__MULTIPLIER) & RANDOM__MASK;
}
__device__ int32_t next(uint64_t &seed, int bits) {
seed = (seed * RANDOM__MULTIPLIER + RANDOM__ADDEND) & RANDOM__MASK;
return (int32_t)(seed >> (48 - bits));
}
__device__ int32_t nextInt(uint64_t &seed) {
return next(seed, 32);
}
__device__ int32_t nextInt(uint64_t &seed, int bound) {
if ((bound & -bound) == bound) {
seed = (seed * RANDOM__MULTIPLIER + RANDOM__ADDEND) & RANDOM__MASK;
return (int32_t)((bound * (seed >> 17)) >> 31);
}
int32_t bits, value;
#ifndef FAST_NEXT_INT
do {
#endif
seed = (seed * RANDOM__MULTIPLIER + RANDOM__ADDEND) & RANDOM__MASK;
bits = seed >> 17;
value = bits % bound;
#ifndef FAST_NEXT_INT
} while (bits - value + (bound - 1) < 0);
#endif
return value;
}
__device__ uint64_t nextLong(uint64_t &seed) {
return ((uint64_t)next(seed, 32) << 32) + next(seed, 32);
}
__device__ float nextFloat(uint64_t &seed) {
return next(seed, 24) / ((float)(1 << 24));
}
__device__ double nextDouble(uint64_t &seed) {
return (((uint64_t)next(seed, 26) << 27) + next(seed, 27)) / (double)(1ULL << 53);
}
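// advance<n> folds n LCG steps into one affine update: after the loop
// m = MULTIPLIER^n and a = ADDEND * (MULTIPLIER^(n-1) + ... + 1) (mod 2^48),
// so seed * m + a is equivalent to n consecutive next() calls.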
template <int n>
__device__ constexpr void advance(uint64_t &seed) {
uint64_t m = 1;
uint64_t a = 0;
for (int i = 0; i < n; i++) {
a = (a * RANDOM__MULTIPLIER + RANDOM__ADDEND) & RANDOM__MASK;
m = (m * RANDOM__MULTIPLIER) & RANDOM__MASK;
}
seed = (seed * m + a) & RANDOM__MASK;
}
}
__shared__ uint8_t sharedMemory[256 * THREAD_COUNT];
#define SHARED_MEMORY_ACCESS(n) sharedMemory[(threadIdx.x << 8) | n]
#define CASTED_SHARED_MEMORY_ACCESS(n) ((double*)sharedMemory)[(threadIdx.x << 5) | n]
namespace Terrain {
struct OctaveData {
double xOffset;
double yOffset;
double zOffset;
uint8_t permutations[256];
};
struct NoiseData {
OctaveData noise1[16];
OctaveData noise2[16];
OctaveData noise3[8];
OctaveData noise6[16];
};
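// Draws the three per-octave offsets and builds the 256-entry permutation
// table with a Fisher-Yates-style shuffle, using this thread's slice of the
// shared-memory scratch buffer.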
__device__ void initializeOctave(uint64_t &random, OctaveData *octaveData) {
octaveData->xOffset = Random::nextDouble(random) * 256.0;
octaveData->yOffset = Random::nextDouble(random) * 256.0;
octaveData->zOffset = Random::nextDouble(random) * 256.0;
for (int i = 0; i < 256; i++) {
SHARED_MEMORY_ACCESS(i) = i;
}
for (int i = 0; i < 256; i++) {
uint8_t k = Random::nextInt(random, 256 - i) + i;
uint8_t l = SHARED_MEMORY_ACCESS(i);
octaveData->permutations[i] = SHARED_MEMORY_ACCESS(k);
SHARED_MEMORY_ACCESS(k) = l;
}
}
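// Seeds the octave groups in order (16 + 16 + 8 for the 3D noises), skips the
// 14 unused octaves (each consumes 262 LCG steps under FAST_NEXT_INT, so
// advance<3668> = 14 * 262), and finally seeds the 16 noise6 octaves.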
__device__ void initializeNoise(uint64_t worldSeed, NoiseData* noiseData) {
uint64_t random = Random::setSeed(worldSeed);
for (int i = 0; i < 16; i++) { initializeOctave(random, &noiseData->noise1[i]); }
for (int i = 0; i < 16; i++) { initializeOctave(random, &noiseData->noise2[i]); }
for (int i = 0; i < 8; i++) { initializeOctave(random, &noiseData->noise3[i]); }
#ifndef FAST_NEXT_INT
for (int i = 0; i < 14; i++) {
Random::advance<7>(random);
for (int j = 1; j < 256; j++) {
Random::nextInt(random, 256 - j);
}
}
#else
Random::advance<3668>(random);
#endif
for (int i = 0; i < 16; i++) { initializeOctave(random, &noiseData->noise6[i]); }
}
__device__ double lerp(double t, double a, double b) {
return a + t * (b - a);
}
__device__ double func_4110_a(int i, double x, double z) {
switch (i & 0xF) {
case 0x0:
return x;
case 0x1:
return -x;
case 0x2:
return x;
case 0x3:
return -x;
case 0x4:
return x + z;
case 0x5:
return -x + z;
case 0x6:
return x - z;
case 0x7:
return -x - z;
case 0x8:
return z;
case 0x9:
return -z;
case 0xA:
return -z;
case 0xB:
return -z;
case 0xC:
return x;
case 0xD:
return z;
case 0xE:
return -x;
case 0xF:
return -z;
default:
return 0;
}
}
__device__ double grad(int i, double x, double y, double z) {
switch (i & 0xF) {
case 0x0:
return x + y;
case 0x1:
return -x + y;
case 0x2:
return x - y;
case 0x3:
return -x - y;
case 0x4:
return x + z;
case 0x5:
return -x + z;
case 0x6:
return x - z;
case 0x7:
return -x - z;
case 0x8:
return y + z;
case 0x9:
return -y + z;
case 0xA:
return y - z;
case 0xB:
return -y - z;
case 0xC:
return y + x;
case 0xD:
return -y + z;
case 0xE:
return y - x;
case 0xF:
return -y - z;
default:
return 0;
}
}
__device__ uint8_t getPermutation(const uint8_t* __restrict__ permutations, int n) {
return permutations[n & 0xFF];
}
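// 2D fractal noise: octave i samples at frequency scale * 2^-i and is weighted
// by octaveWidth = 2^i (octavesFactor halves every iteration below), summed
// over numOctaves octaves.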
__device__ double optimizedNoise2D(const OctaveData* __restrict__ octaveDatas, double baseX, double baseZ, int xIteration, int zIteration, double noiseScaleX, double noiseScaleZ, int numOctaves) {
double outputValue = 0;
double octavesFactor = 1.0;
for (int i = 0; i < numOctaves; i++) {
double noiseFactorX = noiseScaleX * octavesFactor;
double noiseFactorZ = noiseScaleZ * octavesFactor;
double startX = (double)baseX * octavesFactor * noiseScaleX;
double startZ = (double)baseZ * octavesFactor * noiseScaleZ;
double octaveWidth = 1.0 / octavesFactor;
double xCoord = startX + (double)xIteration * noiseFactorX + octaveDatas[i].xOffset;
int xCoordFloor = (int)xCoord;
if (xCoord < (double)xCoordFloor) {
xCoordFloor--;
}
int xUnitCube = xCoordFloor & 0xFF;
xCoord -= xCoordFloor;
double fadeX = xCoord * xCoord * xCoord * (xCoord * (xCoord * 6.0 - 15.0) + 10.0);
double zCoord = startZ + (double)zIteration * noiseFactorZ + octaveDatas[i].zOffset;
int zCoordFloor = (int)zCoord;
if (zCoord < (double)zCoordFloor) {
zCoordFloor--;
}
int zUnitCube = zCoordFloor & 0xFF;
zCoord -= zCoordFloor;
double fadeZ = zCoord * zCoord * zCoord * (zCoord * (zCoord * 6.0 - 15.0) + 10.0);
int l = getPermutation(octaveDatas[i].permutations, xUnitCube) + 0;
int j1 = getPermutation(octaveDatas[i].permutations, l) + zUnitCube;
int k1 = getPermutation(octaveDatas[i].permutations, xUnitCube + 1) + 0;
int l1 = getPermutation(octaveDatas[i].permutations, k1) + zUnitCube;
double d9 = lerp(fadeX, func_4110_a(getPermutation(octaveDatas[i].permutations, j1), xCoord, zCoord), grad(getPermutation(octaveDatas[i].permutations, l1), xCoord - 1.0, 0.0, zCoord));
double d11 = lerp(fadeX, grad(getPermutation(octaveDatas[i].permutations, j1 + 1), xCoord, 0.0, zCoord - 1.0), grad(getPermutation(octaveDatas[i].permutations, l1 + 1), xCoord - 1.0, 0.0, zCoord - 1.0));
double d23 = lerp(fadeZ, d9, d11);
outputValue += d23 * octaveWidth;
octavesFactor /= 2.0;
}
return outputValue;
}
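// 3D counterpart of the routine above: for one (x, z) column it accumulates
// the octave sums of yIterations consecutive y samples directly into this
// thread's shared-memory slots starting at sharedMemoryOffset.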
__device__ void optimizedNoise3D(const OctaveData* __restrict__ octaveDatas, int sharedMemoryOffset, double baseX, double baseY, double baseZ, int xIteration, int zIteration, double noiseScaleX, double noiseScaleY, double noiseScaleZ, int numOctaves, int yIterationStart, int yIterations) {
double octavesFactor = 1.0;
for (int i = 0; i < numOctaves; i++) {
double noiseFactorX = noiseScaleX * octavesFactor;
double noiseFactorY = noiseScaleY * octavesFactor;
double noiseFactorZ = noiseScaleZ * octavesFactor;
double startX = (double)baseX * octavesFactor * noiseScaleX;
double startY = (double)baseY * octavesFactor * noiseScaleY;
double startZ = (double)baseZ * octavesFactor * noiseScaleZ;
int i2 = -1;
double d13 = 0.0;
double d15 = 0.0;
double d16 = 0.0;
double d18 = 0.0;
double octaveWidth = 1.0 / octavesFactor;
double xCoord = startX + (double)xIteration * noiseFactorX + octaveDatas[i].xOffset;
int xCoordFloor = (int)xCoord;
if (xCoord < (double)xCoordFloor) {
xCoordFloor--;
}
int xUnitCube = xCoordFloor & 0xFF;
xCoord -= xCoordFloor;
double fadeX = xCoord * xCoord * xCoord * (xCoord * (xCoord * 6.0 - 15.0) + 10.0);
double zCoord = startZ + (double)zIteration * noiseFactorZ + octaveDatas[i].zOffset;
int zCoordFloor = (int)zCoord;
if (zCoord < (double)zCoordFloor) {
zCoordFloor--;
}
int zUnitCube = zCoordFloor & 0xFF;
zCoord -= zCoordFloor;
double fadeZ = zCoord * zCoord * zCoord * (zCoord * (zCoord * 6.0 - 15.0) + 10.0);
for (int yIteration = 0; yIteration < yIterationStart + yIterations; yIteration++) {
double yCoord = startY + (double)yIteration * noiseFactorY + octaveDatas[i].yOffset;
int yCoordFloor = (int)yCoord;
if (yCoord < (double)yCoordFloor) {
yCoordFloor--;
}
int yUnitCube = yCoordFloor & 0xFF;
yCoord -= yCoordFloor;
double fadeY = yCoord * yCoord * yCoord * (yCoord * (yCoord * 6.0 - 15.0) + 10.0);
if (yIteration == 0 || yUnitCube != i2) {
i2 = yUnitCube;
int j2 = getPermutation(octaveDatas[i].permutations, xUnitCube) + yUnitCube;
int k2 = getPermutation(octaveDatas[i].permutations, j2) + zUnitCube;
int l2 = getPermutation(octaveDatas[i].permutations, j2 + 1) + zUnitCube;
int i3 = getPermutation(octaveDatas[i].permutations, xUnitCube + 1) + yUnitCube;
int k3 = getPermutation(octaveDatas[i].permutations, i3) + zUnitCube;
int l3 = getPermutation(octaveDatas[i].permutations, i3 + 1) + zUnitCube;
d13 = lerp(fadeX, grad(getPermutation(octaveDatas[i].permutations, k2), xCoord, yCoord, zCoord), grad(getPermutation(octaveDatas[i].permutations, k3), xCoord - 1.0, yCoord, zCoord));
d15 = lerp(fadeX, grad(getPermutation(octaveDatas[i].permutations, l2), xCoord, yCoord - 1.0, zCoord), grad(getPermutation(octaveDatas[i].permutations, l3), xCoord - 1.0, yCoord - 1.0, zCoord));
d16 = lerp(fadeX, grad(getPermutation(octaveDatas[i].permutations, k2 + 1), xCoord, yCoord, zCoord - 1.0), grad(getPermutation(octaveDatas[i].permutations, k3 + 1), xCoord - 1.0, yCoord, zCoord - 1.0));
d18 = lerp(fadeX, grad(getPermutation(octaveDatas[i].permutations, l2 + 1), xCoord, yCoord - 1.0, zCoord - 1.0), grad(getPermutation(octaveDatas[i].permutations, l3 + 1), xCoord - 1.0, yCoord - 1.0, zCoord - 1.0));
}
double d28 = lerp(fadeY, d13, d15);
double d29 = lerp(fadeY, d16, d18);
double d30 = lerp(fadeZ, d28, d29);
if (yIteration >= yIterationStart) {
CASTED_SHARED_MEMORY_ACCESS(yIteration - yIterationStart + sharedMemoryOffset) += d30 * octaveWidth;
}
}
octavesFactor /= 2.0;
}
}
__device__ void mixNoiseValues(int sharedMemoryOutputOffset, int sharedMemoryNoise1Offset, int sharedMemoryNoise2Offset, int sharedMemoryNoise3Offset, double noise6, int yAreaStart, int yAreas) {
int i2 = 0;
int j2 = 0;
float f1 = 0.37000000476837158203125f;
float f2 = -0.07500000298023223876953125;
double d2 = noise6 / 8000.0;
if (d2 < 0.0) {
d2 = -d2 * 0.29999999999999999;
}
d2 = d2 * 3.0 - 2.0;
if (d2 < 0.0) {
d2 /= 2.0;
if (d2 < -1.0) {
d2 = -1.0;
}
d2 /= 1.3999999999999999;
d2 /= 2.0;
} else {
if (d2 > 1.0) {
d2 = 1.0;
}
d2 /= 8.0;
}
j2++;
for (int k3 = yAreaStart; k3 < (yAreaStart + yAreas); k3++) {
double d3 = f2;
double d4 = f1;
d3 += d2 * 0.20000000000000001;
d3 = (d3 * (double)17) / 16.0;
double d5 = (double)17 / 2.0 + d3 * 4.0;
double d6 = 0.0;
double d7 = (((double)k3 - d5) * 12.0 * 128.0) / (double)(1 << 7) / d4;
if (d7 < 0.0) {
d7 *= 4.0;
}
double d8 = CASTED_SHARED_MEMORY_ACCESS(i2 + sharedMemoryNoise1Offset) / 512.0;
double d9 = CASTED_SHARED_MEMORY_ACCESS(i2 + sharedMemoryNoise2Offset) / 512.0;
double d10 = (CASTED_SHARED_MEMORY_ACCESS(i2 + sharedMemoryNoise3Offset) / 10.0 + 1.0) / 2.0;
if (d10 < 0.0) {
d6 = d8;
} else if (d10 > 1.0) {
d6 = d9;
} else {
d6 = d8 + (d9 - d8) * d10;
}
d6 -= d7;
if (k3 > 17 - 4) {
double d11 = (float)(k3 - (17 - 4)) / 3.0f;
d6 = d6 * (1.0 - d11) + -10.0 * d11;
}
CASTED_SHARED_MEMORY_ACCESS(i2 + sharedMemoryOutputOffset) = d6;
i2++;
}
}
__device__ void optimizedNoise(const NoiseData* __restrict__ noiseData, int sharedMemoryWriteOffset, int32_t x, int32_t y, int32_t z, int xArea, int zArea, int yAreaStart, int yAreas) {
double noise6Value = optimizedNoise2D(noiseData->noise6, (double)x, (double)z, xArea, zArea, 200.0, 200.0, 16);
for (int i = 0; i < yAreas; i++) {
CASTED_SHARED_MEMORY_ACCESS(i) = 0.0;
}
for (int i = 0; i < yAreas; i++) {
CASTED_SHARED_MEMORY_ACCESS(i + yAreas) = 0.0;
}
for (int i = 0; i < yAreas; i++) {
CASTED_SHARED_MEMORY_ACCESS(i + yAreas + yAreas) = 0.0;
}
optimizedNoise3D(noiseData->noise1, 0, (double)x, (double)y, (double)z, xArea, zArea, 684.41200000000003, 684.41200000000003, 684.41200000000003, 16, yAreaStart, yAreas);
optimizedNoise3D(noiseData->noise2, yAreas, (double)x, (double)y, (double)z, xArea, zArea, 684.41200000000003, 684.41200000000003, 684.41200000000003, 16, yAreaStart, yAreas);
optimizedNoise3D(noiseData->noise3, yAreas + yAreas, (double)x, (double)y, (double)z, xArea, zArea, 8.5551500000000011, 4.2775750000000006, 8.5551500000000011, 8, yAreaStart, yAreas);
mixNoiseValues(sharedMemoryWriteOffset, 0, yAreas, yAreas + yAreas, noise6Value, yAreaStart, yAreas);
}
__device__ void optimizedPointLerp(int sharedMemoryOffset, double bottomRight, double bottomLeft, double topRight, double topLeft, double bottomRight2, double bottomLeft2, double topRight2, double topLeft2, uint8_t baseHeight) {
double bottomRightDiff = (bottomRight2 - bottomRight) * 0.125;
double bottomLeftDiff = (bottomLeft2 - bottomLeft) * 0.125;
double topRightDiff = (topRight2 - topRight) * 0.125;
double topLeftDiff = (topLeft2 - topLeft) * 0.125;
for (int y = 0; y < 8; y++) {
double localBottomRight = bottomRight;
double localTopRight = topRight;
double localBottomRightDiff = (bottomLeft - bottomRight) * 0.25;
double localTopRightDiff = (topLeft - topRight) * 0.25;
for (int x = 0; x < 4; x++) {
double localHeight = localBottomRight;
double zStep = (localTopRight - localBottomRight) * 0.25;
localHeight -= zStep;
for (int z = 0; z < 4; z++) {
if ((localHeight += zStep) > 0.0) {
SHARED_MEMORY_ACCESS(x * 4 + z + sharedMemoryOffset) = baseHeight + y;
}
}
localBottomRight += localBottomRightDiff;
localTopRight += localTopRightDiff;
}
bottomRight += bottomRightDiff;
bottomLeft += bottomLeftDiff;
topRight += topRightDiff;
topLeft += topLeftDiff;
}
}
__device__ uint8_t optimizedMod4Lerp(double a, double b, uint8_t baseHeight) {
uint8_t height = 0;
double diff = (b - a) * 0.125;
for (int i = 0; i < 8; i++) {
if (a > 0) {
height = baseHeight + i;
}
a += diff;
}
return height;
}
}
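// checkTerrain samples the terrain noise around a fixed chunk location and rejects any world
// seed whose interpolated surface heights do not match the hard-coded target pattern below.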
__device__ bool checkTerrain(uint64_t worldSeed) {
Terrain::NoiseData noiseData;
Terrain::initializeNoise(worldSeed, &noiseData);
Terrain::optimizedNoise(&noiseData, 9, -22 * 4, 0, 2 * 4, 0, 2, 8, 2);
if (Terrain::optimizedMod4Lerp(CASTED_SHARED_MEMORY_ACCESS(9), CASTED_SHARED_MEMORY_ACCESS(10), 64) != 65) {
return false;
}
Terrain::optimizedNoise(&noiseData, 11, -22 * 4, 0, 2 * 4, 1, 2, 8, 2);
if (Terrain::optimizedMod4Lerp(CASTED_SHARED_MEMORY_ACCESS(11), CASTED_SHARED_MEMORY_ACCESS(12), 64) != 67) {
return false;
}
Terrain::optimizedNoise(&noiseData, 13, -22 * 4, 0, 2 * 4, 0, 3, 8, 2);
if (Terrain::optimizedMod4Lerp(CASTED_SHARED_MEMORY_ACCESS(13), CASTED_SHARED_MEMORY_ACCESS(14), 64) != 67) {
return false;
}
Terrain::optimizedNoise(&noiseData, 15, -22 * 4, 0, 2 * 4, 1, 3, 7, 3);
if (CASTED_SHARED_MEMORY_ACCESS(16) > 0) { return false; }
if (Terrain::optimizedMod4Lerp(CASTED_SHARED_MEMORY_ACCESS(15), CASTED_SHARED_MEMORY_ACCESS(16), 56) != 63) {
return false;
}
Terrain::optimizedNoise(&noiseData, 18, -22 * 4, 0, 2 * 4, 2, 3, 7, 2);
if (CASTED_SHARED_MEMORY_ACCESS(19) > 0) { return false; }
if (Terrain::optimizedMod4Lerp(CASTED_SHARED_MEMORY_ACCESS(18), CASTED_SHARED_MEMORY_ACCESS(19), 56) != 63) {
return false;
}
int sharedMemoryOffset = 0;
for (int i = 0; i < 16; i++) {
SHARED_MEMORY_ACCESS(sharedMemoryOffset + i) = 0;
}
Terrain::optimizedPointLerp(sharedMemoryOffset, CASTED_SHARED_MEMORY_ACCESS(9), CASTED_SHARED_MEMORY_ACCESS(11), CASTED_SHARED_MEMORY_ACCESS(13), CASTED_SHARED_MEMORY_ACCESS(16), CASTED_SHARED_MEMORY_ACCESS(10), CASTED_SHARED_MEMORY_ACCESS(12), CASTED_SHARED_MEMORY_ACCESS(14), CASTED_SHARED_MEMORY_ACCESS(17), 64);
if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 2) != 66) { return false; }
if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 3) != 67) { return false; }
if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 4) != 65) { return false; }
if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 6) != 66) { return false; }
if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 7) != 66) { return false; }
if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 8) != 65) { return false; }
// if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 9) != 65) { return false; }
if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 12) != 66) { return false; }
if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 13) != 65) { return false; }
if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 14) != 64) { return false; }
if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 15) != 64) { return false; }
return true;
}
__device__ __managed__ uint32_t outputCounter = 0;
__device__ __managed__ uint64_t outputBuffer[100000];
__global__ void __launch_bounds__(THREAD_COUNT, 3) gpuWork(uint64_t seedOffset) {
uint64_t worldSeed = (uint64_t)blockIdx.x * (uint64_t)blockDim.x + (uint64_t)threadIdx.x + seedOffset;
if (!checkTerrain(worldSeed)) {
return;
}
uint32_t idx = atomicAdd(&outputCounter, 1);
outputBuffer[idx] = worldSeed;
}
uint64_t milliseconds() {
return (std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch())).count();
}
#define GPU_ASSERT(code) gpuAssert((code), __FILE__, __LINE__)
inline void gpuAssert(hipError_t code, const char *file, int line) {
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s (code %d) %s %d\n", hipGetErrorString(code), code, file, line);
boinc_finish(code);
}
}
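// calculateBlockSize empirically tunes the launch grid: it sets candidate block-count bits one
// at a time, times a throwaway kernel launch for each candidate, and keeps the largest block
// count whose single launch still finishes within the given threshold (in seconds).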
int calculateBlockSize(double threshold) {
hipLaunchKernelGGL(( gpuWork), dim3(1), dim3(THREAD_COUNT), 0, 0, 0);
GPU_ASSERT(hipPeekAtLastError());
GPU_ASSERT(hipDeviceSynchronize());
GPU_ASSERT(hipPeekAtLastError());
outputCounter = 0;
int setBits = 0;
int lowestSetBit = 30;
for (int i = 0; i < 30; i++) {
int j;
for (j = 0; j < lowestSetBit; j++) {
int32_t newBits = setBits | (1 << j);
uint64_t startTime = milliseconds();
hipLaunchKernelGGL(( gpuWork), dim3(newBits), dim3(THREAD_COUNT), 0, 0, 0);
GPU_ASSERT(hipPeekAtLastError());
GPU_ASSERT(hipDeviceSynchronize());
GPU_ASSERT(hipPeekAtLastError());
outputCounter = 0;
uint64_t endTime = milliseconds();
double elapsed = (double)(endTime - startTime) / 1000.0;
if (elapsed > threshold) {
if (j != 0) {
setBits |= (1 << (j - 1));
lowestSetBit = (j - 1);
} else if (j == 0) {
lowestSetBit = 0;
}
break;
}
}
if (lowestSetBit == 0) { break; }
if (j == lowestSetBit) {
setBits |= (1 << (j - 1));
lowestSetBit = (j - 1);
}
}
return setBits;
}
struct CheckpointData {
int lastIteration;
double elapsed;
int blockCount;
};
int main(int argc, char* argv[]) {
int taskNumber = 0;
int device = 0;
for (int i = 1; i < argc; i += 2) {
const char *param = argv[i];
if (strcmp(param, "-t") == 0 || strcmp(param, "--task") == 0) {
taskNumber = atoi(argv[i + 1]);
} else if (strcmp(param, "-d") == 0 || strcmp(param, "--device") == 0) {
device = atoi(argv[i + 1]);
}
}
int startIteration = 0;
double elapsed = 0;
int BLOCK_COUNT = 0;
fprintf(stderr, "Recieved work unit: %d.\n", taskNumber);
fflush(stderr);
#ifdef BOINC
BOINC_OPTIONS options;
boinc_options_defaults(options);
options.normal_thread_priority = true;
boinc_init_options(&options);
APP_INIT_DATA aid;
boinc_get_init_data(aid);
if (aid.gpu_device_num >= 0) {
fprintf(stderr, "boinc gpu: %d, cli gpu: %d.\n", aid.gpu_device_num, device);
device = aid.gpu_device_num;
} else {
fprintf(stderr, "cli gpu: %d.\n", device);
}
#endif
hipSetDevice(device);
GPU_ASSERT(hipPeekAtLastError());
GPU_ASSERT(hipDeviceSynchronize());
GPU_ASSERT(hipPeekAtLastError());
FILE* checkpointFile = boinc_fopen("trailer_checkpoint.txt", "rb");
if (checkpointFile) {
boinc_begin_critical_section();
struct CheckpointData checkpointData;
fread(&checkpointData, sizeof(checkpointData), 1, checkpointFile);
startIteration = checkpointData.lastIteration + 1;
elapsed = checkpointData.elapsed;
BLOCK_COUNT = checkpointData.blockCount;
fclose(checkpointFile);
fprintf(stderr, "Loaded checkpoint %d %.2f %d.\n", startIteration, elapsed, BLOCK_COUNT);
fflush(stderr);
boinc_end_critical_section();
} else {
fprintf(stderr, "No checkpoint to load.\n");
}
if (BLOCK_COUNT == 0) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
int cc = deviceProp.major * 10 + deviceProp.minor;
if (cc <= 52) {
BLOCK_COUNT = calculateBlockSize(0.02);
} else if (deviceProp.major == 6) {
BLOCK_COUNT = calculateBlockSize(0.1);
} else if (deviceProp.major == 7) {
BLOCK_COUNT = calculateBlockSize(0.15);
} else if (deviceProp.major == 8) {
BLOCK_COUNT = calculateBlockSize(0.5);
} else {
fprintf(stderr, "Unrecognized compute capability.\n");
fflush(stderr);
boinc_finish(1);
}
fprintf(stderr, "Calculated block count: %d.\n", BLOCK_COUNT);
if (BLOCK_COUNT == 0) { BLOCK_COUNT = 1; }
fflush(stderr);
}
uint64_t GRID_WORK = (uint64_t)BLOCK_COUNT * THREAD_COUNT;
int ITERATIONS_NEEDED = ((TASK_WORK + GRID_WORK - 1) / GRID_WORK);
for (int i = startIteration; i < ITERATIONS_NEEDED; i++) {
uint64_t seedOffset = (TASK_WORK * taskNumber) + GRID_WORK * i;
uint64_t startTime = milliseconds();
hipLaunchKernelGGL(( gpuWork), dim3(BLOCK_COUNT), dim3(THREAD_COUNT), 0, 0, seedOffset);
GPU_ASSERT(hipPeekAtLastError());
GPU_ASSERT(hipDeviceSynchronize());
GPU_ASSERT(hipPeekAtLastError());
uint64_t endTime = milliseconds();
boinc_begin_critical_section();
double localElapsed = ((double)(endTime - startTime) / 1000);
elapsed += localElapsed;
if (boinc_time_to_checkpoint()) {
struct CheckpointData checkpointData;
checkpointData.lastIteration = i;
checkpointData.elapsed = elapsed;
checkpointData.blockCount = BLOCK_COUNT;
FILE* checkpointFile = boinc_fopen("trailer_checkpoint.txt", "wb");
fwrite(&checkpointData, sizeof(checkpointData), 1, checkpointFile);
fclose(checkpointFile);
boinc_checkpoint_completed();
}
if (outputCounter > 0) {
FILE *seedsOut = boinc_fopen("trailer_seeds.txt", "a");
for (int j = 0; j < outputCounter; j++) {
if (outputBuffer[j] < (TASK_WORK * (taskNumber + 1))) {
fprintf(seedsOut, "Seed: %llu\n", outputBuffer[j]);
}
}
fclose(seedsOut);
outputCounter = 0;
}
double fracDone = (double)i / ITERATIONS_NEEDED;
boinc_fraction_done(fracDone);
boinc_end_critical_section();
}
boinc_begin_critical_section();
FILE *seedsOut = boinc_fopen("trailer_seeds.txt", "a");
fclose(seedsOut);
fprintf(stderr, "Finished in %.2f seconds. Speed: %.2f/s.\n", elapsed, (double)TASK_WORK / elapsed);
fflush(stderr);
boinc_delete_file("trailer_checkpoint.txt");
boinc_end_critical_section();
boinc_finish(0);
}
|
f22c4f752de6d7487068825e879656307282214f.cu
|
#include <cinttypes>
#include <iostream>
#include <chrono>
#include <cuda.h>
#define THREAD_COUNT 128
#define TASK_WORK (1ULL << 30)
#define FAST_NEXT_INT
#ifdef BOINC
#include "boinc_api.h"
#if defined _WIN32 || defined _WIN64
#include "boinc_win.h"
#endif
#endif
#ifndef BOINC
#define boinc_fopen(file, mode) fopen(file, mode)
#define boinc_delete_file(file) remove(file)
#define boinc_begin_critical_section()
#define boinc_end_critical_section()
#define boinc_fraction_done(frac)
#define boinc_finish(s) exit(s)
#define boinc_time_to_checkpoint() true
#define boinc_checkpoint_completed()
#endif
namespace Random {
#define RANDOM__MULTIPLIER 25214903917ULL
#define RANDOM__MULTIPLIER_INVERSE 246154705703781ULL
#define RANDOM__ADDEND 11ULL
#define RANDOM__ADDEND_INVERSE 107048004364969ULL
#define RANDOM__MASK ((1ULL << 48) - 1)
__device__ uint64_t setSeed(uint64_t seed) {
return (seed ^ RANDOM__MULTIPLIER) & RANDOM__MASK;
}
__device__ int32_t next(uint64_t &seed, int bits) {
seed = (seed * RANDOM__MULTIPLIER + RANDOM__ADDEND) & RANDOM__MASK;
return (int32_t)(seed >> (48 - bits));
}
__device__ int32_t nextInt(uint64_t &seed) {
return next(seed, 32);
}
__device__ int32_t nextInt(uint64_t &seed, int bound) {
if ((bound & -bound) == bound) {
seed = (seed * RANDOM__MULTIPLIER + RANDOM__ADDEND) & RANDOM__MASK;
return (int32_t)((bound * (seed >> 17)) >> 31);
}
int32_t bits, value;
#ifndef FAST_NEXT_INT
do {
#endif
seed = (seed * RANDOM__MULTIPLIER + RANDOM__ADDEND) & RANDOM__MASK;
bits = seed >> 17;
value = bits % bound;
#ifndef FAST_NEXT_INT
} while (bits - value + (bound - 1) < 0);
#endif
return value;
}
__device__ uint64_t nextLong(uint64_t &seed) {
return ((uint64_t)next(seed, 32) << 32) + next(seed, 32);
}
__device__ float nextFloat(uint64_t &seed) {
return next(seed, 24) / ((float)(1 << 24));
}
__device__ double nextDouble(uint64_t &seed) {
return (((uint64_t)next(seed, 26) << 27) + next(seed, 27)) / (double)(1ULL << 53);
}
template <int n>
__device__ constexpr void advance(uint64_t &seed) {
uint64_t m = 1;
uint64_t a = 0;
for (int i = 0; i < n; i++) {
a = (a * RANDOM__MULTIPLIER + RANDOM__ADDEND) & RANDOM__MASK;
m = (m * RANDOM__MULTIPLIER) & RANDOM__MASK;
}
seed = (seed * m + a) & RANDOM__MASK;
}
}
__shared__ uint8_t sharedMemory[256 * THREAD_COUNT];
#define SHARED_MEMORY_ACCESS(n) sharedMemory[(threadIdx.x << 8) | n]
#define CASTED_SHARED_MEMORY_ACCESS(n) ((double*)sharedMemory)[(threadIdx.x << 5) | n]
namespace Terrain {
struct OctaveData {
double xOffset;
double yOffset;
double zOffset;
uint8_t permutations[256];
};
struct NoiseData {
OctaveData noise1[16];
OctaveData noise2[16];
OctaveData noise3[8];
OctaveData noise6[16];
};
__device__ void initializeOctave(uint64_t &random, OctaveData *octaveData) {
octaveData->xOffset = Random::nextDouble(random) * 256.0;
octaveData->yOffset = Random::nextDouble(random) * 256.0;
octaveData->zOffset = Random::nextDouble(random) * 256.0;
for (int i = 0; i < 256; i++) {
SHARED_MEMORY_ACCESS(i) = i;
}
for (int i = 0; i < 256; i++) {
uint8_t k = Random::nextInt(random, 256 - i) + i;
uint8_t l = SHARED_MEMORY_ACCESS(i);
octaveData->permutations[i] = SHARED_MEMORY_ACCESS(k);
SHARED_MEMORY_ACCESS(k) = l;
}
}
__device__ void initializeNoise(uint64_t worldSeed, NoiseData* noiseData) {
uint64_t random = Random::setSeed(worldSeed);
for (int i = 0; i < 16; i++) { initializeOctave(random, &noiseData->noise1[i]); }
for (int i = 0; i < 16; i++) { initializeOctave(random, &noiseData->noise2[i]); }
for (int i = 0; i < 8; i++) { initializeOctave(random, &noiseData->noise3[i]); }
#ifndef FAST_NEXT_INT
for (int i = 0; i < 14; i++) {
Random::advance<7>(random);
for (int j = 1; j < 256; j++) {
Random::nextInt(random, 256 - j);
}
}
#else
Random::advance<3668>(random);
#endif
for (int i = 0; i < 16; i++) { initializeOctave(random, &noiseData->noise6[i]); }
}
__device__ double lerp(double t, double a, double b) {
return a + t * (b - a);
}
__device__ double func_4110_a(int i, double x, double z) {
switch (i & 0xF) {
case 0x0:
return x;
case 0x1:
return -x;
case 0x2:
return x;
case 0x3:
return -x;
case 0x4:
return x + z;
case 0x5:
return -x + z;
case 0x6:
return x - z;
case 0x7:
return -x - z;
case 0x8:
return z;
case 0x9:
return -z;
case 0xA:
return -z;
case 0xB:
return -z;
case 0xC:
return x;
case 0xD:
return z;
case 0xE:
return -x;
case 0xF:
return -z;
default:
return 0;
}
}
__device__ double grad(int i, double x, double y, double z) {
switch (i & 0xF) {
case 0x0:
return x + y;
case 0x1:
return -x + y;
case 0x2:
return x - y;
case 0x3:
return -x - y;
case 0x4:
return x + z;
case 0x5:
return -x + z;
case 0x6:
return x - z;
case 0x7:
return -x - z;
case 0x8:
return y + z;
case 0x9:
return -y + z;
case 0xA:
return y - z;
case 0xB:
return -y - z;
case 0xC:
return y + x;
case 0xD:
return -y + z;
case 0xE:
return y - x;
case 0xF:
return -y - z;
default:
return 0;
}
}
__device__ uint8_t getPermutation(const uint8_t* __restrict__ permutations, int n) {
return permutations[n & 0xFF];
}
__device__ double optimizedNoise2D(const OctaveData* __restrict__ octaveDatas, double baseX, double baseZ, int xIteration, int zIteration, double noiseScaleX, double noiseScaleZ, int numOctaves) {
double outputValue = 0;
double octavesFactor = 1.0;
for (int i = 0; i < numOctaves; i++) {
double noiseFactorX = noiseScaleX * octavesFactor;
double noiseFactorZ = noiseScaleZ * octavesFactor;
double startX = (double)baseX * octavesFactor * noiseScaleX;
double startZ = (double)baseZ * octavesFactor * noiseScaleZ;
double octaveWidth = 1.0 / octavesFactor;
double xCoord = startX + (double)xIteration * noiseFactorX + octaveDatas[i].xOffset;
int xCoordFloor = (int)xCoord;
if (xCoord < (double)xCoordFloor) {
xCoordFloor--;
}
int xUnitCube = xCoordFloor & 0xFF;
xCoord -= xCoordFloor;
double fadeX = xCoord * xCoord * xCoord * (xCoord * (xCoord * 6.0 - 15.0) + 10.0);
double zCoord = startZ + (double)zIteration * noiseFactorZ + octaveDatas[i].zOffset;
int zCoordFloor = (int)zCoord;
if (zCoord < (double)zCoordFloor) {
zCoordFloor--;
}
int zUnitCube = zCoordFloor & 0xFF;
zCoord -= zCoordFloor;
double fadeZ = zCoord * zCoord * zCoord * (zCoord * (zCoord * 6.0 - 15.0) + 10.0);
int l = getPermutation(octaveDatas[i].permutations, xUnitCube) + 0;
int j1 = getPermutation(octaveDatas[i].permutations, l) + zUnitCube;
int k1 = getPermutation(octaveDatas[i].permutations, xUnitCube + 1) + 0;
int l1 = getPermutation(octaveDatas[i].permutations, k1) + zUnitCube;
double d9 = lerp(fadeX, func_4110_a(getPermutation(octaveDatas[i].permutations, j1), xCoord, zCoord), grad(getPermutation(octaveDatas[i].permutations, l1), xCoord - 1.0, 0.0, zCoord));
double d11 = lerp(fadeX, grad(getPermutation(octaveDatas[i].permutations, j1 + 1), xCoord, 0.0, zCoord - 1.0), grad(getPermutation(octaveDatas[i].permutations, l1 + 1), xCoord - 1.0, 0.0, zCoord - 1.0));
double d23 = lerp(fadeZ, d9, d11);
outputValue += d23 * octaveWidth;
octavesFactor /= 2.0;
}
return outputValue;
}
__device__ void optimizedNoise3D(const OctaveData* __restrict__ octaveDatas, int sharedMemoryOffset, double baseX, double baseY, double baseZ, int xIteration, int zIteration, double noiseScaleX, double noiseScaleY, double noiseScaleZ, int numOctaves, int yIterationStart, int yIterations) {
double octavesFactor = 1.0;
for (int i = 0; i < numOctaves; i++) {
double noiseFactorX = noiseScaleX * octavesFactor;
double noiseFactorY = noiseScaleY * octavesFactor;
double noiseFactorZ = noiseScaleZ * octavesFactor;
double startX = (double)baseX * octavesFactor * noiseScaleX;
double startY = (double)baseY * octavesFactor * noiseScaleY;
double startZ = (double)baseZ * octavesFactor * noiseScaleZ;
int i2 = -1;
double d13 = 0.0;
double d15 = 0.0;
double d16 = 0.0;
double d18 = 0.0;
double octaveWidth = 1.0 / octavesFactor;
double xCoord = startX + (double)xIteration * noiseFactorX + octaveDatas[i].xOffset;
int xCoordFloor = (int)xCoord;
if (xCoord < (double)xCoordFloor) {
xCoordFloor--;
}
int xUnitCube = xCoordFloor & 0xFF;
xCoord -= xCoordFloor;
double fadeX = xCoord * xCoord * xCoord * (xCoord * (xCoord * 6.0 - 15.0) + 10.0);
double zCoord = startZ + (double)zIteration * noiseFactorZ + octaveDatas[i].zOffset;
int zCoordFloor = (int)zCoord;
if (zCoord < (double)zCoordFloor) {
zCoordFloor--;
}
int zUnitCube = zCoordFloor & 0xFF;
zCoord -= zCoordFloor;
double fadeZ = zCoord * zCoord * zCoord * (zCoord * (zCoord * 6.0 - 15.0) + 10.0);
for (int yIteration = 0; yIteration < yIterationStart + yIterations; yIteration++) {
double yCoord = startY + (double)yIteration * noiseFactorY + octaveDatas[i].yOffset;
int yCoordFloor = (int)yCoord;
if (yCoord < (double)yCoordFloor) {
yCoordFloor--;
}
int yUnitCube = yCoordFloor & 0xFF;
yCoord -= yCoordFloor;
double fadeY = yCoord * yCoord * yCoord * (yCoord * (yCoord * 6.0 - 15.0) + 10.0);
if (yIteration == 0 || yUnitCube != i2) {
i2 = yUnitCube;
int j2 = getPermutation(octaveDatas[i].permutations, xUnitCube) + yUnitCube;
int k2 = getPermutation(octaveDatas[i].permutations, j2) + zUnitCube;
int l2 = getPermutation(octaveDatas[i].permutations, j2 + 1) + zUnitCube;
int i3 = getPermutation(octaveDatas[i].permutations, xUnitCube + 1) + yUnitCube;
int k3 = getPermutation(octaveDatas[i].permutations, i3) + zUnitCube;
int l3 = getPermutation(octaveDatas[i].permutations, i3 + 1) + zUnitCube;
d13 = lerp(fadeX, grad(getPermutation(octaveDatas[i].permutations, k2), xCoord, yCoord, zCoord), grad(getPermutation(octaveDatas[i].permutations, k3), xCoord - 1.0, yCoord, zCoord));
d15 = lerp(fadeX, grad(getPermutation(octaveDatas[i].permutations, l2), xCoord, yCoord - 1.0, zCoord), grad(getPermutation(octaveDatas[i].permutations, l3), xCoord - 1.0, yCoord - 1.0, zCoord));
d16 = lerp(fadeX, grad(getPermutation(octaveDatas[i].permutations, k2 + 1), xCoord, yCoord, zCoord - 1.0), grad(getPermutation(octaveDatas[i].permutations, k3 + 1), xCoord - 1.0, yCoord, zCoord - 1.0));
d18 = lerp(fadeX, grad(getPermutation(octaveDatas[i].permutations, l2 + 1), xCoord, yCoord - 1.0, zCoord - 1.0), grad(getPermutation(octaveDatas[i].permutations, l3 + 1), xCoord - 1.0, yCoord - 1.0, zCoord - 1.0));
}
double d28 = lerp(fadeY, d13, d15);
double d29 = lerp(fadeY, d16, d18);
double d30 = lerp(fadeZ, d28, d29);
if (yIteration >= yIterationStart) {
CASTED_SHARED_MEMORY_ACCESS(yIteration - yIterationStart + sharedMemoryOffset) += d30 * octaveWidth;
}
}
octavesFactor /= 2.0;
}
}
__device__ void mixNoiseValues(int sharedMemoryOutputOffset, int sharedMemoryNoise1Offset, int sharedMemoryNoise2Offset, int sharedMemoryNoise3Offset, double noise6, int yAreaStart, int yAreas) {
int i2 = 0;
int j2 = 0;
float f1 = 0.37000000476837158203125f;
float f2 = -0.07500000298023223876953125;
double d2 = noise6 / 8000.0;
if (d2 < 0.0) {
d2 = -d2 * 0.29999999999999999;
}
d2 = d2 * 3.0 - 2.0;
if (d2 < 0.0) {
d2 /= 2.0;
if (d2 < -1.0) {
d2 = -1.0;
}
d2 /= 1.3999999999999999;
d2 /= 2.0;
} else {
if (d2 > 1.0) {
d2 = 1.0;
}
d2 /= 8.0;
}
j2++;
for (int k3 = yAreaStart; k3 < (yAreaStart + yAreas); k3++) {
double d3 = f2;
double d4 = f1;
d3 += d2 * 0.20000000000000001;
d3 = (d3 * (double)17) / 16.0;
double d5 = (double)17 / 2.0 + d3 * 4.0;
double d6 = 0.0;
double d7 = (((double)k3 - d5) * 12.0 * 128.0) / (double)(1 << 7) / d4;
if (d7 < 0.0) {
d7 *= 4.0;
}
double d8 = CASTED_SHARED_MEMORY_ACCESS(i2 + sharedMemoryNoise1Offset) / 512.0;
double d9 = CASTED_SHARED_MEMORY_ACCESS(i2 + sharedMemoryNoise2Offset) / 512.0;
double d10 = (CASTED_SHARED_MEMORY_ACCESS(i2 + sharedMemoryNoise3Offset) / 10.0 + 1.0) / 2.0;
if (d10 < 0.0) {
d6 = d8;
} else if (d10 > 1.0) {
d6 = d9;
} else {
d6 = d8 + (d9 - d8) * d10;
}
d6 -= d7;
if (k3 > 17 - 4) {
double d11 = (float)(k3 - (17 - 4)) / 3.0f;
d6 = d6 * (1.0 - d11) + -10.0 * d11;
}
CASTED_SHARED_MEMORY_ACCESS(i2 + sharedMemoryOutputOffset) = d6;
i2++;
}
}
__device__ void optimizedNoise(const NoiseData* __restrict__ noiseData, int sharedMemoryWriteOffset, int32_t x, int32_t y, int32_t z, int xArea, int zArea, int yAreaStart, int yAreas) {
double noise6Value = optimizedNoise2D(noiseData->noise6, (double)x, (double)z, xArea, zArea, 200.0, 200.0, 16);
for (int i = 0; i < yAreas; i++) {
CASTED_SHARED_MEMORY_ACCESS(i) = 0.0;
}
for (int i = 0; i < yAreas; i++) {
CASTED_SHARED_MEMORY_ACCESS(i + yAreas) = 0.0;
}
for (int i = 0; i < yAreas; i++) {
CASTED_SHARED_MEMORY_ACCESS(i + yAreas + yAreas) = 0.0;
}
optimizedNoise3D(noiseData->noise1, 0, (double)x, (double)y, (double)z, xArea, zArea, 684.41200000000003, 684.41200000000003, 684.41200000000003, 16, yAreaStart, yAreas);
optimizedNoise3D(noiseData->noise2, yAreas, (double)x, (double)y, (double)z, xArea, zArea, 684.41200000000003, 684.41200000000003, 684.41200000000003, 16, yAreaStart, yAreas);
optimizedNoise3D(noiseData->noise3, yAreas + yAreas, (double)x, (double)y, (double)z, xArea, zArea, 8.5551500000000011, 4.2775750000000006, 8.5551500000000011, 8, yAreaStart, yAreas);
mixNoiseValues(sharedMemoryWriteOffset, 0, yAreas, yAreas + yAreas, noise6Value, yAreaStart, yAreas);
}
__device__ void optimizedPointLerp(int sharedMemoryOffset, double bottomRight, double bottomLeft, double topRight, double topLeft, double bottomRight2, double bottomLeft2, double topRight2, double topLeft2, uint8_t baseHeight) {
double bottomRightDiff = (bottomRight2 - bottomRight) * 0.125;
double bottomLeftDiff = (bottomLeft2 - bottomLeft) * 0.125;
double topRightDiff = (topRight2 - topRight) * 0.125;
double topLeftDiff = (topLeft2 - topLeft) * 0.125;
for (int y = 0; y < 8; y++) {
double localBottomRight = bottomRight;
double localTopRight = topRight;
double localBottomRightDiff = (bottomLeft - bottomRight) * 0.25;
double localTopRightDiff = (topLeft - topRight) * 0.25;
for (int x = 0; x < 4; x++) {
double localHeight = localBottomRight;
double zStep = (localTopRight - localBottomRight) * 0.25;
localHeight -= zStep;
for (int z = 0; z < 4; z++) {
if ((localHeight += zStep) > 0.0) {
SHARED_MEMORY_ACCESS(x * 4 + z + sharedMemoryOffset) = baseHeight + y;
}
}
localBottomRight += localBottomRightDiff;
localTopRight += localTopRightDiff;
}
bottomRight += bottomRightDiff;
bottomLeft += bottomLeftDiff;
topRight += topRightDiff;
topLeft += topLeftDiff;
}
}
__device__ uint8_t optimizedMod4Lerp(double a, double b, uint8_t baseHeight) {
uint8_t height = 0;
double diff = (b - a) * 0.125;
for (int i = 0; i < 8; i++) {
if (a > 0) {
height = baseHeight + i;
}
a += diff;
}
return height;
}
}
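// checkTerrain samples the terrain noise around a fixed chunk location and rejects any world
// seed whose interpolated surface heights do not match the hard-coded target pattern below.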
__device__ bool checkTerrain(uint64_t worldSeed) {
Terrain::NoiseData noiseData;
Terrain::initializeNoise(worldSeed, &noiseData);
Terrain::optimizedNoise(&noiseData, 9, -22 * 4, 0, 2 * 4, 0, 2, 8, 2);
if (Terrain::optimizedMod4Lerp(CASTED_SHARED_MEMORY_ACCESS(9), CASTED_SHARED_MEMORY_ACCESS(10), 64) != 65) {
return false;
}
Terrain::optimizedNoise(&noiseData, 11, -22 * 4, 0, 2 * 4, 1, 2, 8, 2);
if (Terrain::optimizedMod4Lerp(CASTED_SHARED_MEMORY_ACCESS(11), CASTED_SHARED_MEMORY_ACCESS(12), 64) != 67) {
return false;
}
Terrain::optimizedNoise(&noiseData, 13, -22 * 4, 0, 2 * 4, 0, 3, 8, 2);
if (Terrain::optimizedMod4Lerp(CASTED_SHARED_MEMORY_ACCESS(13), CASTED_SHARED_MEMORY_ACCESS(14), 64) != 67) {
return false;
}
Terrain::optimizedNoise(&noiseData, 15, -22 * 4, 0, 2 * 4, 1, 3, 7, 3);
if (CASTED_SHARED_MEMORY_ACCESS(16) > 0) { return false; }
if (Terrain::optimizedMod4Lerp(CASTED_SHARED_MEMORY_ACCESS(15), CASTED_SHARED_MEMORY_ACCESS(16), 56) != 63) {
return false;
}
Terrain::optimizedNoise(&noiseData, 18, -22 * 4, 0, 2 * 4, 2, 3, 7, 2);
if (CASTED_SHARED_MEMORY_ACCESS(19) > 0) { return false; }
if (Terrain::optimizedMod4Lerp(CASTED_SHARED_MEMORY_ACCESS(18), CASTED_SHARED_MEMORY_ACCESS(19), 56) != 63) {
return false;
}
int sharedMemoryOffset = 0;
for (int i = 0; i < 16; i++) {
SHARED_MEMORY_ACCESS(sharedMemoryOffset + i) = 0;
}
Terrain::optimizedPointLerp(sharedMemoryOffset, CASTED_SHARED_MEMORY_ACCESS(9), CASTED_SHARED_MEMORY_ACCESS(11), CASTED_SHARED_MEMORY_ACCESS(13), CASTED_SHARED_MEMORY_ACCESS(16), CASTED_SHARED_MEMORY_ACCESS(10), CASTED_SHARED_MEMORY_ACCESS(12), CASTED_SHARED_MEMORY_ACCESS(14), CASTED_SHARED_MEMORY_ACCESS(17), 64);
if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 2) != 66) { return false; }
if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 3) != 67) { return false; }
if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 4) != 65) { return false; }
if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 6) != 66) { return false; }
if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 7) != 66) { return false; }
if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 8) != 65) { return false; }
// if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 9) != 65) { return false; }
if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 12) != 66) { return false; }
if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 13) != 65) { return false; }
if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 14) != 64) { return false; }
if (SHARED_MEMORY_ACCESS(sharedMemoryOffset + 15) != 64) { return false; }
return true;
}
__device__ __managed__ uint32_t outputCounter = 0;
__device__ __managed__ uint64_t outputBuffer[100000];
__global__ void __launch_bounds__(THREAD_COUNT, 3) gpuWork(uint64_t seedOffset) {
uint64_t worldSeed = (uint64_t)blockIdx.x * (uint64_t)blockDim.x + (uint64_t)threadIdx.x + seedOffset;
if (!checkTerrain(worldSeed)) {
return;
}
uint32_t idx = atomicAdd(&outputCounter, 1);
outputBuffer[idx] = worldSeed;
}
uint64_t milliseconds() {
return (std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch())).count();
}
#define GPU_ASSERT(code) gpuAssert((code), __FILE__, __LINE__)
inline void gpuAssert(cudaError_t code, const char *file, int line) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s (code %d) %s %d\n", cudaGetErrorString(code), code, file, line);
boinc_finish(code);
}
}
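// calculateBlockSize empirically tunes the launch grid: it sets candidate block-count bits one
// at a time, times a throwaway kernel launch for each candidate, and keeps the largest block
// count whose single launch still finishes within the given threshold (in seconds).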
int calculateBlockSize(double threshold) {
gpuWork<<<1, THREAD_COUNT>>>(0);
GPU_ASSERT(cudaPeekAtLastError());
GPU_ASSERT(cudaDeviceSynchronize());
GPU_ASSERT(cudaPeekAtLastError());
outputCounter = 0;
int setBits = 0;
int lowestSetBit = 30;
for (int i = 0; i < 30; i++) {
int j;
for (j = 0; j < lowestSetBit; j++) {
int32_t newBits = setBits | (1 << j);
uint64_t startTime = milliseconds();
gpuWork<<<newBits, THREAD_COUNT>>>(0);
GPU_ASSERT(cudaPeekAtLastError());
GPU_ASSERT(cudaDeviceSynchronize());
GPU_ASSERT(cudaPeekAtLastError());
outputCounter = 0;
uint64_t endTime = milliseconds();
double elapsed = (double)(endTime - startTime) / 1000.0;
if (elapsed > threshold) {
if (j != 0) {
setBits |= (1 << (j - 1));
lowestSetBit = (j - 1);
} else if (j == 0) {
lowestSetBit = 0;
}
break;
}
}
if (lowestSetBit == 0) { break; }
if (j == lowestSetBit) {
setBits |= (1 << (j - 1));
lowestSetBit = (j - 1);
}
}
return setBits;
}
struct CheckpointData {
int lastIteration;
double elapsed;
int blockCount;
};
int main(int argc, char* argv[]) {
int taskNumber = 0;
int device = 0;
for (int i = 1; i < argc; i += 2) {
const char *param = argv[i];
if (strcmp(param, "-t") == 0 || strcmp(param, "--task") == 0) {
taskNumber = atoi(argv[i + 1]);
} else if (strcmp(param, "-d") == 0 || strcmp(param, "--device") == 0) {
device = atoi(argv[i + 1]);
}
}
int startIteration = 0;
double elapsed = 0;
int BLOCK_COUNT = 0;
fprintf(stderr, "Recieved work unit: %d.\n", taskNumber);
fflush(stderr);
#ifdef BOINC
BOINC_OPTIONS options;
boinc_options_defaults(options);
options.normal_thread_priority = true;
boinc_init_options(&options);
APP_INIT_DATA aid;
boinc_get_init_data(aid);
if (aid.gpu_device_num >= 0) {
fprintf(stderr, "boinc gpu: %d, cli gpu: %d.\n", aid.gpu_device_num, device);
device = aid.gpu_device_num;
} else {
fprintf(stderr, "cli gpu: %d.\n", device);
}
#endif
cudaSetDevice(device);
GPU_ASSERT(cudaPeekAtLastError());
GPU_ASSERT(cudaDeviceSynchronize());
GPU_ASSERT(cudaPeekAtLastError());
FILE* checkpointFile = boinc_fopen("trailer_checkpoint.txt", "rb");
if (checkpointFile) {
boinc_begin_critical_section();
struct CheckpointData checkpointData;
fread(&checkpointData, sizeof(checkpointData), 1, checkpointFile);
startIteration = checkpointData.lastIteration + 1;
elapsed = checkpointData.elapsed;
BLOCK_COUNT = checkpointData.blockCount;
fclose(checkpointFile);
fprintf(stderr, "Loaded checkpoint %d %.2f %d.\n", startIteration, elapsed, BLOCK_COUNT);
fflush(stderr);
boinc_end_critical_section();
} else {
fprintf(stderr, "No checkpoint to load.\n");
}
if (BLOCK_COUNT == 0) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
int cc = deviceProp.major * 10 + deviceProp.minor;
if (cc <= 52) {
BLOCK_COUNT = calculateBlockSize(0.02);
} else if (deviceProp.major == 6) {
BLOCK_COUNT = calculateBlockSize(0.1);
} else if (deviceProp.major == 7) {
BLOCK_COUNT = calculateBlockSize(0.15);
} else if (deviceProp.major == 8) {
BLOCK_COUNT = calculateBlockSize(0.5);
} else {
fprintf(stderr, "Unrecognized compute capability.\n");
fflush(stderr);
boinc_finish(1);
}
fprintf(stderr, "Calculated block count: %d.\n", BLOCK_COUNT);
if (BLOCK_COUNT == 0) { BLOCK_COUNT = 1; }
fflush(stderr);
}
uint64_t GRID_WORK = (uint64_t)BLOCK_COUNT * THREAD_COUNT;
int ITERATIONS_NEEDED = ((TASK_WORK + GRID_WORK - 1) / GRID_WORK);
for (int i = startIteration; i < ITERATIONS_NEEDED; i++) {
uint64_t seedOffset = (TASK_WORK * taskNumber) + GRID_WORK * i;
uint64_t startTime = milliseconds();
gpuWork<<<BLOCK_COUNT, THREAD_COUNT>>>(seedOffset);
GPU_ASSERT(cudaPeekAtLastError());
GPU_ASSERT(cudaDeviceSynchronize());
GPU_ASSERT(cudaPeekAtLastError());
uint64_t endTime = milliseconds();
boinc_begin_critical_section();
double localElapsed = ((double)(endTime - startTime) / 1000);
elapsed += localElapsed;
if (boinc_time_to_checkpoint()) {
struct CheckpointData checkpointData;
checkpointData.lastIteration = i;
checkpointData.elapsed = elapsed;
checkpointData.blockCount = BLOCK_COUNT;
FILE* checkpointFile = boinc_fopen("trailer_checkpoint.txt", "wb");
fwrite(&checkpointData, sizeof(checkpointData), 1, checkpointFile);
fclose(checkpointFile);
boinc_checkpoint_completed();
}
if (outputCounter > 0) {
FILE *seedsOut = boinc_fopen("trailer_seeds.txt", "a");
for (int j = 0; j < outputCounter; j++) {
if (outputBuffer[j] < (TASK_WORK * (taskNumber + 1))) {
fprintf(seedsOut, "Seed: %llu\n", outputBuffer[j]);
}
}
fclose(seedsOut);
outputCounter = 0;
}
double fracDone = (double)i / ITERATIONS_NEEDED;
boinc_fraction_done(fracDone);
boinc_end_critical_section();
}
boinc_begin_critical_section();
FILE *seedsOut = boinc_fopen("trailer_seeds.txt", "a");
fclose(seedsOut);
fprintf(stderr, "Finished in %.2f seconds. Speed: %.2f/s.\n", elapsed, (double)TASK_WORK / elapsed);
fflush(stderr);
boinc_delete_file("trailer_checkpoint.txt");
boinc_end_critical_section();
boinc_finish(0);
}
|
85960219fdf22fafa9092a087f4f2c725e08f60f.hip
|
// !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2019-2022 by Contributors
* \file aft_obj.cu
* \brief Definition of AFT loss for survival analysis.
* \author Avinash Barnwal, Hyunsu Cho and Toby Hocking
*/
#include <vector>
#include <limits>
#include <memory>
#include <utility>
#include "xgboost/host_device_vector.h"
#include "xgboost/json.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/logging.h"
#include "xgboost/objective.h"
#include "../common/transform.h"
#include "../common/survival_util.h"
using AFTParam = xgboost::common::AFTParam;
using ProbabilityDistributionType = xgboost::common::ProbabilityDistributionType;
template <typename Distribution>
using AFTLoss = xgboost::common::AFTLoss<Distribution>;
namespace xgboost {
namespace obj {
#if defined(XGBOOST_USE_CUDA)
DMLC_REGISTRY_FILE_TAG(aft_obj_gpu);
#endif // defined(XGBOOST_USE_CUDA)
class AFTObj : public ObjFunction {
public:
void Configure(Args const& args) override {
param_.UpdateAllowUnknown(args);
}
ObjInfo Task() const override { return ObjInfo::kSurvival; }
template <typename Distribution>
void GetGradientImpl(const HostDeviceVector<bst_float> &preds,
const MetaInfo &info,
HostDeviceVector<GradientPair> *out_gpair,
size_t ndata, int device, bool is_null_weight,
float aft_loss_distribution_scale) {
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels_lower_bound,
common::Span<const bst_float> _labels_upper_bound,
common::Span<const bst_float> _weights) {
const double pred = static_cast<double>(_preds[_idx]);
const double label_lower_bound = static_cast<double>(_labels_lower_bound[_idx]);
const double label_upper_bound = static_cast<double>(_labels_upper_bound[_idx]);
const float grad = static_cast<float>(
AFTLoss<Distribution>::Gradient(label_lower_bound, label_upper_bound,
pred, aft_loss_distribution_scale));
const float hess = static_cast<float>(
AFTLoss<Distribution>::Hessian(label_lower_bound, label_upper_bound,
pred, aft_loss_distribution_scale));
const bst_float w = is_null_weight ? 1.0f : _weights[_idx];
_out_gpair[_idx] = GradientPair(grad * w, hess * w);
},
common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval(
out_gpair, &preds, &info.labels_lower_bound_, &info.labels_upper_bound_,
&info.weights_);
}
void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, int /*iter*/,
HostDeviceVector<GradientPair>* out_gpair) override {
const size_t ndata = preds.Size();
CHECK_EQ(info.labels_lower_bound_.Size(), ndata);
CHECK_EQ(info.labels_upper_bound_.Size(), ndata);
out_gpair->Resize(ndata);
const int device = ctx_->gpu_id;
const float aft_loss_distribution_scale = param_.aft_loss_distribution_scale;
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
switch (param_.aft_loss_distribution) {
case common::ProbabilityDistributionType::kNormal:
GetGradientImpl<common::NormalDistribution>(preds, info, out_gpair, ndata, device,
is_null_weight, aft_loss_distribution_scale);
break;
case common::ProbabilityDistributionType::kLogistic:
GetGradientImpl<common::LogisticDistribution>(preds, info, out_gpair, ndata, device,
is_null_weight, aft_loss_distribution_scale);
break;
case common::ProbabilityDistributionType::kExtreme:
GetGradientImpl<common::ExtremeDistribution>(preds, info, out_gpair, ndata, device,
is_null_weight, aft_loss_distribution_scale);
break;
default:
LOG(FATAL) << "Unrecognized distribution";
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
// Trees give us a prediction in log scale, so exponentiate
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = exp(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
void EvalTransform(HostDeviceVector<bst_float>* /*io_preds*/) override {
// do nothing here, since the AFT metric expects untransformed prediction score
}
bst_float ProbToMargin(bst_float base_score) const override {
return ::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "aft-nloglik";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("survival:aft");
out["aft_loss_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["aft_loss_param"], ¶m_);
}
private:
AFTParam param_;
};
// register the objective functions
XGBOOST_REGISTER_OBJECTIVE(AFTObj, "survival:aft")
.describe("AFT loss function")
.set_body([]() { return new AFTObj(); });
} // namespace obj
} // namespace xgboost
|
85960219fdf22fafa9092a087f4f2c725e08f60f.cu
|
/*!
* Copyright 2019-2022 by Contributors
* \file aft_obj.cu
* \brief Definition of AFT loss for survival analysis.
* \author Avinash Barnwal, Hyunsu Cho and Toby Hocking
*/
#include <vector>
#include <limits>
#include <memory>
#include <utility>
#include "xgboost/host_device_vector.h"
#include "xgboost/json.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/logging.h"
#include "xgboost/objective.h"
#include "../common/transform.h"
#include "../common/survival_util.h"
using AFTParam = xgboost::common::AFTParam;
using ProbabilityDistributionType = xgboost::common::ProbabilityDistributionType;
template <typename Distribution>
using AFTLoss = xgboost::common::AFTLoss<Distribution>;
namespace xgboost {
namespace obj {
#if defined(XGBOOST_USE_CUDA)
DMLC_REGISTRY_FILE_TAG(aft_obj_gpu);
#endif // defined(XGBOOST_USE_CUDA)
class AFTObj : public ObjFunction {
public:
void Configure(Args const& args) override {
param_.UpdateAllowUnknown(args);
}
ObjInfo Task() const override { return ObjInfo::kSurvival; }
template <typename Distribution>
void GetGradientImpl(const HostDeviceVector<bst_float> &preds,
const MetaInfo &info,
HostDeviceVector<GradientPair> *out_gpair,
size_t ndata, int device, bool is_null_weight,
float aft_loss_distribution_scale) {
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels_lower_bound,
common::Span<const bst_float> _labels_upper_bound,
common::Span<const bst_float> _weights) {
const double pred = static_cast<double>(_preds[_idx]);
const double label_lower_bound = static_cast<double>(_labels_lower_bound[_idx]);
const double label_upper_bound = static_cast<double>(_labels_upper_bound[_idx]);
const float grad = static_cast<float>(
AFTLoss<Distribution>::Gradient(label_lower_bound, label_upper_bound,
pred, aft_loss_distribution_scale));
const float hess = static_cast<float>(
AFTLoss<Distribution>::Hessian(label_lower_bound, label_upper_bound,
pred, aft_loss_distribution_scale));
const bst_float w = is_null_weight ? 1.0f : _weights[_idx];
_out_gpair[_idx] = GradientPair(grad * w, hess * w);
},
common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval(
out_gpair, &preds, &info.labels_lower_bound_, &info.labels_upper_bound_,
&info.weights_);
}
void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, int /*iter*/,
HostDeviceVector<GradientPair>* out_gpair) override {
const size_t ndata = preds.Size();
CHECK_EQ(info.labels_lower_bound_.Size(), ndata);
CHECK_EQ(info.labels_upper_bound_.Size(), ndata);
out_gpair->Resize(ndata);
const int device = ctx_->gpu_id;
const float aft_loss_distribution_scale = param_.aft_loss_distribution_scale;
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
switch (param_.aft_loss_distribution) {
case common::ProbabilityDistributionType::kNormal:
GetGradientImpl<common::NormalDistribution>(preds, info, out_gpair, ndata, device,
is_null_weight, aft_loss_distribution_scale);
break;
case common::ProbabilityDistributionType::kLogistic:
GetGradientImpl<common::LogisticDistribution>(preds, info, out_gpair, ndata, device,
is_null_weight, aft_loss_distribution_scale);
break;
case common::ProbabilityDistributionType::kExtreme:
GetGradientImpl<common::ExtremeDistribution>(preds, info, out_gpair, ndata, device,
is_null_weight, aft_loss_distribution_scale);
break;
default:
LOG(FATAL) << "Unrecognized distribution";
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
// Trees give us a prediction in log scale, so exponentiate
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = exp(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
void EvalTransform(HostDeviceVector<bst_float>* /*io_preds*/) override {
// do nothing here, since the AFT metric expects untransformed prediction score
}
bst_float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "aft-nloglik";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("survival:aft");
out["aft_loss_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["aft_loss_param"], ¶m_);
}
private:
AFTParam param_;
};
// register the objective functions
XGBOOST_REGISTER_OBJECTIVE(AFTObj, "survival:aft")
.describe("AFT loss function")
.set_body([]() { return new AFTObj(); });
} // namespace obj
} // namespace xgboost
|
cf67a970dd9b792a05c54e11af4a42fa11ac7a11.hip
|
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include "relulayer.h"
#include "hip/hip_runtime.h"
#include "math.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdexcept>
__global__ void ReluLayer_Forward_cu(ReluNode *node, double *previousLayerForward, double *out)
{
if (previousLayerForward[blockIdx.x] < 0)
{
out[blockIdx.x] = 0;
}
else
{
out[blockIdx.x] = previousLayerForward[blockIdx.x];
}
}
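// Backward pass: the gradient from the next layer is propagated only through elements whose
// forward activation was positive; everywhere else the gradient is zeroed.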
__global__ void ReluLayer_Backward_cu(ReluNode *node, double *forward, double* nextlayerBackward, double *out, double learnRate)
{
if (forward[blockIdx.x] <= 0)
{
out[blockIdx.x] = 0;
}
else
{
out[blockIdx.x] = nextlayerBackward[blockIdx.x];
}
}
void ReluLayer_Forward(ReluNode *node, double *previousLayerForward, double *output, int nodeCount)
{
hipLaunchKernelGGL(( ReluLayer_Forward_cu) , dim3(nodeCount), dim3(1) , 0, 0, node, previousLayerForward, output);
if (hipGetLastError() != hipError_t::hipSuccess)
{
throw std::runtime_error("ReluLayer Forward CUDA method returned an error");
}
if (hipDeviceSynchronize() != hipError_t::hipSuccess)
{
throw std::runtime_error("ReluLayer Forward CUDA syncronize returned an error");
}
}
void ReluLayer_Backward(ReluNode *node, double *forward, double* nextlayerBackward, double *output, int nodeCount, double learnRate)
{
hipLaunchKernelGGL(( ReluLayer_Backward_cu) , dim3(nodeCount), dim3(1) , 0, 0, node, forward, nextlayerBackward, output, learnRate);
if (hipGetLastError() != hipError_t::hipSuccess)
{
throw std::runtime_error("ReluLayer Forward CUDA method returned an error");
}
if (hipDeviceSynchronize() != hipError_t::hipSuccess)
{
throw std::runtime_error("ReluLayer Forward CUDA syncronize returned an error");
}
}
|
cf67a970dd9b792a05c54e11af4a42fa11ac7a11.cu
|
#pragma once
#include "relulayer.h"
#include "cuda_runtime.h"
#include "math.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdexcept>
__global__ void ReluLayer_Forward_cu(ReluNode *node, double *previousLayerForward, double *out)
{
if (previousLayerForward[blockIdx.x] < 0)
{
out[blockIdx.x] = 0;
}
else
{
out[blockIdx.x] = previousLayerForward[blockIdx.x];
}
}
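// Backward pass: the gradient from the next layer is propagated only through elements whose
// forward activation was positive; everywhere else the gradient is zeroed.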
__global__ void ReluLayer_Backward_cu(ReluNode *node, double *forward, double* nextlayerBackward, double *out, double learnRate)
{
if (forward[blockIdx.x] <= 0)
{
out[blockIdx.x] = 0;
}
else
{
out[blockIdx.x] = nextlayerBackward[blockIdx.x];
}
}
void ReluLayer_Forward(ReluNode *node, double *previousLayerForward, double *output, int nodeCount)
{
ReluLayer_Forward_cu <<<nodeCount, 1 >>>(node, previousLayerForward, output);
if (cudaGetLastError() != cudaError::cudaSuccess)
{
throw std::runtime_error("ReluLayer Forward CUDA method returned an error");
}
if (cudaDeviceSynchronize() != cudaError::cudaSuccess)
{
throw std::runtime_error("ReluLayer Forward CUDA syncronize returned an error");
}
}
void ReluLayer_Backward(ReluNode *node, double *forward, double* nextlayerBackward, double *output, int nodeCount, double learnRate)
{
ReluLayer_Backward_cu <<<nodeCount, 1 >>>(node, forward, nextlayerBackward, output, learnRate);
if (cudaGetLastError() != cudaError::cudaSuccess)
{
throw std::runtime_error("ReluLayer Forward CUDA method returned an error");
}
if (cudaDeviceSynchronize() != cudaError::cudaSuccess)
{
throw std::runtime_error("ReluLayer Forward CUDA syncronize returned an error");
}
}
|
1e544d61f875cd75c2fb080b3d2cbe0667d6c8a1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
#include "common.h"
// GPU kernel to perform Vector Scaling
__global__ void vector_scalingKernel(float* ad, float* cd, float scaleFactor, int size)
{
// Retrieve the global thread id
int th_id = threadIdx.x + blockIdx.x * blockDim.x;
// Perform vector scaling with a grid-stride loop
while(th_id<size) {
cd[th_id] = ad[th_id] * scaleFactor;
th_id += blockDim.x * gridDim.x;
}
}
bool scaleVectorGPU( float* a, float* c, float scaleFactor, int size )
{
// Error return value
hipError_t status;
// Number of bytes in a vector
int bytes = size * sizeof(float);
// Pointer to the device arrays
float *ad, *cd;
// Device pointers to pinned memory
hipHostGetDevicePointer( (void**)&ad, a, 0 );
hipHostGetDevicePointer( (void**)&cd, c, 0 );
dim3 dimBlock(1024); // 1024 threads are contained in a block
dim3 dimGrid((size+1023)/1024);
// Launch the kernel on a size-by-size block of threads
hipLaunchKernelGGL(( vector_scalingKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, cd, scaleFactor, size);
hipDeviceSynchronize();// Sync threads
// Check for errors
status = hipGetLastError();
if (status != hipSuccess) {
std::cout << "Kernel failed: " << hipGetErrorString(status) << std::endl;
return false;
}
// Success
return true;
}
|
1e544d61f875cd75c2fb080b3d2cbe0667d6c8a1.cu
|
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
#include "common.h"
// GPU kernel to perform Vector Scaling
__global__ void vector_scalingKernel(float* ad, float* cd, float scaleFactor, int size)
{
// Retrieve the global thread id
int th_id = threadIdx.x + blockIdx.x * blockDim.x;
// Perform vector scaling with a grid-stride loop
while(th_id<size) {
cd[th_id] = ad[th_id] * scaleFactor;
th_id += blockDim.x * gridDim.x;
}
}
bool scaleVectorGPU( float* a, float* c, float scaleFactor, int size )
{
// Error return value
cudaError_t status;
// Number of bytes in a vector
int bytes = size * sizeof(float);
// Pointer to the device arrays
float *ad, *cd;
// Device pointers to pinned memory
cudaHostGetDevicePointer( (void**)&ad, a, 0 );
cudaHostGetDevicePointer( (void**)&cd, c, 0 );
dim3 dimBlock(1024); // 1024 threads are contained in a block
dim3 dimGrid((size+1023)/1024);
// Launch the kernel on a size-by-size block of threads
vector_scalingKernel<<<dimGrid, dimBlock>>>(ad, cd, scaleFactor, size);
cudaThreadSynchronize();// Sync threads
// Check for errors
status = cudaGetLastError();
if (status != cudaSuccess) {
std::cout << "Kernel failed: " << cudaGetErrorString(status) << std::endl;
return false;
}
// Success
return true;
}
|
c3dd77d62a659921b73b784d0b926a91c7df4598.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "SceECM.h"
#include "SceCells.h" // Because of forward declaration
//# define debugModeECM
// bending stiffness is given inside the code. It should be given as an input from a txt file.
//isInitPhase bool variable is not active anymore.
//Right now it is assumed that ECM stiffness is the same everywhere.
__constant__ double sceInterCell_ECM[5];
//__constant__ double wLCPara_ECM[4];
__constant__ double restLenECMAdhSpringGPU ;
__constant__ double maxLenECMAdhSpringGPU ;
__constant__ double kAdhECMGPU ;
__constant__ double stiffnessECMBasalGPU ;
__constant__ double stiffnessECMBCGPU ;
__constant__ double stiffnessECMPeripGPU ;
__constant__ double lknotECMBasalGPU ;
__constant__ double lknotECMBCGPU ;
__constant__ double lknotECMPeripGPU ;
const double smallNumber=.000001 ;
namespace patch{
template <typename T> std::string to_string (const T& n)
{
std:: ostringstream stm ;
stm << n ;
return stm.str() ;
}
}
__device__
void DefineECMStiffnessAndLknot ( EType nodeType, double & stiffness, double & sponLen) {
if (nodeType==excm) {
stiffness=stiffnessECMBasalGPU ;
sponLen=lknotECMBasalGPU ;
}
if (nodeType==perip) {
stiffness=stiffnessECMPeripGPU ;
sponLen=lknotECMPeripGPU ;
}
if (nodeType==bc2) {
stiffness=stiffnessECMBCGPU;
sponLen=lknotECMBCGPU ;
}
}
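// calMorse_ECM returns the magnitude of a two-exponential (Morse-like) interaction force;
// beyond the cutoff distance sceInterCell_ECM[4] the force is zero.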
__device__
double calMorse_ECM(const double& linkLength ) {
double forceValue=0.0 ;
if (linkLength > sceInterCell_ECM[4]) {
forceValue = 0;
} else {
forceValue = -sceInterCell_ECM[0] / sceInterCell_ECM[2]
* exp(-linkLength / sceInterCell_ECM[2])
+ sceInterCell_ECM[1] / sceInterCell_ECM[3]
* exp(-linkLength / sceInterCell_ECM[3]);
// if (forceValue > 0) {
// forceValue = 0;
// }
}
return (forceValue) ;
}
__device__
double calMorseEnergy_ECM(const double& linkLength ) {
double energyValue=0.0 ;
if (linkLength > sceInterCell_ECM[4]) {
energyValue = 0;
} else {
energyValue = sceInterCell_ECM[0]* exp(-linkLength / sceInterCell_ECM[2])
- sceInterCell_ECM[1]* exp(-linkLength / sceInterCell_ECM[3]);
}
return (energyValue) ;
}
/*
__device__
double calWLC_ECM(const double& linkLength ) {
double x=linkLength/wLCPara_ECM[0] ;
return (wLCPara_ECM[1]*( 6*x+ ( x*x*(3.0-2*x))/( (1-x)*(1-x) ) )
-wLCPara_ECM[2]/pow(linkLength,wLCPara_ECM[3]) ) ;
}
*/
__device__
bool IsValidAdhPair(const double& dist ) {
if (dist > restLenECMAdhSpringGPU && dist < maxLenECMAdhSpringGPU){
return true ;
}
else {
return false ;
}
}
__device__
bool IsValidAdhPairForNotInitPhase(const double& dist ) {
if (dist > restLenECMAdhSpringGPU){
return true ;
}
else {
return false ;
}
}
__device__
double CalAdhECM(const double& dist ) {
return (kAdhECMGPU*(dist-restLenECMAdhSpringGPU));
// in the IsValidAdhPair function, the distance has already been checked to be greater than the neutral length
}
__device__
double CalAdhEnergy(const double& dist ) {
return (0.5*kAdhECMGPU*(dist-restLenECMAdhSpringGPU)*(dist-restLenECMAdhSpringGPU));
// in the IsValidAdhPair function, the distance has already been checked to be greater than the neutral length
}
EType SceECM:: ConvertStringToEType(string eNodeRead) {
if (eNodeRead=="perip") {
return perip ;
}
else if (eNodeRead=="bc2") {
return bc2 ;
}
else if (eNodeRead=="excm") {
return excm ;
}
else {
cout << "Error in defining type of external nodes" << endl ;
return excm ;// To just return something to avoid a compiler complaint
}
}
SceECM::SceECM() {
isECMNeighborSet=false ;
eCMRemoved=false ;
isECMNeighborResetPostDivision = false;
}
void SceECM::Initialize(uint maxAllNodePerCellECM, uint maxMembrNodePerCellECM, uint maxTotalNodesECM, int freqPlotData, string uniqueSymbol) {
maxAllNodePerCell=maxAllNodePerCellECM ;
maxMembrNodePerCell= maxMembrNodePerCellECM ;
maxTotalNodes=maxTotalNodesECM ; //Ali
this->freqPlotData=freqPlotData ;
this->uniqueSymbol=uniqueSymbol ;
std::fstream readCoord_ECM ;
std::fstream readInput_ECM ;
int numberNodes_ECM ;
double tmpPosX_ECM,tmpPosY_ECM ;
vector<double> posXIni_ECM,posYIni_ECM ;
vector <EType> eNodeVec ;
int resumeSimulation = globalConfigVars.getConfigValue(
"ResumeSimulation").toInt();
if (resumeSimulation==0) {
cout << " In the ECM module, I am in start mode" << endl ;
readCoord_ECM.open("./resources/coordinate_ECM21.txt") ;
}
else if(resumeSimulation==1) {
cout << " In the ECM module, I am in resume mode" << endl ;
std::string secondInputFileName = "./resources/DataFileECM_" + uniqueSymbol + "Resume.cfg";
readCoord_ECM.open(secondInputFileName.c_str()) ;
}
else{
throw std::invalid_argument(" ResumeSimulation parameter in the input file must be either 1 or 0. Error from ECM module");
}
if (readCoord_ECM.is_open()) {
cout << "ECM coordinates file opened successfully" <<endl ;
}
else {
cout << "ECM coordinates file is not opened successfully" << endl ;
}
string inputInfoText ;
string eNodeRead ;
readCoord_ECM>>numberNodes_ECM ;
for (int i=0 ; i<numberNodes_ECM ; i++){
readCoord_ECM>>tmpPosX_ECM>>tmpPosY_ECM>>eNodeRead ;
posXIni_ECM.push_back(tmpPosX_ECM) ;
posYIni_ECM.push_back(tmpPosY_ECM) ;
EType eNode=ConvertStringToEType(eNodeRead) ;
eNodeVec.push_back(eNode) ;
}
readInput_ECM.open("./resources/ECM_input.txt") ;
if (readInput_ECM.is_open()) {
cout << "ECM Mech input opened successfully" <<endl ;
}
else {
cout << "ECM Mech input is not opened successfully" << endl ;
}
readInput_ECM>> inputInfoText ;
for (int i=0 ; i<5; i++) {
readInput_ECM>> mechPara_ECM.sceInterCellCPU_ECM[i] ; //=39.0 ;
}
// readInput_ECM>>restLenECMSpring ;
// readInput_ECM>>eCMLinSpringStiff ;
readInput_ECM>>restLenECMAdhSpring ;
readInput_ECM>>maxLenECMAdhSpring ;
readInput_ECM>>kAdhECM ;
//for ( int i=0 ; i<4 ; i++) {
// readInput_ECM>>mechPara_ECM.wLCParaCPU_ECM[i] ;
// }
std::fstream secondInput_ECM ;
std:: string secondInputInfo ; //dummy
std::string secondInputFileName = "./resources/ECM_" + uniqueSymbol + "input.cfg";
secondInput_ECM.open(secondInputFileName.c_str()) ;
//secondInput_ECM.open("./resources/ECM_N01G00_input.cfg" ) ;
if (secondInput_ECM.is_open()) {
cout << "Second ECM Mech input opened successfully" <<endl ;
}
else {
cout << "Second ECM Mech input is not opened successfully" << endl ;
}
secondInput_ECM>>secondInputInfo ; // just for information no use in the code
secondInput_ECM>>stiffnessECMBasal ;
secondInput_ECM>>stiffnessECMBC ;
secondInput_ECM>>stiffnessECMPerip ;
secondInput_ECM>>lknotECMBasal ;
secondInput_ECM>>lknotECMBC ;
secondInput_ECM>>lknotECMPerip ;
secondInput_ECM>>dampBasal ;
secondInput_ECM>>dampBC ;
secondInput_ECM>>dampApical ;
cout <<" stiffness of ECM at the basal side is="<<stiffnessECMBasal <<endl ;
cout <<" stiffness of ECM at boundary is="<<stiffnessECMBC<<endl ;
cout <<" stiffness of ECM peripodial side is="<<stiffnessECMPerip<<endl ;
cout <<" rest len basal ECM is="<<lknotECMBasal<<endl ;
cout <<" rest len boundary ECM is= "<<lknotECMBC<<endl ;
cout << "rest len peripodial ECM is=" <<lknotECMPerip <<endl ;
cout << "Damping for basal ECM is="<<dampBasal<<endl ;
cout << "Damping for boundary ECM is= "<<dampBC<<endl ;
cout << "Damping for peripodial ECM is=" <<dampApical <<endl ;
cout << "number of ECM nodes is"<< numberNodes_ECM <<endl ;
for (int i=0 ; i<5; i++) {
cout <<"Morse parameter number"<<i<<" is " <<mechPara_ECM.sceInterCellCPU_ECM[i]<<endl ;
}
//cout <<"rest length of ECM spring is "<<restLenECMSpring<<endl ;
// cout <<"ECM spring stiffness is "<<eCMLinSpringStiff<<endl ;
cout <<"ECM Membrane neutral adhesion length is "<<restLenECMAdhSpring<<endl ;
cout <<"ECM Membrane max adhesion length is "<<maxLenECMAdhSpring<<endl ;
cout <<"ECM Membrane adhesion stiffness is "<<kAdhECM<<endl ;
cout << "ECM only applies adhesvie force" << endl ;
//for ( int i=0 ; i<4 ; i++) {
// cout<<"wLC parameter "<< i << " is "<<mechPara_ECM.wLCParaCPU_ECM[i]<<endl ; ;
//}
hipMemcpyToSymbol(sceInterCell_ECM,mechPara_ECM.sceInterCellCPU_ECM
,5*sizeof(double));
//hipMemcpyToSymbol(wLCPara_ECM,mechPara_ECM.wLCParaCPU_ECM
// ,4*sizeof(double));
hipMemcpyToSymbol(restLenECMAdhSpringGPU, &restLenECMAdhSpring,sizeof(double));
hipMemcpyToSymbol(maxLenECMAdhSpringGPU, &maxLenECMAdhSpring,sizeof(double));
hipMemcpyToSymbol(kAdhECMGPU, &kAdhECM,sizeof(double));
hipMemcpyToSymbol(stiffnessECMPeripGPU, &stiffnessECMPerip,sizeof(double));
hipMemcpyToSymbol(stiffnessECMBCGPU, &stiffnessECMBC,sizeof(double));
hipMemcpyToSymbol(stiffnessECMBasalGPU, &stiffnessECMBasal,sizeof(double));
hipMemcpyToSymbol(lknotECMPeripGPU, & lknotECMPerip,sizeof(double));
hipMemcpyToSymbol(lknotECMBCGPU, & lknotECMBC,sizeof(double));
hipMemcpyToSymbol(lknotECMBasalGPU, & lknotECMBasal,sizeof(double));
counter=100000 ; //large number
lastPrintECM=1000000 ; // large number
outputFrameECM=0 ;
numNodesECM= numberNodes_ECM ; //(eCMMaxX-eCMMinX)/eCMMinDist ;
indexECM.resize(numNodesECM,0) ;
peripORexcm.resize(numNodesECM,perip) ;
dampCoef.resize(numNodesECM) ;
nodeECMLocX.resize(numNodesECM,0.0) ;
nodeECMLocY.resize(numNodesECM,0.0) ;
cellNeighborId.resize(numNodesECM,-1) ;
stiffLevel.resize(numNodesECM) ;
sponLen.resize(numNodesECM) ;
linSpringForceECMX.resize(numNodesECM,0.0);
linSpringForceECMY.resize(numNodesECM,0.0);
linSpringAvgTension.resize(numNodesECM,0.0);
linSpringEnergy.resize(numNodesECM,0.0);
morseEnergy.resize(numNodesECM,0.0);
adhEnergy.resize(numNodesECM,0.0);
bendSpringForceECMX.resize(numNodesECM,0.0);
bendSpringForceECMY.resize(numNodesECM,0.0);
memMorseForceECMX.resize(numNodesECM,0.0);
memMorseForceECMY.resize(numNodesECM,0.0);
fBendCenterX.resize(numNodesECM,0.0);
fBendCenterY.resize(numNodesECM,0.0);
fBendLeftX.resize(numNodesECM,0.0);
fBendLeftY.resize(numNodesECM,0.0);
fBendRightX.resize(numNodesECM,0.0);
fBendRightY.resize(numNodesECM,0.0);
totalForceECMX.resize(numNodesECM,0.0);
totalForceECMY.resize(numNodesECM,0.0);
totalExplicitForceECMX.resize(numNodesECM,0.0);
totalExplicitForceECMY.resize(numNodesECM,0.0);
rHSX.resize(numNodesECM,0.0);
rHSY.resize(numNodesECM,0.0);
//memNodeType.resize(maxTotalNodes,notAssigned1) ;
nodeIsActive.resize(numNodesECM,true) ;
thrust::sequence (indexECM.begin(),indexECM.begin()+numNodesECM);
thrust::copy(posXIni_ECM.begin(),posXIni_ECM.end(),nodeECMLocX.begin()) ;
thrust::copy(posYIni_ECM.begin(),posYIni_ECM.end(),nodeECMLocY.begin()) ;
thrust::copy(eNodeVec.begin(),eNodeVec.end(),peripORexcm.begin()) ;
AssignDampCoef() ;
// cout << "GPU level initial coordinates and type of external nodes are: " << endl ;
// for (int i=0; i<nodeECMLocX.size() ; i++) {
// cout<< nodeECMLocX[i]<<", "<<nodeECMLocY[i]<<", "<<peripORexcm[i] << endl;
// }
PrintECM(0.0) ;
std::string cSVFileName = "./ECMFolder/EnergyExport_" + uniqueSymbol + ".CSV";
ofstream EnergyExport ;
EnergyExport.open(cSVFileName.c_str());
EnergyExport <<"Time,"<<"TotalMorseEnergyECM," << "TotalAdhEnergyECM,"<<"TotalLinSpringEnergy,"<<"TotalEnergy, " <<"TotalEnergyDerivative"<< std::endl;
} //initialization function finished
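// ApplyECMConstrain below is the per-time-step entry point of the ECM module:
// it snapshots the current cell-node positions, updates per-node stiffness and
// rest length from the node type, periodically refreshes the cell/ECM neighbor
// candidates, applies ECM forces to the cell membrane nodes, assembles the
// spring/bending/Morse/adhesion forces on the ECM nodes, and then advances the
// ECM either with the explicit update or with the semi-implicit tridiagonal
// solve, depending on the implicit_solver_active flag.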
void SceECM:: ApplyECMConstrain(int currentActiveCellCount, int totalNodeCountForActiveCellsECM, double curTime, double dt, double Damp_CoefCell, bool cellPolar, bool subCellPolar, bool isInitPhase, double timeRatio, double timeRatio_Crit_ECM, double timeRatio_Crit_Division){
bool implicit_solver_active = false ;
if (eCMRemoved) {
PrintECMRemoved(curTime);
cout << "ECM is removed" << endl ;
return ;
}
// if (timeRatio == timeRatio_Crit_ECM){
// cout<<"Localized ECM weakening is triggered"<<endl;
// }
#ifdef debugModeECM
hipEvent_t start1, start2, start3, start4, start5, start6, start7, start8, stop;
float elapsedTime1, elapsedTime2, elapsedTime3, elapsedTime4, elapsedTime5, elapsedTime6, elapsedTime7 , elapsedTime8 ;
hipEventCreate(&start1);
hipEventCreate(&start2);
hipEventCreate(&start3);
hipEventCreate(&start4);
hipEventCreate(&start5);
hipEventCreate(&start6);
hipEventCreate(&start7);
hipEventCreate(&start8);
hipEventCreate(&stop);
hipEventRecord(start1, 0);
#endif
nodeCellLocXOld.resize(totalNodeCountForActiveCellsECM) ;
nodeCellLocYOld.resize(totalNodeCountForActiveCellsECM) ;
integrinMultipOld.resize(totalNodeCountForActiveCellsECM) ;
// nodeCellLocZOld.resize(totalNodeCountForActiveCellsECM) ;
adhPairECM_Cell.resize(totalNodeCountForActiveCellsECM,-1) ;
morseEnergyCell.resize(totalNodeCountForActiveCellsECM,0.0);
adhEnergyCell.resize(totalNodeCountForActiveCellsECM,0.0);
thrust::copy(nodesPointerECM->getInfoVecs().nodeLocX.begin(),nodesPointerECM->getInfoVecs().nodeLocX.begin()+totalNodeCountForActiveCellsECM,nodeCellLocXOld.begin()) ;
thrust::copy(nodesPointerECM->getInfoVecs().nodeLocY.begin(),nodesPointerECM->getInfoVecs().nodeLocY.begin()+totalNodeCountForActiveCellsECM,nodeCellLocYOld.begin()) ;
thrust::copy(nodesPointerECM->getInfoVecs().nodeIntegrinMultip.begin(),nodesPointerECM->getInfoVecs().nodeIntegrinMultip.begin()+totalNodeCountForActiveCellsECM,integrinMultipOld.begin()) ;
// thrust::copy(nodesPointerECM->getInfoVecs().nodeLocZ.begin(),nodesPointerECM->getInfoVecs().nodeLocZ.begin()+totalNodeCountForActiveCellsECM,nodeCellLocZOld.begin()) ;
#ifdef debugModeECM
hipEventRecord(start2, 0);
hipEventSynchronize(start2);
hipEventElapsedTime(&elapsedTime1, start1, start2);
#endif
thrust:: transform (peripORexcm.begin(), peripORexcm.begin()+numNodesECM,
thrust::make_zip_iterator (thrust::make_tuple (stiffLevel.begin(),sponLen.begin())),MechProp());
// cout << " Mechanical properties after assignment is " << stiffLevel[0] << endl ;
counter ++ ;
//if (counter>=100 || curTime<(100*dt) || isECMNeighborSet==false) {
// if (curTime<(100*dt) || isECMNeighborSet==false) {
if (curTime < (100*dt) || isECMNeighborSet == false){
isECMNeighborSet=true ;
counter=0 ;
FindNeighborCandidateForCellsAndECMNodes();
}
// else if (timeRatio > timeRatio_Crit_Division && isECMNeighborResetPostDivision == false){
// if (cellsPointerECM->getCellInfoVecs().isOneTimeStepPostDivision ==true || cellsPointerECM->getCellInfoVecs().isTwoTimeStepPostDivision ==true){
if (cellsPointerECM->getCellInfoVecs().isPostDivision ==true || cellsPointerECM->getCellInfoVecs().isPostAddMembrNodes == true){
std::cout<<"Resetting ecm and cell neighbor info! post division!"<<std::endl;
FindNeighborCandidateForCellsAndECMNodes();
isECMNeighborResetPostDivision=true;
}
// if (timeRatio == timeRatio_Crit_Division){
// isECMNeighborResetPostDivision=false;
// }
// }
#ifdef debugModeECM
hipEventRecord(start3, 0);
hipEventSynchronize(start3);
hipEventElapsedTime(&elapsedTime2, start2, start3);
#endif
MoveCellNodesByECMForces(totalNodeCountForActiveCellsECM,currentActiveCellCount,dt, Damp_CoefCell) ;
/* To reduce computational cost
energyECM.totalMorseEnergyCellECM = thrust::reduce( morseEnergyCell.begin(),morseEnergyCell.begin()+totalNodeCountForActiveCellsECM,(double) 0.0, thrust::plus<double>() );
energyECM.totalAdhEnergyCellECM = thrust::reduce( adhEnergyCell.begin() ,adhEnergyCell.begin() +totalNodeCountForActiveCellsECM,(double) 0.0, thrust::plus<double>() );
*/
CalLinSpringForce(timeRatio, timeRatio_Crit_ECM);
CalBendSpringForce();
#ifdef debugModeECM
hipEventRecord(start4, 0);
hipEventSynchronize(start4);
hipEventElapsedTime(&elapsedTime3, start3, start4);
#endif
CalCellForcesOnECM() ;
//energyECM.totalLinSpringEnergyECM = 0.5 * ( thrust::reduce( linSpringEnergy.begin(),linSpringEnergy.begin()+numNodesECM,(double) 0.0, thrust::plus<double>() ));
//to make sure it is based on the distance used for action force calculation.
/* To reduce computational cost
energyECM.totalMorseEnergyECMCell = thrust::reduce( morseEnergy.begin(),morseEnergy.begin()+numNodesECM,(double) 0.0, thrust::plus<double>() );
energyECM.totalAdhEnergyECMCell = thrust::reduce( adhEnergy.begin() ,adhEnergy.begin() +numNodesECM,(double) 0.0, thrust::plus<double>() );
*/
if (!implicit_solver_active) {
// Calculate summation of all forces and move nodes if explicit solver is going to be used
CalSumForcesOnECM() ;
MoveNodesBySumAllForces(dt) ;
}
if (implicit_solver_active) {
//Calculate right hand side of implicit solver which includes explicit forces
CalSumOnlyExplicitForcesOnECM() ;
CalRHS(dt) ;
}
#ifdef debugModeECM
hipEventRecord(start5, 0);
hipEventSynchronize(start5);
hipEventElapsedTime(&elapsedTime4, start4, start5);
#endif
//Create tmp CPU vectors used by the implicit solver. The declarations cannot be placed inside the if condition
vector <double> tmpRHSX(numNodesECM);
vector <double> tmpRHSY(numNodesECM);
tmpHostNodeECMLocX.resize(numNodesECM);
tmpHostNodeECMLocY.resize(numNodesECM);
if (implicit_solver_active) {
// Copy ECM locations from GPU to CPU if implicit solver is used
thrust::copy (rHSX.begin(), rHSX.begin()+numNodesECM, tmpRHSX.begin());
thrust::copy (rHSY.begin(), rHSY.begin()+numNodesECM, tmpRHSY.begin());
thrust::copy (nodeECMLocX.begin(), nodeECMLocX.begin()+numNodesECM, tmpHostNodeECMLocX.begin());
thrust::copy (nodeECMLocY.begin(), nodeECMLocY.begin()+numNodesECM, tmpHostNodeECMLocY.begin());
//cout << "max RHSX is " << *max_element(tmpRHSX.begin(), tmpRHSX.begin()+numNodesECM) << endl ;
//cout << "min RHSX is " << *min_element(tmpRHSX.begin(), tmpRHSX.begin()+numNodesECM) << endl ;
//cout << "max RHSY is " << *max_element(tmpRHSY.begin(), tmpRHSY.begin()+numNodesECM) << endl ;
//cout << "min RHSY is " << *min_element(tmpRHSY.begin(), tmpRHSY.begin()+numNodesECM) << endl ;
}
#ifdef debugModeECM
hipEventRecord(start6, 0);
hipEventSynchronize(start6);
hipEventElapsedTime(&elapsedTime5, start5, start6);
#endif
if (implicit_solver_active) {
// setting up the equation of motion if the implicit solver is used
EquMotionCoef (dt);
}
#ifdef debugModeECM
hipEventRecord(start7, 0);
hipEventSynchronize(start7);
hipEventElapsedTime(&elapsedTime6, start6, start7);
#endif
if (implicit_solver_active) {
// Fetch the implicit solver and update ECM location if implicit solver is used
tmpHostNodeECMLocX =solverPointer->SOR3DiagPeriodic(nodeIsActive,hCoefLd, hCoefD, hCoefUd,tmpRHSX,indexPrev, indexNext, tmpHostNodeECMLocX);
tmpHostNodeECMLocY =solverPointer->SOR3DiagPeriodic(nodeIsActive,hCoefLd, hCoefD, hCoefUd,tmpRHSY,indexPrev,indexNext, tmpHostNodeECMLocY);
// copy ECM node locations back from CPU to GPU if implicit solver is used
thrust::copy (tmpHostNodeECMLocX.begin(), tmpHostNodeECMLocX.begin()+numNodesECM, nodeECMLocX.begin());
thrust::copy (tmpHostNodeECMLocY.begin(), tmpHostNodeECMLocY.begin()+numNodesECM, nodeECMLocY.begin());
}
#ifdef debugModeECM
hipEventRecord(start8, 0);
hipEventSynchronize(start8);
hipEventElapsedTime(&elapsedTime7, start7, start8);
#endif
/* To reduce computational cost
cout << "total Morse energy for cell-ECM is= "<< energyECM.totalMorseEnergyCellECM << endl ;
cout << "total Morse energy for ECM-cell is= "<< energyECM.totalMorseEnergyECMCell << endl ;
cout << "total adhesion energy for cell-ECM is= "<< energyECM.totalAdhEnergyCellECM << endl ;
cout << "total adhesion energy for ECM-cell is= "<< energyECM.totalAdhEnergyECMCell << endl ;
//assert (abs (energyECM.totalMorseEnergyCellECM-energyECM.totalMorseEnergyECMCell)<1.0) ;
//assert (abs (energyECM.totalAdhEnergyCellECM- energyECM.totalAdhEnergyECMCell) <1.0) ;
if ( (abs (energyECM.totalMorseEnergyCellECM-energyECM.totalMorseEnergyECMCell)>1.0) ||
(abs (energyECM.totalAdhEnergyCellECM- energyECM.totalAdhEnergyECMCell) >1.0)
) {
cout << "Warning: Action and reaction forces in the ECM do not match each other" << endl ;
}
*/
# ifdef debugModeECM
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime8, start8, stop);
std::cout << "time 1 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime1 << endl ;
std::cout << "time 2 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime2 << endl ;
std::cout << "time 3 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime3 << endl ;
std::cout << "time 4 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime4 << endl ;
std::cout << "time 5 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime5 << endl ;
std::cout << "time 6 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime6 << endl ;
std::cout << "time 7 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime7 << endl ;
std::cout << "time 8 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime8 << endl ;
#endif
//throw std::invalid_argument(" Solver called properly and I want to stop the code");
PrintECM(curTime);
}
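// PrintECM writes one VTK polyline file per output frame (cell type 3 is
// VTK_LINE, and the last segment closes the loop back to the first node), plus
// plain-text exports of node locations and average link tension and the running
// energy CSV; PrintECMRemoved writes the same VTK layout with every point placed
// outside the domain so the movie keeps a consistent frame count.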
void SceECM:: PrintECM(double curTime) {
lastPrintECM=lastPrintECM+1 ;
if (lastPrintECM>=freqPlotData) {
outputFrameECM++ ;
lastPrintECM=0 ;
cout << " I am in regular print function" << endl ;
// First ECM output file for paraview //
std::string vtkFileName = "./ECMFolder/ECM_" + uniqueSymbol +patch::to_string(outputFrameECM-1) + ".vtk";
ofstream ECMOut;
ECMOut.open(vtkFileName.c_str());
ECMOut<< "# vtk DataFile Version 3.0" << endl;
ECMOut<< "Result for paraview 2d code" << endl;
ECMOut << "ASCII" << endl;
ECMOut << "DATASET UNSTRUCTURED_GRID" << std::endl;
ECMOut << "POINTS " << nodeECMLocX.size() << " float" << std::endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut << nodeECMLocX[i] << " " << nodeECMLocY[i] << " "
<< 0.0 << std::endl;
}
ECMOut<< std::endl;
ECMOut<< "CELLS " << nodeECMLocX.size()<< " " << 3 *nodeECMLocX.size()<< std::endl;
for (uint i = 0; i < (nodeECMLocX.size()-1); i++) {
ECMOut << 2 << " " << indexECM[i] << " "
<< indexECM[i+1] << std::endl;
}
ECMOut << 2 << " " << indexECM[nodeECMLocX.size()-1] << " "<< indexECM[0] << std::endl; //last point to the first point
ECMOut << "CELL_TYPES " << nodeECMLocX.size()<< endl;
for (uint i = 0; i < nodeECMLocX.size() ; i++) {
ECMOut << "3" << endl;
}
ECMOut << "POINT_DATA "<<nodeECMLocX.size() <<endl ;
ECMOut << "SCALARS Avg_Tension " << "float"<< endl;
ECMOut << "LOOKUP_TABLE " << "default"<< endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut<<linSpringAvgTension[i] <<endl ;
}
ECMOut << "SCALARS Node_Type " << "float"<< endl;
ECMOut << "LOOKUP_TABLE " << "default"<< endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut<<peripORexcm[i] <<endl ;
}
ECMOut.close();
// second output file for curvature estimation //
std::string txtFileName = "./ECMFolder/ECMLocationExport_" + uniqueSymbol+ patch::to_string(outputFrameECM-1) + ".txt";
ofstream ECMLocationExport ;
ECMLocationExport.open(txtFileName.c_str());
//ECMExport << "ECM pouch coordinates" << std::endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
// if (peripORexcm[i]==excm) {
ECMLocationExport<< nodeECMLocX[i] << " " << nodeECMLocY[i] << " " << 0.0 << " "<< peripORexcm[i]<<std::endl;
// }
}
//ECMExport << "ECM lumen side coordinates" << std::endl;
// for (uint i = 0; i < nodeECMLocX.size(); i++) {
// if (peripORexcm[i]==perip) {
// ECMLocationExport << nodeECMLocX[i] << " " << nodeECMLocY[i] << " "
// << 0.0 << std::endl;
// }
// }
ECMLocationExport.close();
//Third write file for ECM
txtFileName = "./ECMFolder/ECMTensionExport_" + uniqueSymbol+ patch::to_string(outputFrameECM-1) + ".txt";
ofstream ECMTensionExport ;
ECMTensionExport.open(txtFileName.c_str());
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMTensionExport<< linSpringAvgTension[i]<< " " << peripORexcm[i]<< std::endl;
}
ECMTensionExport.close();
///
//Fourth write file for ECM
energyECM.totalEnergyECMOld=energyECM.totalEnergyECM ;
energyECM.totalEnergyECM= energyECM.totalMorseEnergyECMCell
+ energyECM.totalAdhEnergyECMCell
+ energyECM.totalLinSpringEnergyECM ;
std::string cSVFileName = "./ECMFolder/EnergyExport_" + uniqueSymbol+ ".CSV";
ofstream EnergyExport ;
EnergyExport.open(cSVFileName.c_str(),ofstream::app);
//EnergyExport <<"totalMorseEnergyCell " << "totalAdhEnergyCell "<< "totalMorseEnergy "<<"totalAdhEnergy "<< "totalLinSpringEnergy " << std::endl;
EnergyExport <<curTime<<","<<energyECM.totalMorseEnergyECMCell << "," << energyECM.totalAdhEnergyECMCell<< "," << energyECM.totalLinSpringEnergyECM <<"," << energyECM.totalEnergyECM <<","<<energyECM.totalEnergyPrimeECM <<std::endl;
}
}
// This is just to create a file to be able to generate the movie with consistent frames
void SceECM:: PrintECMRemoved(double curTime) {
lastPrintECM=lastPrintECM+1 ;
if (lastPrintECM>=freqPlotData) {
outputFrameECM++ ;
lastPrintECM=0 ;
cout << " I am in ECM removed print function" << endl ;
// First ECM output file for paraview //
std::string vtkFileName = "./ECMFolder/ECM_" + uniqueSymbol +patch::to_string(outputFrameECM-1) + ".vtk";
ofstream ECMOut;
ECMOut.open(vtkFileName.c_str());
ECMOut<< "# vtk DataFile Version 3.0" << endl;
ECMOut<< "Result for paraview 2d code" << endl;
ECMOut << "ASCII" << endl;
ECMOut << "DATASET UNSTRUCTURED_GRID" << std::endl;
ECMOut << "POINTS " << nodeECMLocX.size() << " float" << std::endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut << -500.0 << " " << -500.0 << " "
<< 0.0 << std::endl; // Just out of domain
}
ECMOut<< std::endl;
ECMOut<< "CELLS " << nodeECMLocX.size()<< " " << 3 *nodeECMLocX.size()<< std::endl;
for (uint i = 0; i < (nodeECMLocX.size()-1); i++) {
ECMOut << 2 << " " << indexECM[i] << " "
<< indexECM[i+1] << std::endl;
}
ECMOut << 2 << " " << indexECM[nodeECMLocX.size()-1] << " "<< indexECM[0] << std::endl; //last point to the first point
ECMOut << "CELL_TYPES " << nodeECMLocX.size()<< endl;
for (uint i = 0; i < nodeECMLocX.size() ; i++) {
ECMOut << "3" << endl;
}
ECMOut << "POINT_DATA "<<nodeECMLocX.size() <<endl ;
ECMOut << "SCALARS Avg_Tension " << "float"<< endl;
ECMOut << "LOOKUP_TABLE " << "default"<< endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut<<linSpringAvgTension[i] <<endl ;
}
ECMOut << "SCALARS Node_Type " << "float"<< endl;
ECMOut << "LOOKUP_TABLE " << "default"<< endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut<<peripORexcm[i] <<endl ;
}
ECMOut.close();
}
}
AniResumeData SceECM:: obtainResumeData() {
AniResumeData aniResumeData ;
thrust:: host_vector<double> hostTmpLocX;
thrust:: host_vector<double> hostTmpLocY;
thrust:: host_vector<EType> hostTmpType;
hostTmpLocX.resize(numNodesECM) ;
hostTmpLocY.resize(numNodesECM) ;
hostTmpType.resize(numNodesECM) ;
cout << " I am in obtainResumeData function" << endl ;
thrust::copy (
thrust::make_zip_iterator(
thrust::make_tuple(nodeECMLocX.begin(),nodeECMLocY.begin(),peripORexcm.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(nodeECMLocX.begin(),nodeECMLocY.begin(),peripORexcm.begin()))+numNodesECM,
thrust::make_zip_iterator(
thrust::make_tuple(hostTmpLocX.begin(),hostTmpLocY.begin(),hostTmpType.begin())));
cout << " I start passing to regular vector variables" << endl ;
CVector tmp;
for( int i=0 ; i<numNodesECM ; i++) {
tmp=CVector (hostTmpLocX[i], hostTmpLocY[i], 0.0) ;
aniResumeData.nodePosArr.push_back(tmp) ;
aniResumeData.nodeECMType.push_back(hostTmpType[i]) ;
}
return aniResumeData ;
}
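// EquMotionCoef assembles, on the host, the cyclic tridiagonal system used by
// the semi-implicit path. The coefficients below are consistent with one
// backward-Euler step in which the linear-spring force on node i is linearized
// as k*(1 - l0/d)*(x_neighbor - x_i), with the rest length l0 and current
// distance d frozen at the present geometry: hCoefD holds the main diagonal,
// hCoefLd/hCoefUd the lower/upper diagonals toward the previous/next node, the
// 0.0001 terms guard against division by zero, and indexPrev/indexNext wrap the
// ring so the system can be solved by SOR3DiagPeriodic.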
void SceECM::EquMotionCoef (double dt) {
vector <double> sponLenHost(numNodesECM) ;
vector <double> sponLenWithNext ;
vector <double> sponLenWithPrev ;
vector <double> distWithNext ;
vector <double> distWithPrev ;
vector <double> dampCoefHost ;
sponLenWithNext.clear();
sponLenWithPrev.clear();
distWithNext.clear() ;
distWithPrev.clear() ;
hCoefLd.clear() ;
hCoefUd.clear() ;
hCoefD.clear() ;
indexNext.clear() ;
indexPrev.clear() ;
dampCoefHost.clear() ;
indexNext.resize(numNodesECM) ;
indexPrev.resize(numNodesECM) ;
dampCoefHost.resize(numNodesECM) ;
thrust::copy(sponLen.begin(),sponLen.begin()+numNodesECM, sponLenHost.begin()) ;
thrust::copy(dampCoef.begin(),dampCoef.begin()+numNodesECM, dampCoefHost.begin()) ;
double k=stiffLevel[0] ; //Assuming ECM is homogeneous in mechanical properties
for ( int i=0 ; i< numNodesECM ; i++) {
indexNext.at(i)=i+1 ;
indexPrev.at(i)=i-1 ;
if (i==numNodesECM-1){
indexNext.at(i)=0 ;
}
if (i==0){
indexPrev.at(i)=numNodesECM-1 ;
}
sponLenWithNext.push_back( 0.5*(sponLenHost[indexNext.at(i)]+sponLenHost[i]) );
sponLenWithPrev.push_back( 0.5*(sponLenHost[indexPrev.at(i)]+sponLenHost[i]) );
distWithNext.push_back(sqrt( pow(tmpHostNodeECMLocX[indexNext.at(i)]-tmpHostNodeECMLocX[i],2) +
pow(tmpHostNodeECMLocY[indexNext.at(i)]-tmpHostNodeECMLocY[i],2))) ;
distWithPrev.push_back(sqrt( pow(tmpHostNodeECMLocX[indexPrev.at(i)]-tmpHostNodeECMLocX[i],2) +
pow(tmpHostNodeECMLocY[indexPrev.at(i)]-tmpHostNodeECMLocY[i],2)));
}
for ( int i=0 ; i< numNodesECM ; i++) {
hCoefD.push_back (1 + k*dt/dampCoefHost.at(i)*( 2 - sponLenWithPrev.at(i)/(distWithPrev.at(i) + 0.0001 )
- sponLenWithNext.at(i)/(distWithNext.at(i) + 0.0001 ))) ;
hCoefLd.push_back( k*dt/dampCoefHost.at(i)*(-1 + sponLenWithPrev.at(i)/(distWithPrev.at(i) + 0.0001 ))) ;
hCoefUd.push_back( k*dt/dampCoefHost.at(i)*(-1 + sponLenWithNext.at(i)/(distWithNext.at(i) + 0.0001 ))) ;
}
#ifdef debugModeECM
cout <<"max distance with next node is" <<*max_element ( distWithNext.begin(), distWithNext.begin()+numNodesECM) <<endl ;
cout <<"min distance with next node is" << *min_element ( distWithNext.begin(), distWithNext.begin()+numNodesECM) <<endl ;
cout <<"max distance with previous node is" <<*max_element ( distWithPrev.begin(), distWithPrev.begin()+numNodesECM) <<endl ;
cout <<"min distance with previous node is" << *min_element ( distWithPrev.begin(), distWithPrev.begin()+numNodesECM) <<endl ;
vector < double> hCoefDAbs;
hCoefDAbs.clear() ;
for ( int i=0 ; i< numNodesECM ; i++) {
hCoefDAbs.push_back (abs(1 + k*dt/dampCoefHost.at(i)*( 2 - sponLenWithPrev.at(i)/(distWithPrev.at(i) + 0.0001 )
- sponLenWithNext.at(i)/(distWithNext.at(i) + 0.0001 )))) ;
}
cout <<"max main diag. elment is " << *max_element ( hCoefD.begin(), hCoefD.begin() +numNodesECM) <<endl ;
cout <<"min main diag. element is " << *min_element ( hCoefD.begin(), hCoefD.begin() +numNodesECM) <<endl ;
cout <<"min main Abs(diag.) element is " << *min_element ( hCoefDAbs.begin(), hCoefDAbs.begin() +numNodesECM) <<endl ;
cout <<"max upper diag. element is " << *max_element ( hCoefUd.begin(), hCoefUd.begin()+numNodesECM) <<endl ;
cout <<"min upper diag. element is " << *min_element ( hCoefUd.begin(), hCoefUd.begin()+numNodesECM) <<endl ;
cout <<"max lower diag. element is " << *max_element ( hCoefLd.begin(), hCoefLd.begin()+numNodesECM) <<endl ;
cout <<"min lower diag. element is " << *min_element ( hCoefLd.begin(), hCoefLd.begin()+numNodesECM) <<endl ;
cout <<"stiffness, time step and first element of damping vector is " << endl ;
cout << k <<","<< dt<<"," << dampCoefHost.at(0) << endl ;
cout << "constants for stiffness matrix calculated " << endl ;
cout << "last diagonal element is " << hCoefD.at(numNodesECM-1) << endl ;
cout << " number of ECM nodes is "<< numNodesECM << endl ;
# endif
}
void SceECM::MoveCellNodesByECMForces(int totalNodeCountForActiveCellsECM,int currentActiveCellCount, double dt, double Damp_CoefCell)
{
double* nodeECMLocXAddr= thrust::raw_pointer_cast (
&nodeECMLocX[0]) ;
double* nodeECMLocYAddr= thrust::raw_pointer_cast (
&nodeECMLocY[0]) ;
EType* peripORexcmAddr= thrust::raw_pointer_cast (
&peripORexcm[0]) ;
bool* isEnteringMitotic = thrust::raw_pointer_cast(
&(cellsPointerECM->getCellInfoVecs().isEnteringMitotic[0]));
// move the nodes of epithelial cells
//// find the closest ECM node to each cell //
int numCells = cellsPointerECM->getCellInfoVecs().basalLocX.size() ;
thrust::counting_iterator<int> iBegin(0) ;
thrust::counting_iterator<int> iBegin2(0) ;
//////////////////////////////////////////
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
make_permutation_iterator(
cellsPointerECM->getCellInfoVecs().eCMNeighborId.begin(),
make_transform_iterator(iBegin2,
DivideFunctor2(
maxAllNodePerCell))),
make_transform_iterator (iBegin,
DivideFunctor2(maxAllNodePerCell)),
make_transform_iterator (iBegin,
ModuloFunctor2(maxAllNodePerCell)),
nodesPointerECM->getInfoVecs().nodeLocX.begin(),
nodesPointerECM->getInfoVecs().nodeLocY.begin(),
nodesPointerECM->getInfoVecs().nodeIsActive.begin(),
nodesPointerECM->getInfoVecs().memNodeType1.begin(),
nodesPointerECM->getInfoVecs().nodeIntegrinMultip.begin()
)),
thrust::make_zip_iterator (
thrust:: make_tuple (
make_permutation_iterator(
cellsPointerECM->getCellInfoVecs().eCMNeighborId.begin(),
make_transform_iterator(iBegin2,
DivideFunctor2(
maxAllNodePerCell))),
make_transform_iterator (iBegin,
DivideFunctor2(maxAllNodePerCell)),
make_transform_iterator (iBegin,
ModuloFunctor2(maxAllNodePerCell)),
nodesPointerECM->getInfoVecs().nodeLocX.begin(),
nodesPointerECM->getInfoVecs().nodeLocY.begin(),
nodesPointerECM->getInfoVecs().nodeIsActive.begin(),
nodesPointerECM->getInfoVecs().memNodeType1.begin(),
nodesPointerECM->getInfoVecs().nodeIntegrinMultip.begin()
))+totalNodeCountForActiveCellsECM,
thrust::make_zip_iterator (
thrust::make_tuple (
nodesPointerECM->getInfoVecs().nodeLocX.begin(),
nodesPointerECM->getInfoVecs().nodeLocY.begin(),
adhPairECM_Cell.begin(),
morseEnergyCell.begin(),
adhEnergyCell.begin())),
MoveNodes2_Cell(nodeECMLocXAddr,nodeECMLocYAddr,maxMembrNodePerCell,numNodesECM,dt,Damp_CoefCell,peripORexcmAddr,currentActiveCellCount, isEnteringMitotic));
}
void SceECM::CalLinSpringForce(double timeRatio, double timeRatio_Crit_ECM)
{
double* nodeECMLocXAddr= thrust::raw_pointer_cast (
&nodeECMLocX[0]) ;
double* nodeECMLocYAddr= thrust::raw_pointer_cast (
&nodeECMLocY[0]) ;
double* stiffLevelAddr=thrust::raw_pointer_cast (
&stiffLevel[0]) ;
double* sponLenAddr =thrust::raw_pointer_cast (
&sponLen[0]) ;
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
linSpringForceECMX.begin(),
linSpringForceECMY.begin(),
linSpringAvgTension.begin(),
linSpringEnergy.begin())),
LinSpringForceECM(numNodesECM,nodeECMLocXAddr,nodeECMLocYAddr,stiffLevelAddr,sponLenAddr, timeRatio, timeRatio_Crit_ECM));
//////////////////////////////////// find the closest Cell to each ECM node ///////////
///////////////////////////////////
//cout << " I am after FindCellNeighbor functor" << endl ;
}
void SceECM::CalBendSpringForce()
{
const double eCMBendStiff=6.0 ; // need to be an input
double* nodeECMLocXAddr= thrust::raw_pointer_cast (
&nodeECMLocX[0]) ;
double* nodeECMLocYAddr= thrust::raw_pointer_cast (
&nodeECMLocY[0]) ;
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
fBendCenterX.begin(),
fBendCenterY.begin(),
fBendLeftX.begin(),
fBendLeftY.begin(),
fBendRightX.begin(),
fBendRightY.begin())),
CalBendECM(nodeECMLocXAddr,nodeECMLocYAddr,numNodesECM,eCMBendStiff));
double* fBendLeftXAddr= thrust::raw_pointer_cast (
&fBendLeftX[0]) ;
double* fBendLeftYAddr= thrust::raw_pointer_cast (
&fBendLeftY[0]) ;
double* fBendRightXAddr= thrust::raw_pointer_cast (
&fBendRightX[0]) ;
double* fBendRightYAddr= thrust::raw_pointer_cast (
&fBendRightY[0]) ;
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
fBendCenterX.begin(),
fBendCenterY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
fBendCenterX.begin(),
fBendCenterY.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
bendSpringForceECMX.begin(),
bendSpringForceECMY.begin())),
SumBendForce(fBendLeftXAddr,fBendLeftYAddr,fBendRightXAddr,fBendRightYAddr,numNodesECM));
}
void SceECM::CalCellForcesOnECM()
{
bool* nodeIsActiveAddr= thrust::raw_pointer_cast (
& (nodesPointerECM->getInfoVecs().nodeIsActive[0])) ;
int * adhPairECM_CellAddr= thrust::raw_pointer_cast (
&adhPairECM_Cell[0]) ;
//Old locations are chosen to make sure the action-reaction balance of forces between ECM and cell nodes is fully satisfied.
double* nodeCellLocXAddr= thrust::raw_pointer_cast (
&nodeCellLocXOld[0]) ;
double* nodeCellLocYAddr= thrust::raw_pointer_cast (
&nodeCellLocYOld[0]) ;
// double* nodeCellLocZAddr= thrust::raw_pointer_cast (
// &nodeCellLocZOld[0]) ;
double* integrinMultip = thrust::raw_pointer_cast (
&integrinMultipOld[0]);
bool* isEnteringMitotic = thrust::raw_pointer_cast (
&(cellsPointerECM->getCellInfoVecs().isEnteringMitotic[0]));
int numCells = cellsPointerECM->getCellInfoVecs().basalLocX.size() ;
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin(),
cellNeighborId.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin(),
cellNeighborId.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
memMorseForceECMX.begin(),
memMorseForceECMY.begin(),
morseEnergy.begin(),
adhEnergy.begin())),
MorseAndAdhForceECM(numCells,maxAllNodePerCell,maxMembrNodePerCell,nodeCellLocXAddr,nodeCellLocYAddr,integrinMultip,nodeIsActiveAddr,adhPairECM_CellAddr, isEnteringMitotic));
}
void SceECM::CalSumForcesOnECM()
{
double dummy=0.0 ;
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
linSpringForceECMX.begin(),
linSpringForceECMY.begin(),
bendSpringForceECMX.begin(),
bendSpringForceECMY.begin(),
memMorseForceECMX.begin(),
memMorseForceECMY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
linSpringForceECMX.begin(),
linSpringForceECMY.begin(),
bendSpringForceECMX.begin(),
bendSpringForceECMY.begin(),
memMorseForceECMX.begin(),
memMorseForceECMY.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
totalForceECMX.begin(),
totalForceECMY.begin())),
TotalECMForceCompute(dummy));
}
void SceECM::CalSumOnlyExplicitForcesOnECM() {
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
bendSpringForceECMX.begin(),
bendSpringForceECMY.begin(),
memMorseForceECMX.begin(),
memMorseForceECMY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
bendSpringForceECMX.begin(),
bendSpringForceECMY.begin(),
memMorseForceECMX.begin(),
memMorseForceECMY.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
totalExplicitForceECMX.begin(),
totalExplicitForceECMY.begin())),
TotalExplicitECMForceCompute());
}
void SceECM::CalRHS(double dt)
{
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
totalExplicitForceECMX.begin(),
totalExplicitForceECMY.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin(),
dampCoef.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
totalExplicitForceECMX.begin(),
totalExplicitForceECMY.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin(),
dampCoef.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
rHSX.begin(),
rHSY.begin())),
RHSCompute(dt));
}
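// MoveNodesBySumAllForces is the explicit (overdamped) counterpart of the
// implicit path above: each ECM node position is updated in place by the
// MoveNodesECM functor from its total force, its per-node damping coefficient,
// and the time step dt.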
void SceECM::MoveNodesBySumAllForces(double dt)
{
// move the nodes of ECM
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
nodeECMLocX.begin(),
nodeECMLocY.begin(),
totalForceECMX.begin(),
totalForceECMY.begin(),
dampCoef.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
nodeECMLocX.begin(),
nodeECMLocY.begin(),
totalForceECMX.begin(),
totalForceECMY.begin(),
dampCoef.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
nodeECMLocX.begin(),
nodeECMLocY.begin())),
MoveNodesECM(dt));
}
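// FindNeighborCandidateForCellsAndECMNodes caches reciprocal nearest-neighbor
// candidates: eCMNeighborId stores, for each cell, the ECM node closest to its
// basal location, and cellNeighborId stores, for each ECM node, the closest
// cell. ApplyECMConstrain only re-runs it during the first steps and after
// division or membrane-node addition, keeping this search off the per-step
// critical path.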
void SceECM::FindNeighborCandidateForCellsAndECMNodes()
{
double* nodeECMLocXAddr= thrust::raw_pointer_cast (
&nodeECMLocX[0]) ;
double* nodeECMLocYAddr= thrust::raw_pointer_cast (
&nodeECMLocY[0]) ;
double * basalCellLocXAddr= thrust::raw_pointer_cast (
& ( cellsPointerECM->getCellInfoVecs().basalLocX[0]) ) ;
double * basalCellLocYAddr= thrust::raw_pointer_cast (
& ( cellsPointerECM->getCellInfoVecs().basalLocY[0]) ) ;
EType* peripORexcmAddr= thrust::raw_pointer_cast (
&peripORexcm[0]) ;
int numCells = cellsPointerECM->getCellInfoVecs().basalLocX.size() ;
if (cellsPointerECM->getCellInfoVecs().basalLocX.size()>86){
// std::cout<<"In SceECM.cu, basalLoc[86] = "<<cellsPointerECM->getCellInfoVecs().basalLocX[86]<<" "<<cellsPointerECM->getCellInfoVecs().basalLocY[86]<<std::endl;
// std::cout<<"In SceECM.cu, basalLoc[87] = "<<cellsPointerECM->getCellInfoVecs().basalLocX[87]<<" "<<cellsPointerECM->getCellInfoVecs().basalLocY[87]<<std::endl;
}
//// find the closest ECM node to each cell //
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
cellsPointerECM->getCellInfoVecs().basalLocX.begin(),
cellsPointerECM->getCellInfoVecs().basalLocY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
cellsPointerECM->getCellInfoVecs().basalLocX.begin(),
cellsPointerECM->getCellInfoVecs().basalLocY.begin()))+numCells,
cellsPointerECM->getCellInfoVecs().eCMNeighborId.begin(),
FindECMNeighborPerCell(nodeECMLocXAddr,nodeECMLocYAddr,numNodesECM ));
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
nodeECMLocX.begin(),
nodeECMLocY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
nodeECMLocX.begin(),
nodeECMLocY.begin()))+numNodesECM,
cellNeighborId.begin(),
FindCellNeighborPerECMNode(basalCellLocXAddr,basalCellLocYAddr, numCells));
}
void SceECM::AssignDampCoef() {
thrust::transform ( peripORexcm.begin() ,peripORexcm.begin() +numNodesECM, dampCoef.begin(), AssignDamping(dampBasal,dampBC,dampApical) );
#ifdef debugModeECM
for (int i=0 ; i<numNodesECM ; i++) {
if (dampCoef[i] < smallNumber) {
cout << "damping of element " << i << " is " << dampCoef[i] << " which is wrong" <<endl ;
throw std::invalid_argument ( "damping coefficients in ECM are not set correctly") ;
}
}
#endif
}
|
c3dd77d62a659921b73b784d0b926a91c7df4598.cu
|
#include "SceECM.h"
#include "SceCells.h" // Because of forward declaration
//# define debugModeECM
// bending stiffness is given inside the code. It should be given as in input from a txt file.
//isInitPhase bool variable is not active anymore.
//Right now it is assumed that ECM stiffness is the same everywhere.
__constant__ double sceInterCell_ECM[5];
//__constant__ double wLCPara_ECM[4];
__constant__ double restLenECMAdhSpringGPU ;
__constant__ double maxLenECMAdhSpringGPU ;
__constant__ double kAdhECMGPU ;
__constant__ double stiffnessECMBasalGPU ;
__constant__ double stiffnessECMBCGPU ;
__constant__ double stiffnessECMPeripGPU ;
__constant__ double lknotECMBasalGPU ;
__constant__ double lknotECMBCGPU ;
__constant__ double lknotECMPeripGPU ;
const double smallNumber=.000001 ;
namespace patch{
template <typename T> std::string to_string (const T& n)
{
std:: ostringstream stm ;
stm << n ;
return stm.str() ;
}
}
__device__
void DefineECMStiffnessAndLknot ( EType nodeType, double & stiffness, double & sponLen) {
if (nodeType==excm) {
stiffness=stiffnessECMBasalGPU ;
sponLen=lknotECMBasalGPU ;
}
if (nodeType==perip) {
stiffness=stiffnessECMPeripGPU ;
sponLen=lknotECMPeripGPU ;
}
if (nodeType==bc2) {
stiffness=stiffnessECMBCGPU;
sponLen=lknotECMBCGPU ;
}
}
__device__
double calMorse_ECM(const double& linkLength ) {
double forceValue=0.0 ;
if (linkLength > sceInterCell_ECM[4]) {
forceValue = 0;
} else {
forceValue = -sceInterCell_ECM[0] / sceInterCell_ECM[2]
* exp(-linkLength / sceInterCell_ECM[2])
+ sceInterCell_ECM[1] / sceInterCell_ECM[3]
* exp(-linkLength / sceInterCell_ECM[3]);
// if (forceValue > 0) {
// forceValue = 0;
// }
}
return (forceValue) ;
}
__device__
double calMorseEnergy_ECM(const double& linkLength ) {
double energyValue=0.0 ;
if (linkLength > sceInterCell_ECM[4]) {
energyValue = 0;
} else {
energyValue = sceInterCell_ECM[0]* exp(-linkLength / sceInterCell_ECM[2])
- sceInterCell_ECM[1]* exp(-linkLength / sceInterCell_ECM[3]);
}
return (energyValue) ;
}
/*
__device__
double calWLC_ECM(const double& linkLength ) {
double x=linkLength/wLCPara_ECM[0] ;
return (wLCPara_ECM[1]*( 6*x+ ( x*x*(3.0-2*x))/( (1-x)*(1-x) ) )
-wLCPara_ECM[2]/pow(linkLength,wLCPara_ECM[3]) ) ;
}
*/
__device__
bool IsValidAdhPair(const double& dist ) {
if (dist > restLenECMAdhSpringGPU && dist < maxLenECMAdhSpringGPU){
return true ;
}
else {
return false ;
}
}
__device__
bool IsValidAdhPairForNotInitPhase(const double& dist ) {
if (dist > restLenECMAdhSpringGPU){
return true ;
}
else {
return false ;
}
}
__device__
double CalAdhECM(const double& dist ) {
return (kAdhECMGPU*(dist-restLenECMAdhSpringGPU));
// in the function IsValid pair, distance already checked to be greater than neutral length
}
__device__
double CalAdhEnergy(const double& dist ) {
return (0.5*kAdhECMGPU*(dist-restLenECMAdhSpringGPU)*(dist-restLenECMAdhSpringGPU));
// in the function IsValid pair, distance already checked to be greater than neutral length
}
EType SceECM:: ConvertStringToEType(string eNodeRead) {
if (eNodeRead=="perip") {
return perip ;
}
else if (eNodeRead=="bc2") {
return bc2 ;
}
else if (eNodeRead=="excm") {
return excm ;
}
else {
cout << "Error in defining type of external nodes" << endl ;
return excm ;// To just return something to avoid a compiler complaint
}
}
SceECM::SceECM() {
isECMNeighborSet=false ;
eCMRemoved=false ;
isECMNeighborResetPostDivision = false;
}
void SceECM::Initialize(uint maxAllNodePerCellECM, uint maxMembrNodePerCellECM, uint maxTotalNodesECM, int freqPlotData, string uniqueSymbol) {
maxAllNodePerCell=maxAllNodePerCellECM ;
maxMembrNodePerCell= maxMembrNodePerCellECM ;
maxTotalNodes=maxTotalNodesECM ; //Ali
this->freqPlotData=freqPlotData ;
this->uniqueSymbol=uniqueSymbol ;
std::fstream readCoord_ECM ;
std::fstream readInput_ECM ;
int numberNodes_ECM ;
double tmpPosX_ECM,tmpPosY_ECM ;
vector<double> posXIni_ECM,posYIni_ECM ;
vector <EType> eNodeVec ;
int resumeSimulation = globalConfigVars.getConfigValue(
"ResumeSimulation").toInt();
if (resumeSimulation==0) {
cout << " In the ECM module, I am in start mode" << endl ;
readCoord_ECM.open("./resources/coordinate_ECM21.txt") ;
}
else if(resumeSimulation==1) {
cout << " In the ECM module, I am in resume mode" << endl ;
std::string secondInputFileName = "./resources/DataFileECM_" + uniqueSymbol + "Resume.cfg";
readCoord_ECM.open(secondInputFileName.c_str()) ;
}
else{
throw std::invalid_argument(" ResumeSimulation parameter in the input file must be either 1 or 0. Error from ECM module");
}
if (readCoord_ECM.is_open()) {
cout << "ECM coordinates file opened successfully" <<endl ;
}
else {
cout << "ECM coordinates file is not opened successfully" << endl ;
}
string inputInfoText ;
string eNodeRead ;
readCoord_ECM>>numberNodes_ECM ;
for (int i=0 ; i<numberNodes_ECM ; i++){
readCoord_ECM>>tmpPosX_ECM>>tmpPosY_ECM>>eNodeRead ;
posXIni_ECM.push_back(tmpPosX_ECM) ;
posYIni_ECM.push_back(tmpPosY_ECM) ;
EType eNode=ConvertStringToEType(eNodeRead) ;
eNodeVec.push_back(eNode) ;
}
readInput_ECM.open("./resources/ECM_input.txt") ;
if (readInput_ECM.is_open()) {
cout << "ECM Mech input opened successfully" <<endl ;
}
else {
cout << "ECM Mech input is not opened successfully" << endl ;
}
readInput_ECM>> inputInfoText ;
for (int i=0 ; i<5; i++) {
readInput_ECM>> mechPara_ECM.sceInterCellCPU_ECM[i] ; //=39.0 ;
}
// readInput_ECM>>restLenECMSpring ;
// readInput_ECM>>eCMLinSpringStiff ;
readInput_ECM>>restLenECMAdhSpring ;
readInput_ECM>>maxLenECMAdhSpring ;
readInput_ECM>>kAdhECM ;
//for ( int i=0 ; i<4 ; i++) {
// readInput_ECM>>mechPara_ECM.wLCParaCPU_ECM[i] ;
// }
std::fstream secondInput_ECM ;
std:: string secondInputInfo ; //dummy
std::string secondInputFileName = "./resources/ECM_" + uniqueSymbol + "input.cfg";
secondInput_ECM.open(secondInputFileName.c_str()) ;
//secondInput_ECM.open("./resources/ECM_N01G00_input.cfg" ) ;
if (secondInput_ECM.is_open()) {
cout << "Second ECM Mech input opened successfully" <<endl ;
}
else {
cout << "Second ECM Mech input is not opened successfully" << endl ;
}
secondInput_ECM>>secondInputInfo ; // just for information no use in the code
secondInput_ECM>>stiffnessECMBasal ;
secondInput_ECM>>stiffnessECMBC ;
secondInput_ECM>>stiffnessECMPerip ;
secondInput_ECM>>lknotECMBasal ;
secondInput_ECM>>lknotECMBC ;
secondInput_ECM>>lknotECMPerip ;
secondInput_ECM>>dampBasal ;
secondInput_ECM>>dampBC ;
secondInput_ECM>>dampApical ;
cout <<" stiffness of ECM at the basal side is="<<stiffnessECMBasal <<endl ;
cout <<" stiffness of ECM at boundary is="<<stiffnessECMBC<<endl ;
cout <<" stiffness of ECM peripodial side is="<<stiffnessECMPerip<<endl ;
cout <<" rest len basal ECM is="<<lknotECMBasal<<endl ;
cout <<" rest len boundary ECM is= "<<lknotECMBC<<endl ;
cout << "rest len peripodial ECM is=" <<lknotECMPerip <<endl ;
cout << "Damping for basal ECM is="<<dampBasal<<endl ;
cout << "Damping for boundary ECM is= "<<dampBC<<endl ;
cout << "Damping for peripodial ECM is=" <<dampApical <<endl ;
cout << "number of ECM nodes is"<< numberNodes_ECM <<endl ;
for (int i=0 ; i<5; i++) {
cout <<"Morse parameter number"<<i<<" is " <<mechPara_ECM.sceInterCellCPU_ECM[i]<<endl ;
}
//cout <<"rest length of ECM spring is "<<restLenECMSpring<<endl ;
// cout <<"ECM spring stiffness is "<<eCMLinSpringStiff<<endl ;
cout <<"ECM Membrane neutral adhesion length is "<<restLenECMAdhSpring<<endl ;
cout <<"ECM Membrane max adhesion length is "<<maxLenECMAdhSpring<<endl ;
cout <<"ECM Membrane adhesion stiffness is "<<kAdhECM<<endl ;
cout << "ECM only applies adhesvie force" << endl ;
//for ( int i=0 ; i<4 ; i++) {
// cout<<"wLC parameter "<< i << " is "<<mechPara_ECM.wLCParaCPU_ECM[i]<<endl ; ;
//}
cudaMemcpyToSymbol(sceInterCell_ECM,mechPara_ECM.sceInterCellCPU_ECM
,5*sizeof(double));
//cudaMemcpyToSymbol(wLCPara_ECM,mechPara_ECM.wLCParaCPU_ECM
// ,4*sizeof(double));
cudaMemcpyToSymbol(restLenECMAdhSpringGPU, &restLenECMAdhSpring,sizeof(double));
cudaMemcpyToSymbol(maxLenECMAdhSpringGPU, &maxLenECMAdhSpring,sizeof(double));
cudaMemcpyToSymbol(kAdhECMGPU, &kAdhECM,sizeof(double));
cudaMemcpyToSymbol(stiffnessECMPeripGPU, &stiffnessECMPerip,sizeof(double));
cudaMemcpyToSymbol(stiffnessECMBCGPU, &stiffnessECMBC,sizeof(double));
cudaMemcpyToSymbol(stiffnessECMBasalGPU, &stiffnessECMBasal,sizeof(double));
cudaMemcpyToSymbol(lknotECMPeripGPU, & lknotECMPerip,sizeof(double));
cudaMemcpyToSymbol(lknotECMBCGPU, & lknotECMBC,sizeof(double));
cudaMemcpyToSymbol(lknotECMBasalGPU, & lknotECMBasal,sizeof(double));
counter=100000 ; //large number
lastPrintECM=1000000 ; // large number
outputFrameECM=0 ;
numNodesECM= numberNodes_ECM ; //(eCMMaxX-eCMMinX)/eCMMinDist ;
indexECM.resize(numNodesECM,0) ;
peripORexcm.resize(numNodesECM,perip) ;
dampCoef.resize(numNodesECM) ;
nodeECMLocX.resize(numNodesECM,0.0) ;
nodeECMLocY.resize(numNodesECM,0.0) ;
cellNeighborId.resize(numNodesECM,-1) ;
stiffLevel.resize(numNodesECM) ;
sponLen.resize(numNodesECM) ;
linSpringForceECMX.resize(numNodesECM,0.0);
linSpringForceECMY.resize(numNodesECM,0.0);
linSpringAvgTension.resize(numNodesECM,0.0);
linSpringEnergy.resize(numNodesECM,0.0);
morseEnergy.resize(numNodesECM,0.0);
adhEnergy.resize(numNodesECM,0.0);
bendSpringForceECMX.resize(numNodesECM,0.0);
bendSpringForceECMY.resize(numNodesECM,0.0);
memMorseForceECMX.resize(numNodesECM,0.0);
memMorseForceECMY.resize(numNodesECM,0.0);
fBendCenterX.resize(numNodesECM,0.0);
fBendCenterY.resize(numNodesECM,0.0);
fBendLeftX.resize(numNodesECM,0.0);
fBendLeftY.resize(numNodesECM,0.0);
fBendRightX.resize(numNodesECM,0.0);
fBendRightY.resize(numNodesECM,0.0);
totalForceECMX.resize(numNodesECM,0.0);
totalForceECMY.resize(numNodesECM,0.0);
totalExplicitForceECMX.resize(numNodesECM,0.0);
totalExplicitForceECMY.resize(numNodesECM,0.0);
rHSX.resize(numNodesECM,0.0);
rHSY.resize(numNodesECM,0.0);
//memNodeType.resize(maxTotalNodes,notAssigned1) ;
nodeIsActive.resize(numNodesECM,true) ;
thrust::sequence (indexECM.begin(),indexECM.begin()+numNodesECM);
thrust::copy(posXIni_ECM.begin(),posXIni_ECM.end(),nodeECMLocX.begin()) ;
thrust::copy(posYIni_ECM.begin(),posYIni_ECM.end(),nodeECMLocY.begin()) ;
thrust::copy(eNodeVec.begin(),eNodeVec.end(),peripORexcm.begin()) ;
AssignDampCoef() ;
// cout << "GPU level initial coordinates and type of external nodes are: " << endl ;
// for (int i=0; i<nodeECMLocX.size() ; i++) {
// cout<< nodeECMLocX[i]<<", "<<nodeECMLocY[i]<<", "<<peripORexcm[i] << endl;
// }
PrintECM(0.0) ;
std::string cSVFileName = "./ECMFolder/EnergyExport_" + uniqueSymbol + ".CSV";
ofstream EnergyExport ;
EnergyExport.open(cSVFileName.c_str());
EnergyExport <<"Time,"<<"TotalMorseEnergyECM," << "TotalAdhEnergyECM,"<<"TotalLinSpringEnergy,"<<"TotalEnergy, " <<"TotalEnergyDerivative"<< std::endl;
} //initialization function finished
void SceECM:: ApplyECMConstrain(int currentActiveCellCount, int totalNodeCountForActiveCellsECM, double curTime, double dt, double Damp_CoefCell, bool cellPolar, bool subCellPolar, bool isInitPhase, double timeRatio, double timeRatio_Crit_ECM, double timeRatio_Crit_Division){
bool implicit_solver_active = false ;
if (eCMRemoved) {
PrintECMRemoved(curTime);
cout << "ECM is removed" << endl ;
return ;
}
// if (timeRatio == timeRatio_Crit_ECM){
// cout<<"Localized ECM weakening is triggered"<<endl;
// }
#ifdef debugModeECM
cudaEvent_t start1, start2, start3, start4, start5, start6, start7, start8, stop;
float elapsedTime1, elapsedTime2, elapsedTime3, elapsedTime4, elapsedTime5, elapsedTime6, elapsedTime7 , elapsedTime8 ;
cudaEventCreate(&start1);
cudaEventCreate(&start2);
cudaEventCreate(&start3);
cudaEventCreate(&start4);
cudaEventCreate(&start5);
cudaEventCreate(&start6);
cudaEventCreate(&start7);
cudaEventCreate(&start8);
cudaEventCreate(&stop);
cudaEventRecord(start1, 0);
#endif
nodeCellLocXOld.resize(totalNodeCountForActiveCellsECM) ;
nodeCellLocYOld.resize(totalNodeCountForActiveCellsECM) ;
integrinMultipOld.resize(totalNodeCountForActiveCellsECM) ;
// nodeCellLocZOld.resize(totalNodeCountForActiveCellsECM) ;
adhPairECM_Cell.resize(totalNodeCountForActiveCellsECM,-1) ;
morseEnergyCell.resize(totalNodeCountForActiveCellsECM,0.0);
adhEnergyCell.resize(totalNodeCountForActiveCellsECM,0.0);
thrust::copy(nodesPointerECM->getInfoVecs().nodeLocX.begin(),nodesPointerECM->getInfoVecs().nodeLocX.begin()+totalNodeCountForActiveCellsECM,nodeCellLocXOld.begin()) ;
thrust::copy(nodesPointerECM->getInfoVecs().nodeLocY.begin(),nodesPointerECM->getInfoVecs().nodeLocY.begin()+totalNodeCountForActiveCellsECM,nodeCellLocYOld.begin()) ;
thrust::copy(nodesPointerECM->getInfoVecs().nodeIntegrinMultip.begin(),nodesPointerECM->getInfoVecs().nodeIntegrinMultip.begin()+totalNodeCountForActiveCellsECM,integrinMultipOld.begin()) ;
// thrust::copy(nodesPointerECM->getInfoVecs().nodeLocZ.begin(),nodesPointerECM->getInfoVecs().nodeLocZ.begin()+totalNodeCountForActiveCellsECM,nodeCellLocZOld.begin()) ;
#ifdef debugModeECM
cudaEventRecord(start2, 0);
cudaEventSynchronize(start2);
cudaEventElapsedTime(&elapsedTime1, start1, start2);
#endif
thrust:: transform (peripORexcm.begin(), peripORexcm.begin()+numNodesECM,
thrust::make_zip_iterator (thrust::make_tuple (stiffLevel.begin(),sponLen.begin())),MechProp());
// cout << " Mechanical properties after assignment is " << stiffLevel[0] << endl ;
counter ++ ;
//if (counter>=100 || curTime<(100*dt) || isECMNeighborSet==false) {
// if (curTime<(100*dt) || isECMNeighborSet==false) {
if (curTime < (100*dt) || isECMNeighborSet == false){
isECMNeighborSet=true ;
counter=0 ;
FindNeighborCandidateForCellsAndECMNodes();
}
// else if (timeRatio > timeRatio_Crit_Division && isECMNeighborResetPostDivision == false){
// if (cellsPointerECM->getCellInfoVecs().isOneTimeStepPostDivision ==true || cellsPointerECM->getCellInfoVecs().isTwoTimeStepPostDivision ==true){
if (cellsPointerECM->getCellInfoVecs().isPostDivision ==true || cellsPointerECM->getCellInfoVecs().isPostAddMembrNodes == true){
std::cout<<"Resetting ecm and cell neighbor info! post division!"<<std::endl;
FindNeighborCandidateForCellsAndECMNodes();
isECMNeighborResetPostDivision=true;
}
// if (timeRatio == timeRatio_Crit_Division){
// isECMNeighborResetPostDivision=false;
// }
// }
#ifdef debugModeECM
cudaEventRecord(start3, 0);
cudaEventSynchronize(start3);
cudaEventElapsedTime(&elapsedTime2, start2, start3);
#endif
MoveCellNodesByECMForces(totalNodeCountForActiveCellsECM,currentActiveCellCount,dt, Damp_CoefCell) ;
/* To reduce computational cost
energyECM.totalMorseEnergyCellECM = thrust::reduce( morseEnergyCell.begin(),morseEnergyCell.begin()+totalNodeCountForActiveCellsECM,(double) 0.0, thrust::plus<double>() );
energyECM.totalAdhEnergyCellECM = thrust::reduce( adhEnergyCell.begin() ,adhEnergyCell.begin() +totalNodeCountForActiveCellsECM,(double) 0.0, thrust::plus<double>() );
*/
CalLinSpringForce(timeRatio, timeRatio_Crit_ECM);
CalBendSpringForce();
#ifdef debugModeECM
cudaEventRecord(start4, 0);
cudaEventSynchronize(start4);
cudaEventElapsedTime(&elapsedTime3, start3, start4);
#endif
CalCellForcesOnECM() ;
//energyECM.totalLinSpringEnergyECM = 0.5 * ( thrust::reduce( linSpringEnergy.begin(),linSpringEnergy.begin()+numNodesECM,(double) 0.0, thrust::plus<double>() ));
//to make sure it is based on the distance used for action force calculation.
/* To reduce computational cost
energyECM.totalMorseEnergyECMCell = thrust::reduce( morseEnergy.begin(),morseEnergy.begin()+numNodesECM,(double) 0.0, thrust::plus<double>() );
energyECM.totalAdhEnergyECMCell = thrust::reduce( adhEnergy.begin() ,adhEnergy.begin() +numNodesECM,(double) 0.0, thrust::plus<double>() );
*/
if (!implicit_solver_active) {
// Calculate summation of all forces and move nodes if explicit solver is going to be used
CalSumForcesOnECM() ;
MoveNodesBySumAllForces(dt) ;
}
if (implicit_solver_active) {
//Calculate right hand side of implicit solver which includes explicit forces
CalSumOnlyExplicitForcesOnECM() ;
CalRHS(dt) ;
}
#ifdef debugModeECM
cudaEventRecord(start5, 0);
cudaEventSynchronize(start5);
cudaEventElapsedTime(&elapsedTime4, start4, start5);
#endif
//Create tmp CPU vectors used by the implicit solver. The declarations cannot be placed inside the if condition
vector <double> tmpRHSX(numNodesECM);
vector <double> tmpRHSY(numNodesECM);
tmpHostNodeECMLocX.resize(numNodesECM);
tmpHostNodeECMLocY.resize(numNodesECM);
if (implicit_solver_active) {
// Copy ECM locations from GPU to CPU if implicit solver is used
thrust::copy (rHSX.begin(), rHSX.begin()+numNodesECM, tmpRHSX.begin());
thrust::copy (rHSY.begin(), rHSY.begin()+numNodesECM, tmpRHSY.begin());
thrust::copy (nodeECMLocX.begin(), nodeECMLocX.begin()+numNodesECM, tmpHostNodeECMLocX.begin());
thrust::copy (nodeECMLocY.begin(), nodeECMLocY.begin()+numNodesECM, tmpHostNodeECMLocY.begin());
//cout << "max RHSX is " << *max_element(tmpRHSX.begin(), tmpRHSX.begin()+numNodesECM) << endl ;
//cout << "min RHSX is " << *min_element(tmpRHSX.begin(), tmpRHSX.begin()+numNodesECM) << endl ;
//cout << "max RHSY is " << *max_element(tmpRHSY.begin(), tmpRHSY.begin()+numNodesECM) << endl ;
//cout << "min RHSY is " << *min_element(tmpRHSY.begin(), tmpRHSY.begin()+numNodesECM) << endl ;
}
#ifdef debugModeECM
cudaEventRecord(start6, 0);
cudaEventSynchronize(start6);
cudaEventElapsedTime(&elapsedTime5, start5, start6);
#endif
if (implicit_solver_active) {
// setting up the equation of motion if the implicit solver is used
EquMotionCoef (dt);
}
#ifdef debugModeECM
cudaEventRecord(start7, 0);
cudaEventSynchronize(start7);
cudaEventElapsedTime(&elapsedTime6, start6, start7);
#endif
if (implicit_solver_active) {
// Fetch the implicit solver and update ECM location if implicit solver is used
tmpHostNodeECMLocX =solverPointer->SOR3DiagPeriodic(nodeIsActive,hCoefLd, hCoefD, hCoefUd,tmpRHSX,indexPrev, indexNext, tmpHostNodeECMLocX);
tmpHostNodeECMLocY =solverPointer->SOR3DiagPeriodic(nodeIsActive,hCoefLd, hCoefD, hCoefUd,tmpRHSY,indexPrev,indexNext, tmpHostNodeECMLocY);
// copy ECM node locations back from CPU to GPU if implicit solver is used
thrust::copy (tmpHostNodeECMLocX.begin(), tmpHostNodeECMLocX.begin()+numNodesECM, nodeECMLocX.begin());
thrust::copy (tmpHostNodeECMLocY.begin(), tmpHostNodeECMLocY.begin()+numNodesECM, nodeECMLocY.begin());
}
#ifdef debugModeECM
cudaEventRecord(start8, 0);
cudaEventSynchronize(start8);
cudaEventElapsedTime(&elapsedTime7, start7, start8);
#endif
/* To reduce computational cost
cout << "total Morse energy for cell-ECM is= "<< energyECM.totalMorseEnergyCellECM << endl ;
cout << "total Morse energy for ECM-cell is= "<< energyECM.totalMorseEnergyECMCell << endl ;
cout << "total adhesion energy for cell-ECM is= "<< energyECM.totalAdhEnergyCellECM << endl ;
cout << "total adhesion energy for ECM-cell is= "<< energyECM.totalAdhEnergyECMCell << endl ;
//assert (abs (energyECM.totalMorseEnergyCellECM-energyECM.totalMorseEnergyECMCell)<1.0) ;
//assert (abs (energyECM.totalAdhEnergyCellECM- energyECM.totalAdhEnergyECMCell) <1.0) ;
if ( (abs (energyECM.totalMorseEnergyCellECM-energyECM.totalMorseEnergyECMCell)>1.0) ||
(abs (energyECM.totalAdhEnergyCellECM- energyECM.totalAdhEnergyECMCell) >1.0)
) {
cout << "Warning: Action and reaction forces in the ECM do not match each other" << endl ;
}
*/
# ifdef debugModeECM
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime8, start8, stop);
std::cout << "time 1 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime1 << endl ;
std::cout << "time 2 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime2 << endl ;
std::cout << "time 3 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime3 << endl ;
std::cout << "time 4 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime4 << endl ;
std::cout << "time 5 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime5 << endl ;
std::cout << "time 6 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime6 << endl ;
std::cout << "time 7 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime7 << endl ;
std::cout << "time 8 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime8 << endl ;
#endif
//throw std::invalid_argument(" Solver called properly and I want to stop the code");
PrintECM(curTime);
}
void SceECM:: PrintECM(double curTime) {
lastPrintECM=lastPrintECM+1 ;
if (lastPrintECM>=freqPlotData) {
outputFrameECM++ ;
lastPrintECM=0 ;
cout << " I am in regular print function" << endl ;
// First ECM output file for paraview //
std::string vtkFileName = "./ECMFolder/ECM_" + uniqueSymbol +patch::to_string(outputFrameECM-1) + ".vtk";
ofstream ECMOut;
ECMOut.open(vtkFileName.c_str());
ECMOut<< "# vtk DataFile Version 3.0" << endl;
ECMOut<< "Result for paraview 2d code" << endl;
ECMOut << "ASCII" << endl;
ECMOut << "DATASET UNSTRUCTURED_GRID" << std::endl;
ECMOut << "POINTS " << nodeECMLocX.size() << " float" << std::endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut << nodeECMLocX[i] << " " << nodeECMLocY[i] << " "
<< 0.0 << std::endl;
}
ECMOut<< std::endl;
ECMOut<< "CELLS " << nodeECMLocX.size()<< " " << 3 *nodeECMLocX.size()<< std::endl;
for (uint i = 0; i < (nodeECMLocX.size()-1); i++) {
ECMOut << 2 << " " << indexECM[i] << " "
<< indexECM[i+1] << std::endl;
}
ECMOut << 2 << " " << indexECM[nodeECMLocX.size()-1] << " "<< indexECM[0] << std::endl; //last point to the first point
ECMOut << "CELL_TYPES " << nodeECMLocX.size()<< endl;
for (uint i = 0; i < nodeECMLocX.size() ; i++) {
ECMOut << "3" << endl;
}
ECMOut << "POINT_DATA "<<nodeECMLocX.size() <<endl ;
ECMOut << "SCALARS Avg_Tension " << "float"<< endl;
ECMOut << "LOOKUP_TABLE " << "default"<< endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut<<linSpringAvgTension[i] <<endl ;
}
ECMOut << "SCALARS Node_Type " << "float"<< endl;
ECMOut << "LOOKUP_TABLE " << "default"<< endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut<<peripORexcm[i] <<endl ;
}
ECMOut.close();
// second output file for curvature estimation //
std::string txtFileName = "./ECMFolder/ECMLocationExport_" + uniqueSymbol+ patch::to_string(outputFrameECM-1) + ".txt";
ofstream ECMLocationExport ;
ECMLocationExport.open(txtFileName.c_str());
//ECMExport << "ECM pouch coordinates" << std::endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
// if (peripORexcm[i]==excm) {
ECMLocationExport<< nodeECMLocX[i] << " " << nodeECMLocY[i] << " " << 0.0 << " "<< peripORexcm[i]<<std::endl;
// }
}
//ECMExport << "ECM lumen side coordinates" << std::endl;
// for (uint i = 0; i < nodeECMLocX.size(); i++) {
// if (peripORexcm[i]==perip) {
// ECMLocationExport << nodeECMLocX[i] << " " << nodeECMLocY[i] << " "
// << 0.0 << std::endl;
// }
// }
ECMLocationExport.close();
//Third write file for ECM
txtFileName = "./ECMFolder/ECMTensionExport_" + uniqueSymbol+ patch::to_string(outputFrameECM-1) + ".txt";
ofstream ECMTensionExport ;
ECMTensionExport.open(txtFileName.c_str());
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMTensionExport<< linSpringAvgTension[i]<< " " << peripORexcm[i]<< std::endl;
}
ECMTensionExport.close();
///
//Fourth write file for ECM
energyECM.totalEnergyECMOld=energyECM.totalEnergyECM ;
energyECM.totalEnergyECM= energyECM.totalMorseEnergyECMCell
+ energyECM.totalAdhEnergyECMCell
+ energyECM.totalLinSpringEnergyECM ;
std::string cSVFileName = "./ECMFolder/EnergyExport_" + uniqueSymbol+ ".CSV";
ofstream EnergyExport ;
EnergyExport.open(cSVFileName.c_str(),ofstream::app);
//EnergyExport <<"totalMorseEnergyCell " << "totalAdhEnergyCell "<< "totalMorseEnergy "<<"totalAdhEnergy "<< "totalLinSpringEnergy " << std::endl;
EnergyExport <<curTime<<","<<energyECM.totalMorseEnergyECMCell << "," << energyECM.totalAdhEnergyECMCell<< "," << energyECM.totalLinSpringEnergyECM <<"," << energyECM.totalEnergyECM <<","<<energyECM.totalEnergyPrimeECM <<std::endl;
}
}
// This just creates a file so that the movie can be generated with consistent frames
void SceECM:: PrintECMRemoved(double curTime) {
lastPrintECM=lastPrintECM+1 ;
if (lastPrintECM>=freqPlotData) {
outputFrameECM++ ;
lastPrintECM=0 ;
cout << " I am in ECM removed print function" << endl ;
// First ECM output file for paraview //
std::string vtkFileName = "./ECMFolder/ECM_" + uniqueSymbol +patch::to_string(outputFrameECM-1) + ".vtk";
ofstream ECMOut;
ECMOut.open(vtkFileName.c_str());
ECMOut<< "# vtk DataFile Version 3.0" << endl;
ECMOut<< "Result for paraview 2d code" << endl;
ECMOut << "ASCII" << endl;
ECMOut << "DATASET UNSTRUCTURED_GRID" << std::endl;
ECMOut << "POINTS " << nodeECMLocX.size() << " float" << std::endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut << -500.0 << " " << -500.0 << " "
<< 0.0 << std::endl; // Just out of domain
}
ECMOut<< std::endl;
ECMOut<< "CELLS " << nodeECMLocX.size()<< " " << 3 *nodeECMLocX.size()<< std::endl;
for (uint i = 0; i < (nodeECMLocX.size()-1); i++) {
ECMOut << 2 << " " << indexECM[i] << " "
<< indexECM[i+1] << std::endl;
}
ECMOut << 2 << " " << indexECM[nodeECMLocX.size()-1] << " "<< indexECM[0] << std::endl; //last point to the first point
ECMOut << "CELL_TYPES " << nodeECMLocX.size()<< endl;
for (uint i = 0; i < nodeECMLocX.size() ; i++) {
ECMOut << "3" << endl;
}
ECMOut << "POINT_DATA "<<nodeECMLocX.size() <<endl ;
ECMOut << "SCALARS Avg_Tension " << "float"<< endl;
ECMOut << "LOOKUP_TABLE " << "default"<< endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut<<linSpringAvgTension[i] <<endl ;
}
ECMOut << "SCALARS Node_Type " << "float"<< endl;
ECMOut << "LOOKUP_TABLE " << "default"<< endl;
for (uint i = 0; i < nodeECMLocX.size(); i++) {
ECMOut<<peripORexcm[i] <<endl ;
}
ECMOut.close();
}
}
AniResumeData SceECM:: obtainResumeData() {
AniResumeData aniResumeData ;
thrust:: host_vector<double> hostTmpLocX;
thrust:: host_vector<double> hostTmpLocY;
thrust:: host_vector<EType> hostTmpType;
hostTmpLocX.resize(numNodesECM) ;
hostTmpLocY.resize(numNodesECM) ;
hostTmpType.resize(numNodesECM) ;
cout << " I am in obtainResumeData function" << endl ;
thrust::copy (
thrust::make_zip_iterator(
thrust::make_tuple(nodeECMLocX.begin(),nodeECMLocY.begin(),peripORexcm.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(nodeECMLocX.begin(),nodeECMLocY.begin(),peripORexcm.begin()))+numNodesECM,
thrust::make_zip_iterator(
thrust::make_tuple(hostTmpLocX.begin(),hostTmpLocY.begin(),hostTmpType.begin())));
cout << " I start passing to regular vector variables" << endl ;
CVector tmp;
for( int i=0 ; i<numNodesECM ; i++) {
tmp=CVector (hostTmpLocX[i], hostTmpLocY[i], 0.0) ;
aniResumeData.nodePosArr.push_back(tmp) ;
aniResumeData.nodeECMType.push_back(hostTmpType[i]) ;
}
return aniResumeData ;
}
void SceECM::EquMotionCoef (double dt) {
vector <double> sponLenHost(numNodesECM) ;
vector <double> sponLenWithNext ;
vector <double> sponLenWithPrev ;
vector <double> distWithNext ;
vector <double> distWithPrev ;
vector <double> dampCoefHost ;
sponLenWithNext.clear();
sponLenWithPrev.clear();
distWithNext.clear() ;
distWithPrev.clear() ;
hCoefLd.clear() ;
hCoefUd.clear() ;
hCoefD.clear() ;
indexNext.clear() ;
indexPrev.clear() ;
dampCoefHost.clear() ;
indexNext.resize(numNodesECM) ;
indexPrev.resize(numNodesECM) ;
dampCoefHost.resize(numNodesECM) ;
thrust::copy(sponLen.begin(),sponLen.begin()+numNodesECM, sponLenHost.begin()) ;
thrust::copy(dampCoef.begin(),dampCoef.begin()+numNodesECM, dampCoefHost.begin()) ;
double k=stiffLevel[0] ; //Assuming the ECM is homogeneous in mechanical properties
for ( int i=0 ; i< numNodesECM ; i++) {
indexNext.at(i)=i+1 ;
indexPrev.at(i)=i-1 ;
if (i==numNodesECM-1){
indexNext.at(i)=0 ;
}
if (i==0){
indexPrev.at(i)=numNodesECM-1 ;
}
sponLenWithNext.push_back( 0.5*(sponLenHost[indexNext.at(i)]+sponLenHost[i]) );
sponLenWithPrev.push_back( 0.5*(sponLenHost[indexPrev.at(i)]+sponLenHost[i]) );
distWithNext.push_back(sqrt( pow(tmpHostNodeECMLocX[indexNext.at(i)]-tmpHostNodeECMLocX[i],2) +
pow(tmpHostNodeECMLocY[indexNext.at(i)]-tmpHostNodeECMLocY[i],2))) ;
distWithPrev.push_back(sqrt( pow(tmpHostNodeECMLocX[indexPrev.at(i)]-tmpHostNodeECMLocX[i],2) +
pow(tmpHostNodeECMLocY[indexPrev.at(i)]-tmpHostNodeECMLocY[i],2)));
}
for ( int i=0 ; i< numNodesECM ; i++) {
hCoefD.push_back (1 + k*dt/dampCoefHost.at(i)*( 2 - sponLenWithPrev.at(i)/(distWithPrev.at(i) + 0.0001 )
- sponLenWithNext.at(i)/(distWithNext.at(i) + 0.0001 ))) ;
hCoefLd.push_back( k*dt/dampCoefHost.at(i)*(-1 + sponLenWithPrev.at(i)/(distWithPrev.at(i) + 0.0001 ))) ;
hCoefUd.push_back( k*dt/dampCoefHost.at(i)*(-1 + sponLenWithNext.at(i)/(distWithNext.at(i) + 0.0001 ))) ;
}
#ifdef debugModeECM
cout <<"max distance with next node is" <<*max_element ( distWithNext.begin(), distWithNext.begin()+numNodesECM) <<endl ;
cout <<"min distance with next node is" << *min_element ( distWithNext.begin(), distWithNext.begin()+numNodesECM) <<endl ;
cout <<"max distance with previous node is" <<*max_element ( distWithPrev.begin(), distWithPrev.begin()+numNodesECM) <<endl ;
cout <<"min distance with previous node is" << *min_element ( distWithPrev.begin(), distWithPrev.begin()+numNodesECM) <<endl ;
vector < double> hCoefDAbs;
hCoefDAbs.clear() ;
for ( int i=0 ; i< numNodesECM ; i++) {
hCoefDAbs.push_back (abs(1 + k*dt/dampCoefHost.at(i)*( 2 - sponLenWithPrev.at(i)/(distWithPrev.at(i) + 0.0001 )
- sponLenWithNext.at(i)/(distWithNext.at(i) + 0.0001 )))) ;
}
cout <<"max main diag. elment is " << *max_element ( hCoefD.begin(), hCoefD.begin() +numNodesECM) <<endl ;
cout <<"min main diag. element is " << *min_element ( hCoefD.begin(), hCoefD.begin() +numNodesECM) <<endl ;
cout <<"min main Abs(diag.) element is " << *min_element ( hCoefDAbs.begin(), hCoefDAbs.begin() +numNodesECM) <<endl ;
cout <<"max upper diag. element is " << *max_element ( hCoefUd.begin(), hCoefUd.begin()+numNodesECM) <<endl ;
cout <<"min upper diag. element is " << *min_element ( hCoefUd.begin(), hCoefUd.begin()+numNodesECM) <<endl ;
cout <<"max lower diag. element is " << *max_element ( hCoefLd.begin(), hCoefLd.begin()+numNodesECM) <<endl ;
cout <<"min lower diag. element is " << *min_element ( hCoefLd.begin(), hCoefLd.begin()+numNodesECM) <<endl ;
cout <<"stiffness, time step and first element of damping vector is " << endl ;
cout << k <<","<< dt<<"," << dampCoefHost.at(0) << endl ;
cout << "constants for stiffness matrix calculated " << endl ;
cout << "last diagonal element is " << hCoefD.at(numNodesECM-1) << endl ;
cout << " number of ECM nodes is "<< numNodesECM << endl ;
# endif
}
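/* Illustrative sketch (not part of the original source): the coefficients assembled in
   EquMotionCoef define, for each ECM node i, one row of a periodic tridiagonal system

       hCoefLd[i]*x[indexPrev[i]] + hCoefD[i]*x[i] + hCoefUd[i]*x[indexNext[i]] = RHS[i]

   which SOR3DiagPeriodic is assumed to relax with successive over-relaxation. A minimal
   host-side sweep, assuming a relaxation factor omega chosen by the solver (x, rhs and
   omega are placeholders; the actual implementation lives in the solver class and may
   differ), would look like:

       for (int i = 0; i < numNodesECM; i++) {
           double gs = (rhs[i] - hCoefLd[i]*x[indexPrev[i]]
                               - hCoefUd[i]*x[indexNext[i]]) / hCoefD[i];
           x[i] = (1.0 - omega)*x[i] + omega*gs;   // omega in (0, 2)
       }
*/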
void SceECM::MoveCellNodesByECMForces(int totalNodeCountForActiveCellsECM,int currentActiveCellCount, double dt, double Damp_CoefCell)
{
double* nodeECMLocXAddr= thrust::raw_pointer_cast (
&nodeECMLocX[0]) ;
double* nodeECMLocYAddr= thrust::raw_pointer_cast (
&nodeECMLocY[0]) ;
EType* peripORexcmAddr= thrust::raw_pointer_cast (
&peripORexcm[0]) ;
bool* isEnteringMitotic = thrust::raw_pointer_cast(
&(cellsPointerECM->getCellInfoVecs().isEnteringMitotic[0]));
// move the nodes of epithelial cells
//// find the closest ECM node to each cell //
int numCells = cellsPointerECM->getCellInfoVecs().basalLocX.size() ;
thrust::counting_iterator<int> iBegin(0) ;
thrust::counting_iterator<int> iBegin2(0) ;
//////////////////////////////////////////
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
make_permutation_iterator(
cellsPointerECM->getCellInfoVecs().eCMNeighborId.begin(),
make_transform_iterator(iBegin2,
DivideFunctor2(
maxAllNodePerCell))),
make_transform_iterator (iBegin,
DivideFunctor2(maxAllNodePerCell)),
make_transform_iterator (iBegin,
ModuloFunctor2(maxAllNodePerCell)),
nodesPointerECM->getInfoVecs().nodeLocX.begin(),
nodesPointerECM->getInfoVecs().nodeLocY.begin(),
nodesPointerECM->getInfoVecs().nodeIsActive.begin(),
nodesPointerECM->getInfoVecs().memNodeType1.begin(),
nodesPointerECM->getInfoVecs().nodeIntegrinMultip.begin()
)),
thrust::make_zip_iterator (
thrust:: make_tuple (
make_permutation_iterator(
cellsPointerECM->getCellInfoVecs().eCMNeighborId.begin(),
make_transform_iterator(iBegin2,
DivideFunctor2(
maxAllNodePerCell))),
make_transform_iterator (iBegin,
DivideFunctor2(maxAllNodePerCell)),
make_transform_iterator (iBegin,
ModuloFunctor2(maxAllNodePerCell)),
nodesPointerECM->getInfoVecs().nodeLocX.begin(),
nodesPointerECM->getInfoVecs().nodeLocY.begin(),
nodesPointerECM->getInfoVecs().nodeIsActive.begin(),
nodesPointerECM->getInfoVecs().memNodeType1.begin(),
nodesPointerECM->getInfoVecs().nodeIntegrinMultip.begin()
))+totalNodeCountForActiveCellsECM,
thrust::make_zip_iterator (
thrust::make_tuple (
nodesPointerECM->getInfoVecs().nodeLocX.begin(),
nodesPointerECM->getInfoVecs().nodeLocY.begin(),
adhPairECM_Cell.begin(),
morseEnergyCell.begin(),
adhEnergyCell.begin())),
MoveNodes2_Cell(nodeECMLocXAddr,nodeECMLocYAddr,maxMembrNodePerCell,numNodesECM,dt,Damp_CoefCell,peripORexcmAddr,currentActiveCellCount, isEnteringMitotic));
}
void SceECM::CalLinSpringForce(double timeRatio, double timeRatio_Crit_ECM)
{
double* nodeECMLocXAddr= thrust::raw_pointer_cast (
&nodeECMLocX[0]) ;
double* nodeECMLocYAddr= thrust::raw_pointer_cast (
&nodeECMLocY[0]) ;
double* stiffLevelAddr=thrust::raw_pointer_cast (
&stiffLevel[0]) ;
double* sponLenAddr =thrust::raw_pointer_cast (
&sponLen[0]) ;
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
linSpringForceECMX.begin(),
linSpringForceECMY.begin(),
linSpringAvgTension.begin(),
linSpringEnergy.begin())),
LinSpringForceECM(numNodesECM,nodeECMLocXAddr,nodeECMLocYAddr,stiffLevelAddr,sponLenAddr, timeRatio, timeRatio_Crit_ECM));
//////////////////////////////////// find the closest Cell to each ECM node ///////////
///////////////////////////////////
//cout << " I am after FindCellNeighbor functor" << endl ;
}
void SceECM::CalBendSpringForce()
{
const double eCMBendStiff=6.0 ; // needs to be an input
double* nodeECMLocXAddr= thrust::raw_pointer_cast (
&nodeECMLocX[0]) ;
double* nodeECMLocYAddr= thrust::raw_pointer_cast (
&nodeECMLocY[0]) ;
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
fBendCenterX.begin(),
fBendCenterY.begin(),
fBendLeftX.begin(),
fBendLeftY.begin(),
fBendRightX.begin(),
fBendRightY.begin())),
CalBendECM(nodeECMLocXAddr,nodeECMLocYAddr,numNodesECM,eCMBendStiff));
double* fBendLeftXAddr= thrust::raw_pointer_cast (
&fBendLeftX[0]) ;
double* fBendLeftYAddr= thrust::raw_pointer_cast (
&fBendLeftY[0]) ;
double* fBendRightXAddr= thrust::raw_pointer_cast (
&fBendRightX[0]) ;
double* fBendRightYAddr= thrust::raw_pointer_cast (
&fBendRightY[0]) ;
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
fBendCenterX.begin(),
fBendCenterY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
fBendCenterX.begin(),
fBendCenterY.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
bendSpringForceECMX.begin(),
bendSpringForceECMY.begin())),
SumBendForce(fBendLeftXAddr,fBendLeftYAddr,fBendRightXAddr,fBendRightYAddr,numNodesECM));
}
void SceECM::CalCellForcesOnECM()
{
bool* nodeIsActiveAddr= thrust::raw_pointer_cast (
& (nodesPointerECM->getInfoVecs().nodeIsActive[0])) ;
int * adhPairECM_CellAddr= thrust::raw_pointer_cast (
&adhPairECM_Cell[0]) ;
//Old locations are chosen to make sure the action-reaction balance of forces between ECM and cell nodes is fully satisfied.
double* nodeCellLocXAddr= thrust::raw_pointer_cast (
&nodeCellLocXOld[0]) ;
double* nodeCellLocYAddr= thrust::raw_pointer_cast (
&nodeCellLocYOld[0]) ;
// double* nodeCellLocZAddr= thrust::raw_pointer_cast (
// &nodeCellLocZOld[0]) ;
double* integrinMultip = thrust::raw_pointer_cast (
&integrinMultipOld[0]);
bool* isEnteringMitotic = thrust::raw_pointer_cast (
&(cellsPointerECM->getCellInfoVecs().isEnteringMitotic[0]));
int numCells = cellsPointerECM->getCellInfoVecs().basalLocX.size() ;
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin(),
cellNeighborId.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
indexECM.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin(),
cellNeighborId.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
memMorseForceECMX.begin(),
memMorseForceECMY.begin(),
morseEnergy.begin(),
adhEnergy.begin())),
MorseAndAdhForceECM(numCells,maxAllNodePerCell,maxMembrNodePerCell,nodeCellLocXAddr,nodeCellLocYAddr,integrinMultip,nodeIsActiveAddr,adhPairECM_CellAddr, isEnteringMitotic));
}
void SceECM::CalSumForcesOnECM()
{
double dummy=0.0 ;
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
linSpringForceECMX.begin(),
linSpringForceECMY.begin(),
bendSpringForceECMX.begin(),
bendSpringForceECMY.begin(),
memMorseForceECMX.begin(),
memMorseForceECMY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
linSpringForceECMX.begin(),
linSpringForceECMY.begin(),
bendSpringForceECMX.begin(),
bendSpringForceECMY.begin(),
memMorseForceECMX.begin(),
memMorseForceECMY.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
totalForceECMX.begin(),
totalForceECMY.begin())),
TotalECMForceCompute(dummy));
}
void SceECM::CalSumOnlyExplicitForcesOnECM() {
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
bendSpringForceECMX.begin(),
bendSpringForceECMY.begin(),
memMorseForceECMX.begin(),
memMorseForceECMY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
bendSpringForceECMX.begin(),
bendSpringForceECMY.begin(),
memMorseForceECMX.begin(),
memMorseForceECMY.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
totalExplicitForceECMX.begin(),
totalExplicitForceECMY.begin())),
TotalExplicitECMForceCompute());
}
void SceECM::CalRHS(double dt)
{
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
totalExplicitForceECMX.begin(),
totalExplicitForceECMY.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin(),
dampCoef.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
totalExplicitForceECMX.begin(),
totalExplicitForceECMY.begin(),
nodeECMLocX.begin(),
nodeECMLocY.begin(),
dampCoef.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
rHSX.begin(),
rHSY.begin())),
RHSCompute(dt));
}
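/* Illustrative sketch (not part of the original source): RHSCompute is assumed to build the
   right-hand side of the implicit update from the current location, the explicit forces
   (bending plus cell-ECM Morse/adhesion) and the per-node damping, roughly

       rHSX[i] = nodeECMLocX[i] + dt/dampCoef[i] * totalExplicitForceECMX[i];
       rHSY[i] = nodeECMLocY[i] + dt/dampCoef[i] * totalExplicitForceECMY[i];

   the linear-spring term is then treated implicitly through the tridiagonal coefficients
   assembled in EquMotionCoef. */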
void SceECM::MoveNodesBySumAllForces(double dt)
{
// move the nodes of ECM
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
nodeECMLocX.begin(),
nodeECMLocY.begin(),
totalForceECMX.begin(),
totalForceECMY.begin(),
dampCoef.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
nodeECMLocX.begin(),
nodeECMLocY.begin(),
totalForceECMX.begin(),
totalForceECMY.begin(),
dampCoef.begin()))+numNodesECM,
thrust::make_zip_iterator (
thrust::make_tuple (
nodeECMLocX.begin(),
nodeECMLocY.begin())),
MoveNodesECM(dt));
}
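/* Illustrative sketch (not part of the original source): MoveNodesECM is assumed to apply an
   explicit, overdamped (forward Euler) update per node, roughly

       nodeECMLocX[i] += dt * totalForceECMX[i] / dampCoef[i];
       nodeECMLocY[i] += dt * totalForceECMY[i] / dampCoef[i];

   i.e. the explicit counterpart of the implicit path through CalRHS and SOR3DiagPeriodic. */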
void SceECM::FindNeighborCandidateForCellsAndECMNodes()
{
double* nodeECMLocXAddr= thrust::raw_pointer_cast (
&nodeECMLocX[0]) ;
double* nodeECMLocYAddr= thrust::raw_pointer_cast (
&nodeECMLocY[0]) ;
double * basalCellLocXAddr= thrust::raw_pointer_cast (
& ( cellsPointerECM->getCellInfoVecs().basalLocX[0]) ) ;
double * basalCellLocYAddr= thrust::raw_pointer_cast (
& ( cellsPointerECM->getCellInfoVecs().basalLocY[0]) ) ;
EType* peripORexcmAddr= thrust::raw_pointer_cast (
&peripORexcm[0]) ;
int numCells = cellsPointerECM->getCellInfoVecs().basalLocX.size() ;
if (cellsPointerECM->getCellInfoVecs().basalLocX.size()>86){
// std::cout<<"In SceECM.cu, basalLoc[86] = "<<cellsPointerECM->getCellInfoVecs().basalLocX[86]<<" "<<cellsPointerECM->getCellInfoVecs().basalLocY[86]<<std::endl;
// std::cout<<"In SceECM.cu, basalLoc[87] = "<<cellsPointerECM->getCellInfoVecs().basalLocX[87]<<" "<<cellsPointerECM->getCellInfoVecs().basalLocY[87]<<std::endl;
}
//// find the closest ECM node to each cell //
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
cellsPointerECM->getCellInfoVecs().basalLocX.begin(),
cellsPointerECM->getCellInfoVecs().basalLocY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
cellsPointerECM->getCellInfoVecs().basalLocX.begin(),
cellsPointerECM->getCellInfoVecs().basalLocY.begin()))+numCells,
cellsPointerECM->getCellInfoVecs().eCMNeighborId.begin(),
FindECMNeighborPerCell(nodeECMLocXAddr,nodeECMLocYAddr,numNodesECM ));
thrust:: transform (
thrust::make_zip_iterator (
thrust:: make_tuple (
nodeECMLocX.begin(),
nodeECMLocY.begin())),
thrust::make_zip_iterator (
thrust:: make_tuple (
nodeECMLocX.begin(),
nodeECMLocY.begin()))+numNodesECM,
cellNeighborId.begin(),
FindCellNeighborPerECMNode(basalCellLocXAddr,basalCellLocYAddr, numCells));
}
void SceECM::AssignDampCoef() {
thrust::transform ( peripORexcm.begin() ,peripORexcm.begin() +numNodesECM, dampCoef.begin(), AssignDamping(dampBasal,dampBC,dampApical) );
#ifdef debugModeECM
for (int i=0 ; i<numNodesECM ; i++) {
if (dampCoef[i] < smallNumber) {
cout << "damping of element " << i << " is " << dampCoef[i] << " which is wrong" <<endl ;
throw std::invalid_argument ( "damping coefficients in the ECM are not set correctly") ;
}
}
#endif
}
|
53c1471fd993f187902ef613277b767dcff7e432.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
#include <hip/hip_complex.h>
__global__ void mul_scalar_double(int n,int idx, double dx,double *dy,int incy,double *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = dy[i] * dx;
}
}
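// Hypothetical host-side launch sketch (not part of the original file): the kernel scales
// every incy-th element of dy, starting at index idx, by the scalar dx via a grid-stride
// loop. The buffer names d_dy and d_result are illustrative only.
//
//   int threads = 256;
//   int blocks = (n + threads - 1) / threads;
//   hipLaunchKernelGGL(mul_scalar_double, dim3(blocks), dim3(threads), 0, 0,
//                      n, idx, dx, d_dy, incy, d_result);
//   hipDeviceSynchronize();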
|
53c1471fd993f187902ef613277b767dcff7e432.cu
|
extern "C"
#include <cuComplex.h>
__global__ void mul_scalar_double(int n,int idx, double dx,double *dy,int incy,double *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = dy[i] * dx;
}
}
|
6407f854b7a7ed4ffa2b98fafbe84d1b44464786.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "MarkSplits.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
bool force = XSIZE*YSIZE;
int minPartSize = XSIZE*YSIZE;
int maxPartSize = XSIZE*YSIZE;
int *partSizes = NULL;
hipMalloc(&partSizes, XSIZE*YSIZE);
int *splitsToMake = NULL;
hipMalloc(&splitsToMake, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
MarkSplits), dim3(gridBlock),dim3(threadBlock), 0, 0, size,force,minPartSize,maxPartSize,partSizes,splitsToMake);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
MarkSplits), dim3(gridBlock),dim3(threadBlock), 0, 0, size,force,minPartSize,maxPartSize,partSizes,splitsToMake);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
MarkSplits), dim3(gridBlock),dim3(threadBlock), 0, 0, size,force,minPartSize,maxPartSize,partSizes,splitsToMake);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
6407f854b7a7ed4ffa2b98fafbe84d1b44464786.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "MarkSplits.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int size = XSIZE*YSIZE;
bool force = XSIZE*YSIZE;
int minPartSize = XSIZE*YSIZE;
int maxPartSize = XSIZE*YSIZE;
int *partSizes = NULL;
cudaMalloc(&partSizes, XSIZE*YSIZE);
int *splitsToMake = NULL;
cudaMalloc(&splitsToMake, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
MarkSplits<<<gridBlock,threadBlock>>>(size,force,minPartSize,maxPartSize,partSizes,splitsToMake);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
MarkSplits<<<gridBlock,threadBlock>>>(size,force,minPartSize,maxPartSize,partSizes,splitsToMake);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
MarkSplits<<<gridBlock,threadBlock>>>(size,force,minPartSize,maxPartSize,partSizes,splitsToMake);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
bc94910a9af3a21e76398675f5fef79d34524c97.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2011 University of Sheffield.
* Author: Dr Paul Richmond
* Contact: [email protected] (http://www.paulrichmond.staff.shef.ac.uk)
*
* University of Sheffield retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* University of Sheffield is strictly prohibited.
*
* For terms of licence agreement please attached licence or view licence
* on www.flamegpu.com website.
*
*/
#if defined(__NVCC__) && defined(__CUDACC_VER_MAJOR__) && __CUDACC_VER_MAJOR__ >= 9
// Disable annotation on defaulted function warnings (glm 0.9.9 and CUDA 9.0 introduced this warning)
#pragma diag_suppress esa_on_defaulted_function_ignored
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cmath>
#include <GL/glew.h>
#include <GL/freeglut.h>
#include <cuda_gl_interop.h>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <glm/glm.hpp>
#include "header.h"
//LOD counts
uint lod1_count;
uint lod2_count;
uint lod3_count;
//LOD feedback memory
uint* d_lod_counts;
uint* d_lod_counts_reduced;
size_t pitch;
uint pitch_int;
/* Error check function for safe CUDA API calling */
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/* Error check function for post CUDA Kernel calling */
#define gpuErrchkLaunch() { gpuLaunchAssert(__FILE__, __LINE__); }
inline void gpuLaunchAssert(const char *file, int line, bool abort=true)
{
gpuAssert( hipPeekAtLastError(), file, line );
#ifdef _DEBUG
gpuAssert( hipDeviceSynchronize(), file, line );
#endif
}
//KERNEL DEFINITIONS
/** output_pedestrians_to_TBO
 * Outputs pedestrian agent data from FLAME GPU to a 4 component vector used for instancing
* @param agents pedestrian agent list from FLAME GPU
* @param data1 four component vector used to output instance data
* @param data2 four component vector used to output instance data
*/
__global__ void output_pedestrians_to_TBO(xmachine_memory_agent_list* agents, glm::vec4* data1, glm::vec4* data2){
//global thread index
int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
data1[index].x = agents->x[index];
data1[index].y = agents->y[index];
data1[index].z = agents->animate[index];
data1[index].w = agents->height[index];
data2[index].x = agents->velx[index];
data2[index].y = agents->vely[index];
data2[index].z = (float)agents->lod[index];
data2[index].w = 0.0;
}
/** generate_agent_keyvalue_pairs
 * Outputs key-value pairs based on each agent's LOD, used to sort pedestrian agents by LOD
 * @param keys sort key output list
* @param values sort identifiers output list
* @param agents pedestrian agent list from FLAME GPU
*/
__global__ void generate_agent_keyvalue_pairs(uint* keys, uint* values, xmachine_memory_agent_list* agents)
{
unsigned int index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
unsigned int sort_val = (uint)agents->lod[index];
keys[index] = sort_val;
values[index] = index;
}
/** generate_agent_lods
* Creates 3 rows of flags (1 or 0) used to indicate the level of detail for each agent. A global reduction is then used for each list to calculate the number of each LOD in the population
* @param pitch memory pitch of each row
* @param lods block of memory for 3 rows of data
* @param agents pedestrian agent list from FLAME GPU
*/
__global__ void generate_agent_lods(uint pitch, uint* lods, xmachine_memory_agent_list* agents)
{
unsigned int index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
unsigned int lod_val = agents->lod[index];
//uint* x = (uint*)((char*)lods + 0 * pitch) + index;
uint lod_x = __mul24(pitch, 0) + index;
uint lod_y = __mul24(pitch, 1) + index;
uint lod_z = __mul24(pitch, 2) + index;
if (lod_val == 1)
lods[lod_x] = 1;
else if (lod_val == 2)
lods[lod_y] = 1;
else if (lod_val == 3)
lods[lod_z] = 1;
}
//EXTERNAL FUNCTIONS DEFINED IN PedestrianPopulation.h
extern void initGPULODFeedback()
{
size_t width;
size_t height;
//gpuErrchk( hipMalloc( (void**) &d_lod_counts, sizeof(lod_count_list)));
width = xmachine_memory_agent_MAX * sizeof(uint);
height = 3;
gpuErrchk( hipMallocPitch( (void**) &d_lod_counts, &pitch, width, height));
gpuErrchk( hipMallocPitch( (void**) &d_lod_counts_reduced, &pitch, width, height));
pitch_int = pitch/sizeof(uint); //pitch is in bytes (chars), so normalise it to a uint element stride
}
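/* Note (not part of the original source): hipMallocPitch returns the row pitch in bytes, so
   pitch_int = pitch / sizeof(uint) converts it into an element stride. Row r, element i of
   the flag buffer is then addressed as lods[pitch_int * r + i], which is how
   generate_agent_lods and the thrust::inclusive_scan calls below index the three LOD rows. */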
extern void generate_instances_and_LOD(GLuint* instances_data1_tbo, GLuint* instances_data2_tbo, cudaGraphicsResource_t * p_instances_data1_cgr, cudaGraphicsResource_t * p_instances_data2_cgr)
{
//kernel sizes
int threads_per_tile = 128;
int tile_size;
dim3 grid;
dim3 threads;
//pointer
glm::vec4 *dptr_1;
glm::vec4 *dptr_2;
if (get_agent_agent_default_count() > 0)
{
// map OpenGL buffer object for writing from CUDA
gpuErrchk(hipGraphicsMapResources(1, p_instances_data1_cgr));
gpuErrchk(hipGraphicsResourceGetMappedPointer( (void**)&dptr_1, 0, *p_instances_data1_cgr));
gpuErrchk(hipGraphicsMapResources(1, p_instances_data2_cgr));
gpuErrchk(hipGraphicsResourceGetMappedPointer( (void**)&dptr_2, 0, *p_instances_data2_cgr));
//cuda block size
tile_size = (int) ceil((float)get_agent_agent_default_count()/threads_per_tile);
grid = dim3(tile_size, 1, 1);
threads = dim3(threads_per_tile, 1, 1);
//kernel
hipLaunchKernelGGL(( output_pedestrians_to_TBO), dim3(grid), dim3(threads), 0, 0, get_device_agent_default_agents(), dptr_1, dptr_2);
gpuErrchkLaunch();
// unmap buffer object
gpuErrchk(hipGraphicsUnmapResources(1, p_instances_data1_cgr));
gpuErrchk(hipGraphicsUnmapResources(1, p_instances_data2_cgr));
//Sort agents by lod
sort_agents_default(&generate_agent_keyvalue_pairs);
//reset counts
gpuErrchk(hipMemset(d_lod_counts, 0, pitch*3));
//generate new counts
hipLaunchKernelGGL(( generate_agent_lods), dim3(grid), dim3(threads), 0, 0, pitch_int, d_lod_counts, get_device_agent_default_agents());
//parallel reduce
thrust::inclusive_scan(thrust::device_pointer_cast(&d_lod_counts[pitch_int*0]), thrust::device_pointer_cast(&d_lod_counts[pitch_int*0]) + get_agent_agent_default_count(), thrust::device_pointer_cast(&d_lod_counts_reduced[pitch_int*0]));
thrust::inclusive_scan(thrust::device_pointer_cast(&d_lod_counts[pitch_int*1]), thrust::device_pointer_cast(&d_lod_counts[pitch_int*1]) + get_agent_agent_default_count(), thrust::device_pointer_cast(&d_lod_counts_reduced[pitch_int*1]));
thrust::inclusive_scan(thrust::device_pointer_cast(&d_lod_counts[pitch_int*2]), thrust::device_pointer_cast(&d_lod_counts[pitch_int*2]) + get_agent_agent_default_count(), thrust::device_pointer_cast(&d_lod_counts_reduced[pitch_int*2]));
//reset and then update counts
lod1_count = 0;
lod2_count = 0;
lod3_count = 0;
gpuErrchk( hipMemcpy( &lod1_count, &d_lod_counts_reduced[(pitch_int*0)+get_agent_agent_default_count()-1], sizeof(uint), hipMemcpyDeviceToHost));
gpuErrchk( hipMemcpy( &lod2_count, &d_lod_counts_reduced[(pitch_int*1)+get_agent_agent_default_count()-1], sizeof(uint), hipMemcpyDeviceToHost));
gpuErrchk( hipMemcpy( &lod3_count, &d_lod_counts_reduced[(pitch_int*2)+get_agent_agent_default_count()-1], sizeof(uint), hipMemcpyDeviceToHost));
}
}
extern int getPedestrianLOD1Count()
{
return lod1_count;
}
extern int getPedestrianLOD2Count()
{
return lod2_count;
}
extern int getPedestrianLOD3Count()
{
return lod3_count;
}
|
bc94910a9af3a21e76398675f5fef79d34524c97.cu
|
/*
* Copyright 2011 University of Sheffield.
* Author: Dr Paul Richmond
* Contact: [email protected] (http://www.paulrichmond.staff.shef.ac.uk)
*
* University of Sheffield retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* University of Sheffield is strictly prohibited.
*
* For terms of licence agreement please attached licence or view licence
* on www.flamegpu.com website.
*
*/
#if defined(__NVCC__) && defined(__CUDACC_VER_MAJOR__) && __CUDACC_VER_MAJOR__ >= 9
// Disable annotation on defaulted function warnings (glm 0.9.9 and CUDA 9.0 introduced this warning)
#pragma diag_suppress esa_on_defaulted_function_ignored
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cmath>
#include <GL/glew.h>
#include <GL/freeglut.h>
#include <cuda_gl_interop.h>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <glm/glm.hpp>
#include "header.h"
//LOD counts
uint lod1_count;
uint lod2_count;
uint lod3_count;
//LOD feedback memory
uint* d_lod_counts;
uint* d_lod_counts_reduced;
size_t pitch;
uint pitch_int;
/* Error check function for safe CUDA API calling */
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/* Error check function for post CUDA Kernel calling */
#define gpuErrchkLaunch() { gpuLaunchAssert(__FILE__, __LINE__); }
inline void gpuLaunchAssert(const char *file, int line, bool abort=true)
{
gpuAssert( cudaPeekAtLastError(), file, line );
#ifdef _DEBUG
gpuAssert( cudaDeviceSynchronize(), file, line );
#endif
}
//KERNEL DEFINITIONS
/** output_pedestrians_to_TBO
 * Outputs pedestrian agent data from FLAME GPU to a 4 component vector used for instancing
* @param agents pedestrian agent list from FLAME GPU
* @param data1 four component vector used to output instance data
* @param data2 four component vector used to output instance data
*/
__global__ void output_pedestrians_to_TBO(xmachine_memory_agent_list* agents, glm::vec4* data1, glm::vec4* data2){
//global thread index
int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
data1[index].x = agents->x[index];
data1[index].y = agents->y[index];
data1[index].z = agents->animate[index];
data1[index].w = agents->height[index];
data2[index].x = agents->velx[index];
data2[index].y = agents->vely[index];
data2[index].z = (float)agents->lod[index];
data2[index].w = 0.0;
}
/** generate_agent_keyvalue_pairs
 * Outputs key-value pairs based on each agent's LOD, used to sort pedestrian agents by LOD
 * @param keys sort key output list
* @param values sort identifiers output list
* @param agents pedestrian agent list from FLAME GPU
*/
__global__ void generate_agent_keyvalue_pairs(uint* keys, uint* values, xmachine_memory_agent_list* agents)
{
unsigned int index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
unsigned int sort_val = (uint)agents->lod[index];
keys[index] = sort_val;
values[index] = index;
}
/** generate_agent_lods
* Creates 3 rows of flags (1 or 0) used to indicate the level of detail for each agent. A global reduction is then used for each list to calculate the number of each LOD in the population
* @param pitch memory pitch of each row
* @param lods block of memory for 3 rows of data
* @param agents pedestrian agent list from FLAME GPU
*/
__global__ void generate_agent_lods(uint pitch, uint* lods, xmachine_memory_agent_list* agents)
{
unsigned int index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
unsigned int lod_val = agents->lod[index];
//uint* x = (uint*)((char*)lods + 0 * pitch) + index;
uint lod_x = __mul24(pitch, 0) + index;
uint lod_y = __mul24(pitch, 1) + index;
uint lod_z = __mul24(pitch, 2) + index;
if (lod_val == 1)
lods[lod_x] = 1;
else if (lod_val == 2)
lods[lod_y] = 1;
else if (lod_val == 3)
lods[lod_z] = 1;
}
//EXTERNAL FUNCTIONS DEFINED IN PedestrianPopulation.h
extern void initGPULODFeedback()
{
size_t width;
size_t height;
//gpuErrchk( cudaMalloc( (void**) &d_lod_counts, sizeof(lod_count_list)));
width = xmachine_memory_agent_MAX * sizeof(uint);
height = 3;
gpuErrchk( cudaMallocPitch( (void**) &d_lod_counts, &pitch, width, height));
gpuErrchk( cudaMallocPitch( (void**) &d_lod_counts_reduced, &pitch, width, height));
pitch_int = pitch/sizeof(uint); //pitch is in bytes (chars), so normalise it to a uint element stride
}
extern void generate_instances_and_LOD(GLuint* instances_data1_tbo, GLuint* instances_data2_tbo, cudaGraphicsResource_t * p_instances_data1_cgr, cudaGraphicsResource_t * p_instances_data2_cgr)
{
//kernel sizes
int threads_per_tile = 128;
int tile_size;
dim3 grid;
dim3 threads;
//pointer
glm::vec4 *dptr_1;
glm::vec4 *dptr_2;
if (get_agent_agent_default_count() > 0)
{
// map OpenGL buffer object for writing from CUDA
gpuErrchk(cudaGraphicsMapResources(1, p_instances_data1_cgr));
gpuErrchk(cudaGraphicsResourceGetMappedPointer( (void**)&dptr_1, 0, *p_instances_data1_cgr));
gpuErrchk(cudaGraphicsMapResources(1, p_instances_data2_cgr));
gpuErrchk(cudaGraphicsResourceGetMappedPointer( (void**)&dptr_2, 0, *p_instances_data2_cgr));
//cuda block size
tile_size = (int) ceil((float)get_agent_agent_default_count()/threads_per_tile);
grid = dim3(tile_size, 1, 1);
threads = dim3(threads_per_tile, 1, 1);
//kernel
output_pedestrians_to_TBO<<< grid, threads>>>(get_device_agent_default_agents(), dptr_1, dptr_2);
gpuErrchkLaunch();
// unmap buffer object
gpuErrchk(cudaGraphicsUnmapResources(1, p_instances_data1_cgr));
gpuErrchk(cudaGraphicsUnmapResources(1, p_instances_data2_cgr));
//Sort agents by lod
sort_agents_default(&generate_agent_keyvalue_pairs);
//reset counts
gpuErrchk(cudaMemset(d_lod_counts, 0, pitch*3));
//generate new counts
generate_agent_lods<<<grid, threads>>>(pitch_int, d_lod_counts, get_device_agent_default_agents());
//parallel reduce
thrust::inclusive_scan(thrust::device_pointer_cast(&d_lod_counts[pitch_int*0]), thrust::device_pointer_cast(&d_lod_counts[pitch_int*0]) + get_agent_agent_default_count(), thrust::device_pointer_cast(&d_lod_counts_reduced[pitch_int*0]));
thrust::inclusive_scan(thrust::device_pointer_cast(&d_lod_counts[pitch_int*1]), thrust::device_pointer_cast(&d_lod_counts[pitch_int*1]) + get_agent_agent_default_count(), thrust::device_pointer_cast(&d_lod_counts_reduced[pitch_int*1]));
thrust::inclusive_scan(thrust::device_pointer_cast(&d_lod_counts[pitch_int*2]), thrust::device_pointer_cast(&d_lod_counts[pitch_int*2]) + get_agent_agent_default_count(), thrust::device_pointer_cast(&d_lod_counts_reduced[pitch_int*2]));
//reset and then update counts
lod1_count = 0;
lod2_count = 0;
lod3_count = 0;
gpuErrchk( cudaMemcpy( &lod1_count, &d_lod_counts_reduced[(pitch_int*0)+get_agent_agent_default_count()-1], sizeof(uint), cudaMemcpyDeviceToHost));
gpuErrchk( cudaMemcpy( &lod2_count, &d_lod_counts_reduced[(pitch_int*1)+get_agent_agent_default_count()-1], sizeof(uint), cudaMemcpyDeviceToHost));
gpuErrchk( cudaMemcpy( &lod3_count, &d_lod_counts_reduced[(pitch_int*2)+get_agent_agent_default_count()-1], sizeof(uint), cudaMemcpyDeviceToHost));
}
}
extern int getPedestrianLOD1Count()
{
return lod1_count;
}
extern int getPedestrianLOD2Count()
{
return lod2_count;
}
extern int getPedestrianLOD3Count()
{
return lod3_count;
}
|
f414f1c9419abd7d39d4140aab5f4f006b76e24a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thread>
#include <chrono>
#include <time.h>
#include <iostream>
#include <math.h>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/stitching.hpp>
#include <opencv2/core/utility.hpp>
// Number of threads per block
#define NumeroHilos 30.0
void sobelFiltroCPU(cv::Mat srcImg, cv::Mat dstImg, const unsigned int width, const unsigned int height);
void sobelFiltroOpenCV(cv::Mat srcImg, cv::Mat dstImg);
__global__ void sobelFiltroGPU(unsigned char* srcImg, unsigned char* dstImg, const unsigned int width, const unsigned int height){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if( x > 0 && y > 0 && x < width-1 && y < height-1) {
float dx = (-1* srcImg[(y-1)*width + (x-1)]) + (-2*srcImg[y*width+(x-1)]) + (-1*srcImg[(y+1)*width+(x-1)]) +
( srcImg[(y-1)*width + (x+1)]) + ( 2*srcImg[y*width+(x+1)]) + ( srcImg[(y+1)*width+(x+1)]);
float dy = ( srcImg[(y-1)*width + (x-1)]) + ( 2*srcImg[(y-1)*width+x]) + ( srcImg[(y-1)*width+(x+1)]) +
(-1* srcImg[(y+1)*width + (x-1)]) + (-2*srcImg[(y+1)*width+x]) + (-1*srcImg[(y+1)*width+(x+1)]);
dstImg[y*width + x] = sqrt( (dx*dx) + (dy*dy) ) > 255 ? 255 : sqrt( (dx*dx) + (dy*dy) );
}
}
int main(int argc, char * argv[]){
if(argc != 2){
std::cout << argv[0] << "Nmero de argumentos de lnea de comando no vlido. Salir del programa" << std::endl;
std::cout << "Uso: " << argv[0] << " [image.png]"<< std::endl;
return 1;
}
// Check the GPU, CUDA and OpenCV versions.
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
time_t rawTime; time(&rawTime);
struct tm* curTime = localtime(&rawTime);
char timeBuffer[80] = "";
strftime(timeBuffer, 80, "---------- %c ----------", curTime);
std::cout << timeBuffer << std::endl;
std::cout << "GPU: " << deviceProp.name << ", CUDA "<< deviceProp.major << "."<< deviceProp.minor <<", "<< deviceProp.totalGlobalMem / 1048576 <<
" Mbytes " <<std::endl;
std::cout << "OpenCV Version: " << CV_VERSION << std::endl;
// Load the image and convert it to grayscale
cv::Mat srcImg = cv::imread(argv[1]);
cv::cvtColor(srcImg, srcImg, cv::COLOR_RGB2GRAY);
cv::Mat sobel_cpu = cv::Mat::zeros(srcImg.size(),srcImg.type());
cv::Mat sobel_opencv = cv::Mat::zeros(srcImg.size(), srcImg.type());
unsigned char *gpu_src, *gpu_sobel;
auto start_time = std::chrono::system_clock::now();
// ---START OPENCV
start_time = std::chrono::system_clock::now();
sobelFiltroOpenCV(srcImg, sobel_opencv);
std::chrono::duration<double> time_opencv = std::chrono::system_clock::now() - start_time;
// ---END OPENCV
// ---START CPU
start_time = std::chrono::system_clock::now();
sobelFiltroCPU(srcImg, sobel_cpu, srcImg.cols, srcImg.rows);
std::chrono::duration<double> time_cpu = std::chrono::system_clock::now() - start_time;
// ---END CPU
// ---SETUP GPU
// Events
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//Streams
hipStream_t stream;
hipStreamCreate(&stream);
// Allocate GPU memory for the images.
hipMalloc( (void**)&gpu_src, (srcImg.cols * srcImg.rows));
hipMalloc( (void**)&gpu_sobel, (srcImg.cols * srcImg.rows));
// Transfer from host to device and set the result buffer to 0s
hipMemcpy(gpu_src, srcImg.data, (srcImg.cols*srcImg.rows), hipMemcpyHostToDevice);
hipMemset(gpu_sobel, 0, (srcImg.cols*srcImg.rows));
// set up the dim3 launch arguments for the GPU: threads per block and number of blocks
dim3 threadsPerBlock(NumeroHilos, NumeroHilos, 1);
dim3 numBlocks(ceil(srcImg.cols/NumeroHilos), ceil(srcImg.rows/NumeroHilos), 1);
// ---START GPU
// Run the Sobel filter on the GPU.
hipEventRecord(start);
start_time = std::chrono::system_clock::now();
hipLaunchKernelGGL(( sobelFiltroGPU), dim3(numBlocks), dim3(threadsPerBlock), 0, stream , gpu_src, gpu_sobel, srcImg.cols, srcImg.rows);
hipError_t cudaerror = hipDeviceSynchronize();
// if error, output error
if ( cudaerror != hipSuccess )
std::cout << "Cuda no se pudo sincronizar: " << hipGetErrorName( cudaerror ) <<std::endl;
std::chrono::duration<double> time_gpu = std::chrono::system_clock::now() - start_time;
// ---END GPU
// Copy the data back from the GPU to the CPU (device to host)
hipMemcpy(srcImg.data, gpu_sobel, (srcImg.cols*srcImg.rows), hipMemcpyDeviceToHost);
// Free resources
hipEventRecord(stop);
float time_milliseconds =0;
hipEventElapsedTime(&time_milliseconds, start, stop);
hipStreamDestroy(stream);
hipFree(gpu_src);
hipFree(gpu_sobel);
/** Execution times for each Sobel filtering method **/
std::cout << "Archivo: "<< argv[1] << ": "<<srcImg.rows<<" rows x "<<srcImg.cols << " columns" << std::endl;
std::cout << "CPU execution time = " << 1000*time_cpu.count() <<" msec"<<std::endl;
std::cout << "OPENCV execution time = " << 1000*time_opencv.count() <<" msec"<<std::endl;
std::cout << "CUDA execution time = " << 1000*time_gpu.count() <<" msec"<<std::endl;
// Save results
cv::imwrite("ResultadoGPU.png",srcImg);
cv::imwrite("ResultadoCPU.png",sobel_cpu);
cv::imwrite("ResultadoOpenCV.png",sobel_opencv);
return 0;
}
void sobelFiltroOpenCV(cv::Mat srcImg, cv::Mat dstImg){
cv::Mat grad_x, grad_y, abs_grad_x, abs_grad_y;
cv::Sobel(srcImg, grad_x, CV_16S, 1, 0, 3, 1, 0, cv::BORDER_DEFAULT);
cv::convertScaleAbs(grad_x, abs_grad_x);
cv::Sobel(srcImg, grad_y, CV_16S, 0, 1, 3, 1, 0, cv::BORDER_DEFAULT);
cv::convertScaleAbs(grad_y, abs_grad_y);
addWeighted( abs_grad_x, 0.5, abs_grad_y, 0.5, 0, dstImg );
}
void sobelFiltroCPU(cv::Mat srcImg, cv::Mat dstImg, const unsigned int width, const unsigned int height){
for(int y = 1; y < srcImg.rows-1; y++) {
for(int x = 1; x < srcImg.cols-1; x++) {
float dx = (-1*srcImg.data[(y-1)*width + (x-1)]) + (-2*srcImg.data[y*width+(x-1)]) + (-1*srcImg.data[(y+1)*width+(x-1)]) +
(srcImg.data[(y-1)*width + (x+1)]) + (2*srcImg.data[y*width+(x+1)]) + (srcImg.data[(y+1)*width+(x+1)]);
float dy = (srcImg.data[(y-1)*width + (x-1)]) + (2*srcImg.data[(y-1)*width+x]) + (srcImg.data[(y-1)*width+(x+1)]) +
(-1*srcImg.data[(y+1)*width + (x-1)]) + (-2*srcImg.data[(y+1)*width+x]) + (-1*srcImg.data[(y+1)*width+(x+1)]);
dstImg.at<uchar>(y,x) = sqrt( (dx*dx) + (dy*dy) ) > 255 ? 255 : sqrt( (dx*dx) + (dy*dy) );
}
}
}
|
f414f1c9419abd7d39d4140aab5f4f006b76e24a.cu
|
#include <thread>
#include <chrono>
#include <time.h>
#include <iostream>
#include <math.h>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/stitching.hpp>
#include <opencv2/core/utility.hpp>
// Number of threads per block
#define NumeroHilos 30.0
void sobelFiltroCPU(cv::Mat srcImg, cv::Mat dstImg, const unsigned int width, const unsigned int height);
void sobelFiltroOpenCV(cv::Mat srcImg, cv::Mat dstImg);
__global__ void sobelFiltroGPU(unsigned char* srcImg, unsigned char* dstImg, const unsigned int width, const unsigned int height){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if( x > 0 && y > 0 && x < width-1 && y < height-1) {
float dx = (-1* srcImg[(y-1)*width + (x-1)]) + (-2*srcImg[y*width+(x-1)]) + (-1*srcImg[(y+1)*width+(x-1)]) +
( srcImg[(y-1)*width + (x+1)]) + ( 2*srcImg[y*width+(x+1)]) + ( srcImg[(y+1)*width+(x+1)]);
float dy = ( srcImg[(y-1)*width + (x-1)]) + ( 2*srcImg[(y-1)*width+x]) + ( srcImg[(y-1)*width+(x+1)]) +
(-1* srcImg[(y+1)*width + (x-1)]) + (-2*srcImg[(y+1)*width+x]) + (-1*srcImg[(y+1)*width+(x+1)]);
dstImg[y*width + x] = sqrt( (dx*dx) + (dy*dy) ) > 255 ? 255 : sqrt( (dx*dx) + (dy*dy) );
}
}
int main(int argc, char * argv[]){
if(argc != 2){
std::cout << argv[0] << "Número de argumentos de línea de comando no válido. Salir del programa" << std::endl;
std::cout << "Uso: " << argv[0] << " [image.png]"<< std::endl;
return 1;
}
// Check the GPU, CUDA and OpenCV versions.
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
time_t rawTime; time(&rawTime);
struct tm* curTime = localtime(&rawTime);
char timeBuffer[80] = "";
strftime(timeBuffer, 80, "---------- %c ----------", curTime);
std::cout << timeBuffer << std::endl;
std::cout << "GPU: " << deviceProp.name << ", CUDA "<< deviceProp.major << "."<< deviceProp.minor <<", "<< deviceProp.totalGlobalMem / 1048576 <<
" Mbytes " <<std::endl;
std::cout << "OpenCV Version: " << CV_VERSION << std::endl;
// Load the image and convert it to grayscale
cv::Mat srcImg = cv::imread(argv[1]);
cv::cvtColor(srcImg, srcImg, cv::COLOR_RGB2GRAY);
cv::Mat sobel_cpu = cv::Mat::zeros(srcImg.size(),srcImg.type());
cv::Mat sobel_opencv = cv::Mat::zeros(srcImg.size(), srcImg.type());
unsigned char *gpu_src, *gpu_sobel;
auto start_time = std::chrono::system_clock::now();
// ---START OPENCV
start_time = std::chrono::system_clock::now();
sobelFiltroOpenCV(srcImg, sobel_opencv);
std::chrono::duration<double> time_opencv = std::chrono::system_clock::now() - start_time;
// ---END OPENCV
// ---START CPU
start_time = std::chrono::system_clock::now();
sobelFiltroCPU(srcImg, sobel_cpu, srcImg.cols, srcImg.rows);
std::chrono::duration<double> time_cpu = std::chrono::system_clock::now() - start_time;
// ---END CPU
// ---SETUP GPU
// Events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//Streams
cudaStream_t stream;
cudaStreamCreate(&stream);
// Allocate GPU memory for the images.
cudaMalloc( (void**)&gpu_src, (srcImg.cols * srcImg.rows));
cudaMalloc( (void**)&gpu_sobel, (srcImg.cols * srcImg.rows));
// Transfer from host to device and set the result buffer to 0s
cudaMemcpy(gpu_src, srcImg.data, (srcImg.cols*srcImg.rows), cudaMemcpyHostToDevice);
cudaMemset(gpu_sobel, 0, (srcImg.cols*srcImg.rows));
// set up the dim3 launch arguments for the GPU: threads per block and number of blocks
dim3 threadsPerBlock(NumeroHilos, NumeroHilos, 1);
dim3 numBlocks(ceil(srcImg.cols/NumeroHilos), ceil(srcImg.rows/NumeroHilos), 1);
// ---START GPU
// Run the Sobel filter on the GPU.
cudaEventRecord(start);
start_time = std::chrono::system_clock::now();
sobelFiltroGPU<<< numBlocks, threadsPerBlock, 0, stream >>>(gpu_src, gpu_sobel, srcImg.cols, srcImg.rows);
cudaError_t cudaerror = cudaDeviceSynchronize();
// if error, output error
if ( cudaerror != cudaSuccess )
std::cout << "Cuda no se pudo sincronizar: " << cudaGetErrorName( cudaerror ) <<std::endl;
std::chrono::duration<double> time_gpu = std::chrono::system_clock::now() - start_time;
// ---END GPU
// Copy the data back from the GPU to the CPU (device to host)
cudaMemcpy(srcImg.data, gpu_sobel, (srcImg.cols*srcImg.rows), cudaMemcpyDeviceToHost);
// Free resources
cudaEventRecord(stop);
float time_milliseconds =0;
cudaEventElapsedTime(&time_milliseconds, start, stop);
cudaStreamDestroy(stream);
cudaFree(gpu_src);
cudaFree(gpu_sobel);
/** Execution times for each Sobel filtering method **/
std::cout << "Archivo: "<< argv[1] << ": "<<srcImg.rows<<" rows x "<<srcImg.cols << " columns" << std::endl;
std::cout << "CPU execution time = " << 1000*time_cpu.count() <<" msec"<<std::endl;
std::cout << "OPENCV execution time = " << 1000*time_opencv.count() <<" msec"<<std::endl;
std::cout << "CUDA execution time = " << 1000*time_gpu.count() <<" msec"<<std::endl;
// Save results
cv::imwrite("ResultadoGPU.png",srcImg);
cv::imwrite("ResultadoCPU.png",sobel_cpu);
cv::imwrite("ResultadoOpenCV.png",sobel_opencv);
return 0;
}
void sobelFiltroOpenCV(cv::Mat srcImg, cv::Mat dstImg){
cv::Mat grad_x, grad_y, abs_grad_x, abs_grad_y;
cv::Sobel(srcImg, grad_x, CV_16S, 1, 0, 3, 1, 0, cv::BORDER_DEFAULT);
cv::convertScaleAbs(grad_x, abs_grad_x);
cv::Sobel(srcImg, grad_y, CV_16S, 0, 1, 3, 1, 0, cv::BORDER_DEFAULT);
cv::convertScaleAbs(grad_y, abs_grad_y);
addWeighted( abs_grad_x, 0.5, abs_grad_y, 0.5, 0, dstImg );
}
void sobelFiltroCPU(cv::Mat srcImg, cv::Mat dstImg, const unsigned int width, const unsigned int height){
for(int y = 1; y < srcImg.rows-1; y++) {
for(int x = 1; x < srcImg.cols-1; x++) {
float dx = (-1*srcImg.data[(y-1)*width + (x-1)]) + (-2*srcImg.data[y*width+(x-1)]) + (-1*srcImg.data[(y+1)*width+(x-1)]) +
(srcImg.data[(y-1)*width + (x+1)]) + (2*srcImg.data[y*width+(x+1)]) + (srcImg.data[(y+1)*width+(x+1)]);
float dy = (srcImg.data[(y-1)*width + (x-1)]) + (2*srcImg.data[(y-1)*width+x]) + (srcImg.data[(y-1)*width+(x+1)]) +
(-1*srcImg.data[(y+1)*width + (x-1)]) + (-2*srcImg.data[(y+1)*width+x]) + (-1*srcImg.data[(y+1)*width+(x+1)]);
dstImg.at<uchar>(y,x) = sqrt( (dx*dx) + (dy*dy) ) > 255 ? 255 : sqrt( (dx*dx) + (dy*dy) );
}
}
}
|
d4e200eed91f171b394608d0514705d2fbc8cd89.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <vector>
#include <cmath>
#include <random>
#include "cudaBase.h"
// How to compile:
// nvcc -std=c++11 -arch=sm_35 main.cu
using namespace std;
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
static inline void _safe_cuda_call(hipError_t err, const char* msg, const char* file_name, const int line_number)
{
if (err != hipSuccess)
{
fprintf(stderr, "%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n", msg, file_name, line_number, hipGetErrorString(err));
std::cin.get();
exit(EXIT_FAILURE);
}
}
#define SAFE_CALL(call, msg) _safe_cuda_call((call),(msg),__FILE__, __LINE__)
class CudaTime {
hipEvent_t start, stop;
public:
CudaTime() {
hipEventCreate(&start);
hipEventCreate(&stop);
}
void record() {
hipEventRecord( start, 0 );
}
void stopAndPrint(const char* msg) {
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
float elapsedTime;
hipEventElapsedTime( &elapsedTime, start, stop);
printf( "Elapsed time on %s: %3.1f ms\n", msg , elapsedTime );
}
};
__device__
float Ackley(float* X, int dim) //
{
float a = 20.0f, b = 0.2f, c = 2 * M_PI, sum = 0.0f, cosenos = 0.0f;
for (int i = 0; i < dim; ++i)
{
sum += X[i] * X[i];
cosenos += cosf(c * X[i]);
}
return -a * expf(-b * sqrtf(sum / dim)) - expf(cosenos / dim) + a + expf(1);
}
__device__
float Schwefel(float* X, int dim)
{
float sum = 0.0f;
for (int i = 0; i < dim; ++i)
sum += X[i] * sinf(sqrtf(fabsf(X[i])));
return 418.9829 * dim - sum;
}
__device__
float Funcion_3(float* X, int dim)
{
float sum = 0.0f;
for (int i = 0; i < dim; ++i)
sum += X[i] * X[i];
return 0.5 - ( powf(sinf(sqrtf(sum)), 2) - 0.5 ) / powf(1.0 + 0.001 * sum, 2);
}
__global__ void GSA_iteration_fitness(float** position, float* fitness, int funcion, int numero_poblacion, int dim) {
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int index = yIndex * blockDim.x * gridDim.x + xIndex;
if (index < numero_poblacion)
{
switch (funcion) {
case 1:
fitness[index] = Ackley(position[index], dim);
break;
case 2:
fitness[index] = Schwefel(position[index], dim);
break;
case 3:
fitness[index] = Funcion_3(position[index], dim);
break;
}
}
}
__device__
float* getF(float* i_position, float i_Mp, float* j_position, float j_Ma, int dimension)
{
float R = 0.0f;
// int dim = i.position.size();
for (size_t component = 0; component < dimension; component++)
R += pow(i_position[component] - j_position[component], 2);
R = sqrtf(R);
float e = 2.2204e-016; // small constant (about machine epsilon) to avoid division by zero
float sub_F = i_Mp * j_Ma / (R + e);
float* Force = new float[dimension]; // one force component per dimension
for (size_t component = 0; component < dimension; component++)
Force[component] = sub_F * (j_position[component] - i_position[component]);
return Force;
}
__global__ void initRandomSeed( hiprandState_t *devState, unsigned int seed , int numero_poblacion) {
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int index = yIndex * blockDim.x * gridDim.x + xIndex;
if (index < numero_poblacion)
hiprand_init( seed, index, 0, &devState[index] );
}
__global__ void GSA_iteration_move(float** position, float** A, float** V, float** F,
float* fitness, float* M, float* sum,
float G, int numero_poblacion, int dimension, int iteraciones, int min, int max,
float best, float worst, hiprandState_t *devState) {
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int index = yIndex * blockDim.x * gridDim.x + xIndex;
if (index < numero_poblacion)
{
float m = (fitness[index] - worst) / (best - worst); //Equation 15
atomicAdd( sum, m );
__syncthreads();
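// Note: __syncthreads() only synchronizes threads of one block; with the multi-block
// grid launched from GSA_cu, *sum may not yet include the contributions of other blocks here.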
M[index] = m / *sum; //Equation 16
//(e)Calculation of the total force in different directions.
int l;
if (iteraciones < numero_poblacion) l = iteraciones;
else l = numero_poblacion;
// clear the accumulated forces
for (size_t k = 0; k < dimension; k++)
F[index][k] = 0;
// improvement: only the best agents exert force.
for (size_t j = 0; j < l; j++)
{
if (index != j)
{
float* Force = getF(position[index], M[index] , position[j], M[j], dimension);
for (size_t k = 0; k < dimension; k++)
F[index][k] += hiprand_uniform(&devState[index]) * G * Force[k];
delete[] Force; // allocated with new[], so release with delete[]
}
}
//(f)Calculation of acceleration and velocity.
for (size_t i = 0; i < dimension; i++)
{
if (M[index] == 0)
A[index][i] = 0;
else
A[index][i] = F[index][i] / M[index]; //Equation 10
}
for (size_t i = 0; i < dimension; i++)
V[index][i] = hiprand_uniform(&devState[index]) * V[index][i] + A[index][i]; //Equation 11
//(g)Updating agents position.
for (size_t i = 0; i < dimension; i++)
{
// position[index][i] = std::min<float>(position[index][i] + V[index][i], max);
if (position[index][i] + V[index][i] < max)
position[index][i] = position[index][i] + V[index][i];
else
position[index][i] = max;
// position[index][i] = std::max<float>(position[index][i], min); //Equation 12
if (position[index][i] < min)
position[index][i] = min;
}
}
}
void GSA_cu(int funcion, unsigned int numero_poblacion, unsigned int dimension, float minimo, float maximo, bool minimizar,
int iteraciones, float G) {
//Initialize
std::random_device generador;
std::uniform_real_distribution<float> distribucion(minimo, maximo);
float** position = (float **) malloc( numero_poblacion * sizeof(float*)) ;
for (int i = 0; i < numero_poblacion; ++i) {
position[i] = (float *)malloc(dimension * sizeof(float));
for (size_t j = 0; j < dimension; j++)
position[i][j] = distribucion(generador);
}
float bestFitnessSoFar;
std::vector<float> bestPositionSoFar(dimension);
if (minimizar) bestFitnessSoFar = 1000000000000.0;
else bestFitnessSoFar = -1000000000000.0;
float bestFitness ;
float worstFitness ;
// int bestFitness_idx ;
if (minimizar) {
bestFitness = 1000000000;
worstFitness = -1000000000;
} else {
bestFitness = -1000000000;
worstFitness = 1000000000;
}
float G_step = G / iteraciones;
//(b)Randomized initialization.
/*** Copy Position HOST TO DEVICE ***/
float** dev_position = 0;
float* dev_temp_position[numero_poblacion];
// first create top level pointer
SAFE_CALL(hipMalloc(&dev_position, sizeof(float*) * numero_poblacion), "CUDA Malloc Failed");
// then create child pointers on host, and copy to device, then copy image
for (int i = 0; i < numero_poblacion; i++)
{
SAFE_CALL(hipMalloc(&dev_temp_position[i], dimension * sizeof(float) ), "CUDA Memset Failed");
SAFE_CALL(hipMemcpy(&(dev_position[i]), &(dev_temp_position[i]), sizeof(float *), hipMemcpyHostToDevice), "CUDA Memset Failed");//copy child pointer to device
SAFE_CALL(hipMemcpy(dev_temp_position[i], position[i], dimension * sizeof(float), hipMemcpyHostToDevice), "CUDA Memset Failed"); // copy image to device
}
/*** end Copy Position ***/
/*** Copy Velocity HOST TO DEVICE ***/
float** dev_V = 0;
float* dev_temp_V[numero_poblacion];
// first create top level pointer
SAFE_CALL(hipMalloc(&dev_V, sizeof(float*) * numero_poblacion), "CUDA Malloc Failed");
// then create child pointers on host, and copy to device, then copy image
for (int i = 0; i < numero_poblacion; i++)
{
SAFE_CALL(hipMalloc(&dev_temp_V[i], dimension * sizeof(float) ), "CUDA Memset Failed");
SAFE_CALL(hipMemcpy(&(dev_V[i]), &(dev_temp_V[i]), sizeof(float *), hipMemcpyHostToDevice), "CUDA Memset Failed");//copy child pointer to device
}
/*** end Copy Velocity ***/
/*** Copy Acceleration HOST TO DEVICE ***/
float** dev_A = 0;
float* dev_temp_A[numero_poblacion];
// first create top level pointer
SAFE_CALL(hipMalloc(&dev_A, sizeof(float*) * numero_poblacion), "CUDA Malloc Failed");
// then create child pointers on host, and copy to device, then copy image
for (int i = 0; i < numero_poblacion; i++)
{
SAFE_CALL(hipMalloc(&dev_temp_A[i], dimension * sizeof(float) ), "CUDA Memset Failed");
SAFE_CALL(hipMemcpy(&(dev_A[i]), &(dev_temp_A[i]), sizeof(float *), hipMemcpyHostToDevice), "CUDA Memset Failed");//copy child pointer to device
}
/*** end Copy Acceleration ***/
/*** Copy force HOST TO DEVICE ***/
float** dev_F = 0;
float* dev_temp_F[numero_poblacion];
// first create top level pointer
SAFE_CALL(hipMalloc(&dev_F, sizeof(float*) * numero_poblacion), "CUDA Malloc Failed");
// then create child pointers on host, and copy to device, then copy image
for (int i = 0; i < numero_poblacion; i++)
{
SAFE_CALL(hipMalloc(&dev_temp_F[i], dimension * sizeof(float) ), "CUDA Memset Failed");
SAFE_CALL(hipMemcpy(&(dev_F[i]), &(dev_temp_F[i]), sizeof(float *), hipMemcpyHostToDevice), "CUDA Memset Failed");//copy child pointer to device
}
/*** end Copy force ***/
// Device variables
hiprandState_t *devState;
hipMalloc( (void **)&devState, numero_poblacion * sizeof(hiprandState_t) );
float* dev_fitness;
float* dev_M;
float* fitness = new float[numero_poblacion];
SAFE_CALL(hipMalloc(&dev_fitness, numero_poblacion * sizeof(float) ), "CUDA Malloc Failed");
SAFE_CALL(hipMalloc(&dev_M, numero_poblacion * sizeof(float) ), "CUDA Malloc Failed");
//Allocate memory for the sum
float* dev_sum;
// float* sum = (float*)malloc(sizeof(float));
hipMalloc((void**)&dev_sum, sizeof(float));
hipMemset(dev_sum, 0, sizeof(float));
//Specify a reasonable block size
const dim3 block(4, 4);
//Calculate grid size to cover the whole image
const dim3 grid( ( ceil(sqrt(numero_poblacion)) + block.x - 1) / block.x,
( ceil(sqrt(numero_poblacion)) + block.y - 1) / block.y); //implicit cast to int ceil
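// e.g. with numero_poblacion = 60 (see main): ceil(sqrt(60)) = 8, so the grid is 2x2 blocks
// of 4x4 threads = 64 threads, enough to cover the 60 agents guarded by `index < numero_poblacion`.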
hipLaunchKernelGGL(( initRandomSeed) , dim3(grid), dim3(block), 0, 0, devState, (unsigned int)time(NULL), numero_poblacion);
for (int _ = 0; _ < iteraciones - 1; ++_)
{
hipLaunchKernelGGL(( GSA_iteration_fitness) , dim3(grid), dim3(block), 0, 0, dev_position, dev_fitness, funcion, numero_poblacion, dimension);
hipDeviceSynchronize();
SAFE_CALL(hipMemcpy(fitness, dev_fitness, numero_poblacion * sizeof(float) , hipMemcpyDeviceToHost), "CUDA Memcpy Host To Device Failed");
for (int i = 0; i < numero_poblacion; ++i)
{
if (minimizar) {
if (fitness[i] < bestFitness){
bestFitness = fitness[i];
// bestFitness_idx = i;
}
if (fitness[i] > worstFitness)
worstFitness = fitness[i];
} else {
if (fitness[i] > bestFitness){
bestFitness = fitness[i];
// bestFitness_idx = i;
}
if (fitness[i] < worstFitness)
worstFitness = fitness[i];
}
}
if (minimizar)
{
if (bestFitness < bestFitnessSoFar)
bestFitnessSoFar = bestFitness;
} else {
if (bestFitness > bestFitnessSoFar)
bestFitnessSoFar = bestFitness;
}
G -= G_step;
hipLaunchKernelGGL(( GSA_iteration_move) , dim3(grid), dim3(block), 0, 0, dev_position, dev_A, dev_V, dev_F, dev_fitness, dev_M, dev_sum, G, numero_poblacion,
dimension, iteraciones, minimo, maximo, bestFitness, worstFitness, devState);
hipDeviceSynchronize();
hipMemset(dev_sum, 0, sizeof(float));
}
cout<<endl<<"Result: "<< bestFitnessSoFar<<endl;
}
int main()
{
int numero_poblacion = 60, dimension = 2;
float G = 18;
bool minimizar = true;
int iteraciones = 50;
CudaTime tiempo;
tiempo.record();
for (int i = 0; i < 1; ++i)
{
float minimo = -32.768f, maximo = 32.768f;
GSA_cu(1, numero_poblacion, dimension, minimo, maximo, minimizar, iteraciones, G);
}
// cout << "All good" << endl;
// for (int i = 0; i < 1; ++i)
// {
// float minimo = -500.0f, maximo = 500.0f;
// GSA_cu(2, numero_poblacion, dimension, minimo, maximo, minimizar, iteraciones, G);
// }
// for (int i = 0; i < 1; ++i)
// {
// minimizar = false;
// float minimo = -100.0f, maximo = 100.0f;
// GSA_cu(3, numero_poblacion, dimension, minimo, maximo, minimizar, iteraciones, G);
// }
tiempo.stopAndPrint("GSA");
return 0;
}
|
d4e200eed91f171b394608d0514705d2fbc8cd89.cu
|
#include <iostream>
#include <vector>
#include <cmath>
#include <random>
#include "cudaBase.h"
// How to compile:
// nvcc -std=c++11 -arch=sm_35 main.cu
using namespace std;
#include <cuda_runtime.h>
#include <cuda.h>
#include <curand_kernel.h>
static inline void _safe_cuda_call(cudaError err, const char* msg, const char* file_name, const int line_number)
{
if (err != cudaSuccess)
{
fprintf(stderr, "%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n", msg, file_name, line_number, cudaGetErrorString(err));
std::cin.get();
exit(EXIT_FAILURE);
}
}
#define SAFE_CALL(call, msg) _safe_cuda_call((call),(msg),__FILE__, __LINE__)
class CudaTime {
cudaEvent_t start, stop;
public:
CudaTime() {
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
void record() {
cudaEventRecord( start, 0 );
}
void stopAndPrint(const char* msg) {
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
float elapsedTime;
cudaEventElapsedTime( &elapsedTime, start, stop);
printf( "Elapsed time on %s: %3.1f ms\n", msg , elapsedTime );
}
};
__device__
float Ackley(float* X, int dim) //
{
float a = 20.0f, b = 0.2f, c = 2 * M_PI, sum = 0.0f, cosenos = 0.0f;
for (int i = 0; i < dim; ++i)
{
sum += X[i] * X[i];
cosenos += cosf(c * X[i]);
}
return -a * expf(-b * sqrtf(sum / dim)) - expf(cosenos / dim) + a + expf(1);
}
__device__
float Schwefel(float* X, int dim)
{
float sum = 0.0f;
for (int i = 0; i < dim; ++i)
sum += X[i] * sinf(sqrtf(fabsf(X[i])));
return 418.9829 * dim - sum;
}
__device__
float Funcion_3(float* X, int dim)
{
float sum = 0.0f;
for (int i = 0; i < dim; ++i)
sum += X[i] * X[i];
return 0.5 - ( powf(sinf(sqrtf(sum)), 2) - 0.5 ) / powf(1.0 + 0.001 * sum, 2);
}
__global__ void GSA_iteration_fitness(float** position, float* fitness, int funcion, int numero_poblacion, int dim) {
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int index = yIndex * blockDim.x * gridDim.x + xIndex;
if (index < numero_poblacion)
{
switch (funcion) {
case 1:
fitness[index] = Ackley(position[index], dim);
break;
case 2:
fitness[index] = Schwefel(position[index], dim);
break;
case 3:
fitness[index] = Funcion_3(position[index], dim);
break;
}
}
}
__device__
float* getF(float* i_position, float i_Mp, float* j_position, float j_Ma, int dimension)
{
float R = 0.0f;
// int dim = i.position.size();
for (size_t component = 0; component < dimension; component++)
R += pow(i_position[component] - j_position[component], 2);
R = sqrtf(R);
float e = 2.2204e-016; // small constant (about machine epsilon) to avoid division by zero
float sub_F = i_Mp * j_Ma / (R + e);
float* Force = new float[dimension]; // one force component per dimension
for (size_t component = 0; component < dimension; component++)
Force[component] = sub_F * (j_position[component] - i_position[component]);
return Force;
}
__global__ void initRandomSeed( curandState *devState, unsigned int seed , int numero_poblacion) {
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int index = yIndex * blockDim.x * gridDim.x + xIndex;
if (index < numero_poblacion)
curand_init( seed, index, 0, &devState[index] );
}
__global__ void GSA_iteration_move(float** position, float** A, float** V, float** F,
float* fitness, float* M, float* sum,
float G, int numero_poblacion, int dimension, int iteraciones, int min, int max,
float best, float worst, curandState *devState) {
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
const int index = yIndex * blockDim.x * gridDim.x + xIndex;
if (index < numero_poblacion)
{
float m = (fitness[index] - worst) / (best - worst); //Equation 15
atomicAdd( sum, m );
__syncthreads();
M[index] = m / *sum; //Equation 16
//(e)Calculation of the total force in different directions.
int l;
if (iteraciones < numero_poblacion) l = iteraciones;
else l = numero_poblacion;
// clear the accumulated forces
for (size_t k = 0; k < dimension; k++)
F[index][k] = 0;
// improvement: only the best agents exert force.
for (size_t j = 0; j < l; j++)
{
if (index != j)
{
float* Force = getF(position[index], M[index] , position[j], M[j], dimension);
for (size_t k = 0; k < dimension; k++)
F[index][k] += curand_uniform(&devState[index]) * G * Force[k];
delete[] Force; // allocated with new[], so release with delete[]
}
}
//(f)Calculation of acceleration and velocity.
for (size_t i = 0; i < dimension; i++)
{
if (M[index] == 0)
A[index][i] = 0;
else
A[index][i] = F[index][i] / M[index]; //Equation 10
}
for (size_t i = 0; i < dimension; i++)
V[index][i] = curand_uniform(&devState[index]) * V[index][i] + A[index][i]; //Equation 11
//(g)Updating agents’ position.
for (size_t i = 0; i < dimension; i++)
{
// position[index][i] = std::min<float>(position[index][i] + V[index][i], max);
if (position[index][i] + V[index][i] < max)
position[index][i] = position[index][i] + V[index][i];
else
position[index][i] = max;
// position[index][i] = std::max<float>(position[index][i], min); //Equation 12
if (position[index][i] < min)
position[index][i] = min;
}
}
}
void GSA_cu(int funcion, unsigned int numero_poblacion, unsigned int dimension, float minimo, float maximo, bool minimizar,
int iteraciones, float G) {
//Initialize
std::random_device generador;
std::uniform_real_distribution<float> distribucion(minimo, maximo);
float** position = (float **) malloc( numero_poblacion * sizeof(float*)) ;
for (int i = 0; i < numero_poblacion; ++i) {
position[i] = (float *)malloc(dimension * sizeof(float));
for (size_t j = 0; j < dimension; j++)
position[i][j] = distribucion(generador);
}
float bestFitnessSoFar;
std::vector<float> bestPositionSoFar(dimension);
if (minimizar) bestFitnessSoFar = 1000000000000.0;
else bestFitnessSoFar = -1000000000000.0;
float bestFitness ;
float worstFitness ;
// int bestFitness_idx ;
if (minimizar) {
bestFitness = 1000000000;
worstFitness = -1000000000;
} else {
bestFitness = -1000000000;
worstFitness = 1000000000;
}
float G_step = G / iteraciones;
//(b)Randomized initialization.
/*** Copy Position HOST TO DEVICE ***/
float** dev_position = 0;
float* dev_temp_position[numero_poblacion];
// first create top level pointer
SAFE_CALL(cudaMalloc(&dev_position, sizeof(float*) * numero_poblacion), "CUDA Malloc Failed");
// then create child pointers on host, and copy to device, then copy image
for (int i = 0; i < numero_poblacion; i++)
{
SAFE_CALL(cudaMalloc(&dev_temp_position[i], dimension * sizeof(float) ), "CUDA Memset Failed");
SAFE_CALL(cudaMemcpy(&(dev_position[i]), &(dev_temp_position[i]), sizeof(float *), cudaMemcpyHostToDevice), "CUDA Memset Failed");//copy child pointer to device
SAFE_CALL(cudaMemcpy(dev_temp_position[i], position[i], dimension * sizeof(float), cudaMemcpyHostToDevice), "CUDA Memset Failed"); // copy image to device
}
/*** end Copy Position ***/
/*** Copy Velocity HOST TO DEVICE ***/
float** dev_V = 0;
float* dev_temp_V[numero_poblacion];
// first create top level pointer
SAFE_CALL(cudaMalloc(&dev_V, sizeof(float*) * numero_poblacion), "CUDA Malloc Failed");
// then create child pointers on host, and copy to device, then copy image
for (int i = 0; i < numero_poblacion; i++)
{
SAFE_CALL(cudaMalloc(&dev_temp_V[i], dimension * sizeof(float) ), "CUDA Memset Failed");
SAFE_CALL(cudaMemcpy(&(dev_V[i]), &(dev_temp_V[i]), sizeof(float *), cudaMemcpyHostToDevice), "CUDA Memset Failed");//copy child pointer to device
}
/*** end Copy Velocity ***/
/*** Copy Acceleration HOST TO DEVICE ***/
float** dev_A = 0;
float* dev_temp_A[numero_poblacion];
// first create top level pointer
SAFE_CALL(cudaMalloc(&dev_A, sizeof(float*) * numero_poblacion), "CUDA Malloc Failed");
// then create child pointers on host, and copy to device, then copy image
for (int i = 0; i < numero_poblacion; i++)
{
SAFE_CALL(cudaMalloc(&dev_temp_A[i], dimension * sizeof(float) ), "CUDA Memset Failed");
SAFE_CALL(cudaMemcpy(&(dev_A[i]), &(dev_temp_A[i]), sizeof(float *), cudaMemcpyHostToDevice), "CUDA Memset Failed");//copy child pointer to device
}
/*** end Copy Acceleration ***/
/*** Copy force HOST TO DEVICE ***/
float** dev_F = 0;
float* dev_temp_F[numero_poblacion];
// first create top level pointer
SAFE_CALL(cudaMalloc(&dev_F, sizeof(float*) * numero_poblacion), "CUDA Malloc Failed");
// then create child pointers on host, and copy to device, then copy image
for (int i = 0; i < numero_poblacion; i++)
{
SAFE_CALL(cudaMalloc(&dev_temp_F[i], dimension * sizeof(float) ), "CUDA Memset Failed");
SAFE_CALL(cudaMemcpy(&(dev_F[i]), &(dev_temp_F[i]), sizeof(float *), cudaMemcpyHostToDevice), "CUDA Memset Failed");//copy child pointer to device
}
/*** end Copy force ***/
// Device variables
curandState *devState;
cudaMalloc( (void **)&devState, numero_poblacion * sizeof(curandState) );
float* dev_fitness;
float* dev_M;
float* fitness = new float[numero_poblacion];
SAFE_CALL(cudaMalloc(&dev_fitness, numero_poblacion * sizeof(float) ), "CUDA Malloc Failed");
SAFE_CALL(cudaMalloc(&dev_M, numero_poblacion * sizeof(float) ), "CUDA Malloc Failed");
//Allocate memory for the sum
float* dev_sum;
// float* sum = (float*)malloc(sizeof(float));
cudaMalloc((void**)&dev_sum, sizeof(float));
cudaMemset(dev_sum, 0, sizeof(float));
//Specify a reasonable block size
const dim3 block(4, 4);
//Calculate grid size to cover the whole image
const dim3 grid( ( ceil(sqrt(numero_poblacion)) + block.x - 1) / block.x,
( ceil(sqrt(numero_poblacion)) + block.y - 1) / block.y); //implicit cast to int ceil
initRandomSeed <<< grid, block>>>(devState, (unsigned int)time(NULL), numero_poblacion);
for (int _ = 0; _ < iteraciones - 1; ++_)
{
GSA_iteration_fitness <<< grid, block>>>(dev_position, dev_fitness, funcion, numero_poblacion, dimension);
cudaDeviceSynchronize();
SAFE_CALL(cudaMemcpy(fitness, dev_fitness, numero_poblacion * sizeof(float) , cudaMemcpyDeviceToHost), "CUDA Memcpy Host To Device Failed");
for (int i = 0; i < numero_poblacion; ++i)
{
if (minimizar) {
if (fitness[i] < bestFitness){
bestFitness = fitness[i];
// bestFitness_idx = i;
}
if (fitness[i] > worstFitness)
worstFitness = fitness[i];
} else {
if (fitness[i] > bestFitness){
bestFitness = fitness[i];
// bestFitness_idx = i;
}
if (fitness[i] < worstFitness)
worstFitness = fitness[i];
}
}
if (minimizar)
{
if (bestFitness < bestFitnessSoFar)
bestFitnessSoFar = bestFitness;
} else {
if (bestFitness > bestFitnessSoFar)
bestFitnessSoFar = bestFitness;
}
G -= G_step;
GSA_iteration_move <<< grid, block>>>(dev_position, dev_A, dev_V, dev_F, dev_fitness, dev_M, dev_sum, G, numero_poblacion,
dimension, iteraciones, minimo, maximo, bestFitness, worstFitness, devState);
cudaDeviceSynchronize();
cudaMemset(dev_sum, 0, sizeof(float));
}
cout<<endl<<"Result: "<< bestFitnessSoFar<<endl;
}
int main()
{
int numero_poblacion = 60, dimension = 2;
float G = 18;
bool minimizar = true;
int iteraciones = 50;
CudaTime tiempo;
tiempo.record();
for (int i = 0; i < 1; ++i)
{
float minimo = -32.768f, maximo = 32.768f;
GSA_cu(1, numero_poblacion, dimension, minimo, maximo, minimizar, iteraciones, G);
}
// cout << "All good" << endl;
// for (int i = 0; i < 1; ++i)
// {
// float minimo = -500.0f, maximo = 500.0f;
// GSA_cu(2, numero_poblacion, dimension, minimo, maximo, minimizar, iteraciones, G);
// }
// for (int i = 0; i < 1; ++i)
// {
// minimizar = false;
// float minimo = -100.0f, maximo = 100.0f;
// GSA_cu(3, numero_poblacion, dimension, minimo, maximo, minimizar, iteraciones, G);
// }
tiempo.stopAndPrint("GSA");
return 0;
}
|
c826c512f68f6f89fa43dca0e9d1e9566e70fdd2.hip
|
// !!! This is a file automatically generated by hipify!!!
// from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/upfirdn2d_kernel.cu
// Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
//
// This work is made available under the Nvidia Source Code License-NC.
// To view a copy of this license, visit
// https://nvlabs.github.io/stylegan2/license.html
#include <torch/types.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
static __host__ __device__ __forceinline__ int floor_div(int a, int b) {
int c = a / b;
if (c * b > a) {
c--;
}
return c;
}
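// floor_div rounds toward negative infinity instead of toward zero:
// e.g. floor_div(-3, 2) == -2, whereas -3 / 2 == -1 in C/C++.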
struct UpFirDn2DKernelParams {
int up_x;
int up_y;
int down_x;
int down_y;
int pad_x0;
int pad_x1;
int pad_y0;
int pad_y1;
int major_dim;
int in_h;
int in_w;
int minor_dim;
int kernel_h;
int kernel_w;
int out_h;
int out_w;
int loop_major;
int loop_x;
};
template <typename scalar_t>
__global__ void upfirdn2d_kernel_large(scalar_t *out, const scalar_t *input,
const scalar_t *kernel,
const UpFirDn2DKernelParams p) {
int minor_idx = blockIdx.x * blockDim.x + threadIdx.x;
int out_y = minor_idx / p.minor_dim;
minor_idx -= out_y * p.minor_dim;
int out_x_base = blockIdx.y * p.loop_x * blockDim.y + threadIdx.y;
int major_idx_base = blockIdx.z * p.loop_major;
if (out_x_base >= p.out_w || out_y >= p.out_h ||
major_idx_base >= p.major_dim) {
return;
}
int mid_y = out_y * p.down_y + p.up_y - 1 - p.pad_y0;
int in_y = min(max(floor_div(mid_y, p.up_y), 0), p.in_h);
int h = min(max(floor_div(mid_y + p.kernel_h, p.up_y), 0), p.in_h) - in_y;
int kernel_y = mid_y + p.kernel_h - (in_y + 1) * p.up_y;
for (int loop_major = 0, major_idx = major_idx_base;
loop_major < p.loop_major && major_idx < p.major_dim;
loop_major++, major_idx++) {
for (int loop_x = 0, out_x = out_x_base;
loop_x < p.loop_x && out_x < p.out_w; loop_x++, out_x += blockDim.y) {
int mid_x = out_x * p.down_x + p.up_x - 1 - p.pad_x0;
int in_x = min(max(floor_div(mid_x, p.up_x), 0), p.in_w);
int w = min(max(floor_div(mid_x + p.kernel_w, p.up_x), 0), p.in_w) - in_x;
int kernel_x = mid_x + p.kernel_w - (in_x + 1) * p.up_x;
const scalar_t *x_p =
&input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim +
minor_idx];
const scalar_t *k_p = &kernel[kernel_y * p.kernel_w + kernel_x];
int x_px = p.minor_dim;
int k_px = -p.up_x;
int x_py = p.in_w * p.minor_dim;
int k_py = -p.up_y * p.kernel_w;
scalar_t v = 0.0f;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
v += static_cast<scalar_t>(*x_p) * static_cast<scalar_t>(*k_p);
x_p += x_px;
k_p += k_px;
}
x_p += x_py - w * x_px;
k_p += k_py - w * k_px;
}
out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim +
minor_idx] = v;
}
}
}
template <typename scalar_t, int up_x, int up_y, int down_x, int down_y,
int kernel_h, int kernel_w, int tile_out_h, int tile_out_w>
__global__ void upfirdn2d_kernel(scalar_t *out, const scalar_t *input,
const scalar_t *kernel,
const UpFirDn2DKernelParams p) {
const int tile_in_h = ((tile_out_h - 1) * down_y + kernel_h - 1) / up_y + 1;
const int tile_in_w = ((tile_out_w - 1) * down_x + kernel_w - 1) / up_x + 1;
__shared__ volatile float sk[kernel_h][kernel_w];
__shared__ volatile float sx[tile_in_h][tile_in_w];
int minor_idx = blockIdx.x;
int tile_out_y = minor_idx / p.minor_dim;
minor_idx -= tile_out_y * p.minor_dim;
tile_out_y *= tile_out_h;
int tile_out_x_base = blockIdx.y * p.loop_x * tile_out_w;
int major_idx_base = blockIdx.z * p.loop_major;
if (tile_out_x_base >= p.out_w | tile_out_y >= p.out_h |
major_idx_base >= p.major_dim) {
return;
}
for (int tap_idx = threadIdx.x; tap_idx < kernel_h * kernel_w;
tap_idx += blockDim.x) {
int ky = tap_idx / kernel_w;
int kx = tap_idx - ky * kernel_w;
scalar_t v = 0.0;
if (kx < p.kernel_w & ky < p.kernel_h) {
v = kernel[(p.kernel_h - 1 - ky) * p.kernel_w + (p.kernel_w - 1 - kx)];
}
sk[ky][kx] = v;
}
for (int loop_major = 0, major_idx = major_idx_base;
loop_major < p.loop_major & major_idx < p.major_dim;
loop_major++, major_idx++) {
for (int loop_x = 0, tile_out_x = tile_out_x_base;
loop_x < p.loop_x & tile_out_x < p.out_w;
loop_x++, tile_out_x += tile_out_w) {
int tile_mid_x = tile_out_x * down_x + up_x - 1 - p.pad_x0;
int tile_mid_y = tile_out_y * down_y + up_y - 1 - p.pad_y0;
int tile_in_x = floor_div(tile_mid_x, up_x);
int tile_in_y = floor_div(tile_mid_y, up_y);
__syncthreads();
for (int in_idx = threadIdx.x; in_idx < tile_in_h * tile_in_w;
in_idx += blockDim.x) {
int rel_in_y = in_idx / tile_in_w;
int rel_in_x = in_idx - rel_in_y * tile_in_w;
int in_x = rel_in_x + tile_in_x;
int in_y = rel_in_y + tile_in_y;
scalar_t v = 0.0;
if (in_x >= 0 & in_y >= 0 & in_x < p.in_w & in_y < p.in_h) {
v = input[((major_idx * p.in_h + in_y) * p.in_w + in_x) *
p.minor_dim +
minor_idx];
}
sx[rel_in_y][rel_in_x] = v;
}
__syncthreads();
for (int out_idx = threadIdx.x; out_idx < tile_out_h * tile_out_w;
out_idx += blockDim.x) {
int rel_out_y = out_idx / tile_out_w;
int rel_out_x = out_idx - rel_out_y * tile_out_w;
int out_x = rel_out_x + tile_out_x;
int out_y = rel_out_y + tile_out_y;
int mid_x = tile_mid_x + rel_out_x * down_x;
int mid_y = tile_mid_y + rel_out_y * down_y;
int in_x = floor_div(mid_x, up_x);
int in_y = floor_div(mid_y, up_y);
int rel_in_x = in_x - tile_in_x;
int rel_in_y = in_y - tile_in_y;
int kernel_x = (in_x + 1) * up_x - mid_x - 1;
int kernel_y = (in_y + 1) * up_y - mid_y - 1;
scalar_t v = 0.0;
#pragma unroll
for (int y = 0; y < kernel_h / up_y; y++)
#pragma unroll
for (int x = 0; x < kernel_w / up_x; x++)
v += sx[rel_in_y + y][rel_in_x + x] *
sk[kernel_y + y * up_y][kernel_x + x * up_x];
if (out_x < p.out_w & out_y < p.out_h) {
out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim +
minor_idx] = v;
}
}
}
}
}
torch::Tensor upfirdn2d_op(const torch::Tensor &input,
const torch::Tensor &kernel, int up_x, int up_y,
int down_x, int down_y, int pad_x0, int pad_x1,
int pad_y0, int pad_y1) {
int curDevice = -1;
hipGetDevice(&curDevice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice);
UpFirDn2DKernelParams p;
auto x = input.contiguous();
auto k = kernel.contiguous();
p.major_dim = x.size(0);
p.in_h = x.size(1);
p.in_w = x.size(2);
p.minor_dim = x.size(3);
p.kernel_h = k.size(0);
p.kernel_w = k.size(1);
p.up_x = up_x;
p.up_y = up_y;
p.down_x = down_x;
p.down_y = down_y;
p.pad_x0 = pad_x0;
p.pad_x1 = pad_x1;
p.pad_y0 = pad_y0;
p.pad_y1 = pad_y1;
p.out_h = (p.in_h * p.up_y + p.pad_y0 + p.pad_y1 - p.kernel_h + p.down_y) /
p.down_y;
p.out_w = (p.in_w * p.up_x + p.pad_x0 + p.pad_x1 - p.kernel_w + p.down_x) /
p.down_x;
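// Standard upfirdn output size: out = (in*up + pad0 + pad1 - kernel + down) / down,
// e.g. in = 8, up = 1, down = 2, pad0 = pad1 = 1, kernel = 3 gives (8 + 2 - 3 + 2) / 2 = 4.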
auto out =
at::empty({p.major_dim, p.out_h, p.out_w, p.minor_dim}, x.options());
int mode = -1;
int tile_out_h = -1;
int tile_out_w = -1;
if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 &&
p.kernel_h <= 4 && p.kernel_w <= 4) {
mode = 1;
tile_out_h = 16;
tile_out_w = 64;
}
if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 &&
p.kernel_h <= 3 && p.kernel_w <= 3) {
mode = 2;
tile_out_h = 16;
tile_out_w = 64;
}
if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 &&
p.kernel_h <= 4 && p.kernel_w <= 4) {
mode = 3;
tile_out_h = 16;
tile_out_w = 64;
}
if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 &&
p.kernel_h <= 2 && p.kernel_w <= 2) {
mode = 4;
tile_out_h = 16;
tile_out_w = 64;
}
if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 &&
p.kernel_h <= 4 && p.kernel_w <= 4) {
mode = 5;
tile_out_h = 8;
tile_out_w = 32;
}
if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 &&
p.kernel_h <= 2 && p.kernel_w <= 2) {
mode = 6;
tile_out_h = 8;
tile_out_w = 32;
}
dim3 block_size;
dim3 grid_size;
if (tile_out_h > 0 && tile_out_w > 0) {
p.loop_major = (p.major_dim - 1) / 16384 + 1;
p.loop_x = 1;
block_size = dim3(32 * 8, 1, 1);
grid_size = dim3(((p.out_h - 1) / tile_out_h + 1) * p.minor_dim,
(p.out_w - 1) / (p.loop_x * tile_out_w) + 1,
(p.major_dim - 1) / p.loop_major + 1);
} else {
p.loop_major = (p.major_dim - 1) / 16384 + 1;
p.loop_x = 4;
block_size = dim3(4, 32, 1);
grid_size = dim3((p.out_h * p.minor_dim - 1) / block_size.x + 1,
(p.out_w - 1) / (p.loop_x * block_size.y) + 1,
(p.major_dim - 1) / p.loop_major + 1);
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] {
switch (mode) {
case 1:
hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 4, 4, 16, 64>)
, dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
case 2:
hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 3, 3, 16, 64>)
, dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
case 3:
hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 4, 4, 16, 64>)
, dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
case 4:
hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 2, 2, 16, 64>)
, dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
case 5:
hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32>)
, dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
case 6:
hipLaunchKernelGGL(( upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32>)
, dim3(grid_size), dim3(block_size), 0, stream, out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
default:
hipLaunchKernelGGL(( upfirdn2d_kernel_large<scalar_t>), dim3(grid_size), dim3(block_size), 0, stream,
out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
}
});
return out;
}
|
c826c512f68f6f89fa43dca0e9d1e9566e70fdd2.cu
|
// from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/upfirdn2d_kernel.cu
// Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
//
// This work is made available under the Nvidia Source Code License-NC.
// To view a copy of this license, visit
// https://nvlabs.github.io/stylegan2/license.html
#include <torch/types.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>
static __host__ __device__ __forceinline__ int floor_div(int a, int b) {
int c = a / b;
if (c * b > a) {
c--;
}
return c;
}
struct UpFirDn2DKernelParams {
int up_x;
int up_y;
int down_x;
int down_y;
int pad_x0;
int pad_x1;
int pad_y0;
int pad_y1;
int major_dim;
int in_h;
int in_w;
int minor_dim;
int kernel_h;
int kernel_w;
int out_h;
int out_w;
int loop_major;
int loop_x;
};
template <typename scalar_t>
__global__ void upfirdn2d_kernel_large(scalar_t *out, const scalar_t *input,
const scalar_t *kernel,
const UpFirDn2DKernelParams p) {
int minor_idx = blockIdx.x * blockDim.x + threadIdx.x;
int out_y = minor_idx / p.minor_dim;
minor_idx -= out_y * p.minor_dim;
int out_x_base = blockIdx.y * p.loop_x * blockDim.y + threadIdx.y;
int major_idx_base = blockIdx.z * p.loop_major;
if (out_x_base >= p.out_w || out_y >= p.out_h ||
major_idx_base >= p.major_dim) {
return;
}
int mid_y = out_y * p.down_y + p.up_y - 1 - p.pad_y0;
int in_y = min(max(floor_div(mid_y, p.up_y), 0), p.in_h);
int h = min(max(floor_div(mid_y + p.kernel_h, p.up_y), 0), p.in_h) - in_y;
int kernel_y = mid_y + p.kernel_h - (in_y + 1) * p.up_y;
for (int loop_major = 0, major_idx = major_idx_base;
loop_major < p.loop_major && major_idx < p.major_dim;
loop_major++, major_idx++) {
for (int loop_x = 0, out_x = out_x_base;
loop_x < p.loop_x && out_x < p.out_w; loop_x++, out_x += blockDim.y) {
int mid_x = out_x * p.down_x + p.up_x - 1 - p.pad_x0;
int in_x = min(max(floor_div(mid_x, p.up_x), 0), p.in_w);
int w = min(max(floor_div(mid_x + p.kernel_w, p.up_x), 0), p.in_w) - in_x;
int kernel_x = mid_x + p.kernel_w - (in_x + 1) * p.up_x;
const scalar_t *x_p =
&input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim +
minor_idx];
const scalar_t *k_p = &kernel[kernel_y * p.kernel_w + kernel_x];
int x_px = p.minor_dim;
int k_px = -p.up_x;
int x_py = p.in_w * p.minor_dim;
int k_py = -p.up_y * p.kernel_w;
scalar_t v = 0.0f;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
v += static_cast<scalar_t>(*x_p) * static_cast<scalar_t>(*k_p);
x_p += x_px;
k_p += k_px;
}
x_p += x_py - w * x_px;
k_p += k_py - w * k_px;
}
out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim +
minor_idx] = v;
}
}
}
template <typename scalar_t, int up_x, int up_y, int down_x, int down_y,
int kernel_h, int kernel_w, int tile_out_h, int tile_out_w>
__global__ void upfirdn2d_kernel(scalar_t *out, const scalar_t *input,
const scalar_t *kernel,
const UpFirDn2DKernelParams p) {
const int tile_in_h = ((tile_out_h - 1) * down_y + kernel_h - 1) / up_y + 1;
const int tile_in_w = ((tile_out_w - 1) * down_x + kernel_w - 1) / up_x + 1;
__shared__ volatile float sk[kernel_h][kernel_w];
__shared__ volatile float sx[tile_in_h][tile_in_w];
int minor_idx = blockIdx.x;
int tile_out_y = minor_idx / p.minor_dim;
minor_idx -= tile_out_y * p.minor_dim;
tile_out_y *= tile_out_h;
int tile_out_x_base = blockIdx.y * p.loop_x * tile_out_w;
int major_idx_base = blockIdx.z * p.loop_major;
if (tile_out_x_base >= p.out_w | tile_out_y >= p.out_h |
major_idx_base >= p.major_dim) {
return;
}
for (int tap_idx = threadIdx.x; tap_idx < kernel_h * kernel_w;
tap_idx += blockDim.x) {
int ky = tap_idx / kernel_w;
int kx = tap_idx - ky * kernel_w;
scalar_t v = 0.0;
if (kx < p.kernel_w & ky < p.kernel_h) {
v = kernel[(p.kernel_h - 1 - ky) * p.kernel_w + (p.kernel_w - 1 - kx)];
}
sk[ky][kx] = v;
}
for (int loop_major = 0, major_idx = major_idx_base;
loop_major < p.loop_major & major_idx < p.major_dim;
loop_major++, major_idx++) {
for (int loop_x = 0, tile_out_x = tile_out_x_base;
loop_x < p.loop_x & tile_out_x < p.out_w;
loop_x++, tile_out_x += tile_out_w) {
int tile_mid_x = tile_out_x * down_x + up_x - 1 - p.pad_x0;
int tile_mid_y = tile_out_y * down_y + up_y - 1 - p.pad_y0;
int tile_in_x = floor_div(tile_mid_x, up_x);
int tile_in_y = floor_div(tile_mid_y, up_y);
__syncthreads();
for (int in_idx = threadIdx.x; in_idx < tile_in_h * tile_in_w;
in_idx += blockDim.x) {
int rel_in_y = in_idx / tile_in_w;
int rel_in_x = in_idx - rel_in_y * tile_in_w;
int in_x = rel_in_x + tile_in_x;
int in_y = rel_in_y + tile_in_y;
scalar_t v = 0.0;
if (in_x >= 0 & in_y >= 0 & in_x < p.in_w & in_y < p.in_h) {
v = input[((major_idx * p.in_h + in_y) * p.in_w + in_x) *
p.minor_dim +
minor_idx];
}
sx[rel_in_y][rel_in_x] = v;
}
__syncthreads();
for (int out_idx = threadIdx.x; out_idx < tile_out_h * tile_out_w;
out_idx += blockDim.x) {
int rel_out_y = out_idx / tile_out_w;
int rel_out_x = out_idx - rel_out_y * tile_out_w;
int out_x = rel_out_x + tile_out_x;
int out_y = rel_out_y + tile_out_y;
int mid_x = tile_mid_x + rel_out_x * down_x;
int mid_y = tile_mid_y + rel_out_y * down_y;
int in_x = floor_div(mid_x, up_x);
int in_y = floor_div(mid_y, up_y);
int rel_in_x = in_x - tile_in_x;
int rel_in_y = in_y - tile_in_y;
int kernel_x = (in_x + 1) * up_x - mid_x - 1;
int kernel_y = (in_y + 1) * up_y - mid_y - 1;
scalar_t v = 0.0;
#pragma unroll
for (int y = 0; y < kernel_h / up_y; y++)
#pragma unroll
for (int x = 0; x < kernel_w / up_x; x++)
v += sx[rel_in_y + y][rel_in_x + x] *
sk[kernel_y + y * up_y][kernel_x + x * up_x];
if (out_x < p.out_w & out_y < p.out_h) {
out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim +
minor_idx] = v;
}
}
}
}
}
torch::Tensor upfirdn2d_op(const torch::Tensor &input,
const torch::Tensor &kernel, int up_x, int up_y,
int down_x, int down_y, int pad_x0, int pad_x1,
int pad_y0, int pad_y1) {
int curDevice = -1;
cudaGetDevice(&curDevice);
cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
UpFirDn2DKernelParams p;
auto x = input.contiguous();
auto k = kernel.contiguous();
p.major_dim = x.size(0);
p.in_h = x.size(1);
p.in_w = x.size(2);
p.minor_dim = x.size(3);
p.kernel_h = k.size(0);
p.kernel_w = k.size(1);
p.up_x = up_x;
p.up_y = up_y;
p.down_x = down_x;
p.down_y = down_y;
p.pad_x0 = pad_x0;
p.pad_x1 = pad_x1;
p.pad_y0 = pad_y0;
p.pad_y1 = pad_y1;
p.out_h = (p.in_h * p.up_y + p.pad_y0 + p.pad_y1 - p.kernel_h + p.down_y) /
p.down_y;
p.out_w = (p.in_w * p.up_x + p.pad_x0 + p.pad_x1 - p.kernel_w + p.down_x) /
p.down_x;
auto out =
at::empty({p.major_dim, p.out_h, p.out_w, p.minor_dim}, x.options());
int mode = -1;
int tile_out_h = -1;
int tile_out_w = -1;
if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 &&
p.kernel_h <= 4 && p.kernel_w <= 4) {
mode = 1;
tile_out_h = 16;
tile_out_w = 64;
}
if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 &&
p.kernel_h <= 3 && p.kernel_w <= 3) {
mode = 2;
tile_out_h = 16;
tile_out_w = 64;
}
if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 &&
p.kernel_h <= 4 && p.kernel_w <= 4) {
mode = 3;
tile_out_h = 16;
tile_out_w = 64;
}
if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 &&
p.kernel_h <= 2 && p.kernel_w <= 2) {
mode = 4;
tile_out_h = 16;
tile_out_w = 64;
}
if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 &&
p.kernel_h <= 4 && p.kernel_w <= 4) {
mode = 5;
tile_out_h = 8;
tile_out_w = 32;
}
if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 &&
p.kernel_h <= 2 && p.kernel_w <= 2) {
mode = 6;
tile_out_h = 8;
tile_out_w = 32;
}
dim3 block_size;
dim3 grid_size;
if (tile_out_h > 0 && tile_out_w > 0) {
p.loop_major = (p.major_dim - 1) / 16384 + 1;
p.loop_x = 1;
block_size = dim3(32 * 8, 1, 1);
grid_size = dim3(((p.out_h - 1) / tile_out_h + 1) * p.minor_dim,
(p.out_w - 1) / (p.loop_x * tile_out_w) + 1,
(p.major_dim - 1) / p.loop_major + 1);
} else {
p.loop_major = (p.major_dim - 1) / 16384 + 1;
p.loop_x = 4;
block_size = dim3(4, 32, 1);
grid_size = dim3((p.out_h * p.minor_dim - 1) / block_size.x + 1,
(p.out_w - 1) / (p.loop_x * block_size.y) + 1,
(p.major_dim - 1) / p.loop_major + 1);
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] {
switch (mode) {
case 1:
upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 4, 4, 16, 64>
<<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
case 2:
upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 3, 3, 16, 64>
<<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
case 3:
upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 4, 4, 16, 64>
<<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
case 4:
upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 2, 2, 16, 64>
<<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
case 5:
upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32>
<<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
case 6:
upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32>
<<<grid_size, block_size, 0, stream>>>(out.data_ptr<scalar_t>(),
x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
break;
default:
upfirdn2d_kernel_large<scalar_t><<<grid_size, block_size, 0, stream>>>(
out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(),
k.data_ptr<scalar_t>(), p);
}
});
return out;
}
|
325097e97d82b53f7f70fe34b10226bab81c66b8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "givens.h"
#include "matrix.h"
#include <math.h>
#include <stdio.h>
__host__ __device__
void givens(float a, float b, float *c, float *s, float *r) {
float h, d;
h = hypotf(a, b);
d = 1.0f/h;
*c = fabsf(a)*d;
*s = copysignf(d, a)*b;
*r = copysignf(1.0f, a)*h;
}
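// (c, s) define the Givens rotation G = [[c, s], [-s, c]] with G * [a, b]^T = [r, 0]^T,
// i.e. it zeroes the second component; givens_rotate_R below applies exactly this update.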
__global__ void print_matrix(float *A, int M, int N) {
for (int i = 0; i < M; i++) {
for (int j = 0; j < N; j++) {
printf("%f ", A[i*N + j]);
}
printf("\n");
}
printf("*-------------------*\n\n");
}
__global__ void print_matrix_transpose(float *A, int M, int N) {
for (int j = 0; j < N; j++) {
for (int i = 0; i < M; i++) {
printf("%f ", A[i*N + j]);
}
printf("\n");
}
printf("*-------------------*\n\n");
}
/*
Performs QR factorization of A [M x N] and stores
the results in Q [M x M] and R [M x N]
*/
void givens_rotation(float *A, float *Q, float *R, int M, int N) {
float *d_A, *d_Q, *d_R_t, *d_A_t;
hipMalloc(&d_Q, M*M*sizeof(float));
hipMalloc(&d_R_t, M*N*sizeof(float));
hipMalloc(&d_A, M*N*sizeof(float));
hipMalloc(&d_A_t, M*N*sizeof(float));
hipMemcpy(d_A, A, M*N*sizeof(float), hipMemcpyHostToDevice);
// print_matrix<<<1,1>>>(d_A, M, N);
// hipDeviceSynchronize();
dim3 threads(16, 16);
dim3 blocks1((M+15)/16, (M+15)/16);
dim3 blocks2((M+15)/16, (N+15)/16);
dim3 blocks3((N+15)/16, (M+15)/16);
hipLaunchKernelGGL(( matrix_transpose_gpu), dim3(blocks2), dim3(threads), 0, 0, d_A, d_A_t, M, N);
hipLaunchKernelGGL(( identity), dim3(blocks1), dim3(threads), 0, 0, d_Q, M);
hipMemcpy(d_R_t, d_A_t, M*N*sizeof(float), hipMemcpyDeviceToDevice);
// TODO
// print_matrix<<<1,1>>>(d_Q, M, M);
// hipDeviceSynchronize();
for (int j = 0; j < N; j++) {
for (int i = M-1; i >= j+1; i--) {
float a, b;
hipMemcpy(&a, d_R_t+j*M+(i-1), sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&b, d_R_t+j*M+i, sizeof(float), hipMemcpyDeviceToHost);
// TODO
//printf("a = %f, b = %f\n", a, b);
if (b == 0.0f) {
continue;
}
float c, s, r;
givens(a, b, &c, &s, &r);
//TODO
// printf("a = %f, b = %f, c = %f, s = %f, r = %f\n", a, b, c, s, r);
hipLaunchKernelGGL(( givens_rotate_R), dim3((N+15)/16), dim3(16), 0, 0, d_R_t, M, N, i, j, c, s, r);
hipLaunchKernelGGL(( givens_rotate_Q), dim3((M+15)/16), dim3(16), 0, 0, d_Q, M, N, i, c, s);
// print_matrix_transpose<<<1,1>>>(d_R_t, N, M);
// hipDeviceSynchronize();
}
}
hipMemcpy(d_A_t, d_R_t, M*N*sizeof(float), hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( matrix_transpose_gpu), dim3(blocks3), dim3(threads), 0, 0, d_A_t, d_R_t, N, M);
hipMemcpy(R, d_R_t, M*N*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(Q, d_Q, M*M*sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_Q);
hipFree(d_R_t);
hipFree(d_A);
hipFree(d_A_t);
}
__global__ void givens_rotate_R(float *R_t, int M, int N, int i, int rot_col,
float c, float s, float r) {
int j = blockIdx.x*blockDim.x + threadIdx.x;
if (j < rot_col || j >= N) return;
if (j == rot_col) {
R_t[j*M + i - 1] = r;
R_t[j*M + i] = 0.0f;
}
else {
float a = R_t[j*M + i - 1];
float b = R_t[j*M + i];
R_t[j*M + i - 1] = c*a + s*b;
R_t[j*M + i] = -s*a + c*b;
// printf("j = %d, a = %f and b = %f\n", j, R_t[j*M + i - 1], R_t[j*M + i]);
}
}
__global__ void givens_rotate_Q(float *Q, int M, int N, int j, float c, float s) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i >= M) return;
float a = Q[i*M + j-1];
float b = Q[i*M + j];
Q[i*M + j-1] = c*a + s*b;
Q[i*M + j] = -s*a + c*b;
}
|
325097e97d82b53f7f70fe34b10226bab81c66b8.cu
|
#include "givens.h"
#include "matrix.h"
#include <math.h>
#include <stdio.h>
__host__ __device__
void givens(float a, float b, float *c, float *s, float *r) {
float h, d;
h = hypotf(a, b);
d = 1.0f/h;
*c = fabsf(a)*d;
*s = copysignf(d, a)*b;
*r = copysignf(1.0f, a)*h;
}
__global__ void print_matrix(float *A, int M, int N) {
for (int i = 0; i < M; i++) {
for (int j = 0; j < N; j++) {
printf("%f ", A[i*N + j]);
}
printf("\n");
}
printf("*-------------------*\n\n");
}
__global__ void print_matrix_transpose(float *A, int M, int N) {
for (int j = 0; j < N; j++) {
for (int i = 0; i < M; i++) {
printf("%f ", A[i*N + j]);
}
printf("\n");
}
printf("*-------------------*\n\n");
}
/*
Performs QR factorization of A [M x N] and stores
the results in Q [M x M] and R [M x N]
*/
void givens_rotation(float *A, float *Q, float *R, int M, int N) {
float *d_A, *d_Q, *d_R_t, *d_A_t;
cudaMalloc(&d_Q, M*M*sizeof(float));
cudaMalloc(&d_R_t, M*N*sizeof(float));
cudaMalloc(&d_A, M*N*sizeof(float));
cudaMalloc(&d_A_t, M*N*sizeof(float));
cudaMemcpy(d_A, A, M*N*sizeof(float), cudaMemcpyHostToDevice);
// print_matrix<<<1,1>>>(d_A, M, N);
// cudaDeviceSynchronize();
dim3 threads(16, 16);
dim3 blocks1((M+15)/16, (M+15)/16);
dim3 blocks2((M+15)/16, (N+15)/16);
dim3 blocks3((N+15)/16, (M+15)/16);
matrix_transpose_gpu<<<blocks2, threads>>>(d_A, d_A_t, M, N);
identity<<<blocks1, threads>>>(d_Q, M);
cudaMemcpy(d_R_t, d_A_t, M*N*sizeof(float), cudaMemcpyDeviceToDevice);
// TODO
// print_matrix<<<1,1>>>(d_Q, M, M);
// cudaDeviceSynchronize();
for (int j = 0; j < N; j++) {
for (int i = M-1; i >= j+1; i--) {
float a, b;
cudaMemcpy(&a, d_R_t+j*M+(i-1), sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&b, d_R_t+j*M+i, sizeof(float), cudaMemcpyDeviceToHost);
// TODO
//printf("a = %f, b = %f\n", a, b);
if (b == 0.0f) {
continue;
}
float c, s, r;
givens(a, b, &c, &s, &r);
//TODO
// printf("a = %f, b = %f, c = %f, s = %f, r = %f\n", a, b, c, s, r);
givens_rotate_R<<<(N+15)/16, 16>>>(d_R_t, M, N, i, j, c, s, r);
givens_rotate_Q<<<(M+15)/16, 16>>>(d_Q, M, N, i, c, s);
// print_matrix_transpose<<<1,1>>>(d_R_t, N, M);
// cudaDeviceSynchronize();
}
}
cudaMemcpy(d_A_t, d_R_t, M*N*sizeof(float), cudaMemcpyDeviceToDevice);
matrix_transpose_gpu<<<blocks3, threads>>>(d_A_t, d_R_t, N, M);
cudaMemcpy(R, d_R_t, M*N*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(Q, d_Q, M*M*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_Q);
cudaFree(d_R_t);
cudaFree(d_A);
cudaFree(d_A_t);
}
__global__ void givens_rotate_R(float *R_t, int M, int N, int i, int rot_col,
float c, float s, float r) {
int j = blockIdx.x*blockDim.x + threadIdx.x;
if (j < rot_col || j >= N) return;
if (j == rot_col) {
R_t[j*M + i - 1] = r;
R_t[j*M + i] = 0.0f;
}
else {
float a = R_t[j*M + i - 1];
float b = R_t[j*M + i];
R_t[j*M + i - 1] = c*a + s*b;
R_t[j*M + i] = -s*a + c*b;
// printf("j = %d, a = %f and b = %f\n", j, R_t[j*M + i - 1], R_t[j*M + i]);
}
}
__global__ void givens_rotate_Q(float *Q, int M, int N, int j, float c, float s) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i >= M) return;
float a = Q[i*M + j-1];
float b = Q[i*M + j];
Q[i*M + j-1] = c*a + s*b;
Q[i*M + j] = -s*a + c*b;
}
|
6b306bc660a3d86d7b2c3c20758a351de037ff66.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <omp.h>
#include <iostream>
#define N 1000000
using namespace std;
void warmUpGPU();
__global__ void vectorAdd(unsigned int * A, unsigned int * B, unsigned int * C);
int main(int argc, char *argv[])
{
warmUpGPU();
//change OpenMP settings:
omp_set_num_threads(1);
unsigned int * A;
unsigned int * B;
unsigned int * C;
A=(unsigned int *)malloc(sizeof(unsigned int)*N);
B=(unsigned int *)malloc(sizeof(unsigned int)*N);
C=(unsigned int *)malloc(sizeof(unsigned int)*N);
printf("\nSize of A+B+C (GiB): %f",(sizeof(unsigned int)*N*3.0)/(1024.0*1024.0*1024.0));
//init:
int i=0;
for (i=0; i<N; i++){
A[i]=i;
B[i]=i;
C[i]=0;
}
double tstart=omp_get_wtime();
//CPU version:
/*
for (int i=0; i<N; i++){
C[i]=A[i]+B[i];
}
*/
//CUDA error code:
hipError_t errCode=hipSuccess;
if(errCode != hipSuccess)
{
cout << "\nLast error: " << errCode << endl;
}
unsigned int * dev_A;
unsigned int * dev_B;
unsigned int * dev_C;
//allocate on the device: A, B, C
errCode=hipMalloc((unsigned int**)&dev_A, sizeof(unsigned int)*N);
if(errCode != hipSuccess) {
cout << "\nError: A error with code " << errCode << endl;
}
errCode=hipMalloc((unsigned int**)&dev_B, sizeof(unsigned int)*N);
if(errCode != hipSuccess) {
cout << "\nError: B error with code " << errCode << endl;
}
errCode=hipMalloc((unsigned int**)&dev_C, sizeof(unsigned int)*N);
if(errCode != hipSuccess) {
cout << "\nError: C error with code " << errCode << endl;
}
//copy A to device
errCode=hipMemcpy( dev_A, A, sizeof(unsigned int)*N, hipMemcpyHostToDevice);
if(errCode != hipSuccess) {
cout << "\nError: A memcpy error with code " << errCode << endl;
}
//copy B to device
errCode=hipMemcpy( dev_B, B, sizeof(unsigned int)*N, hipMemcpyHostToDevice);
if(errCode != hipSuccess) {
cout << "\nError: A memcpy error with code " << errCode << endl;
}
//copy C to device (initialized to 0)
errCode=hipMemcpy( dev_C, C, sizeof(unsigned int)*N, hipMemcpyHostToDevice);
if(errCode != hipSuccess) {
cout << "\nError: A memcpy error with code " << errCode << endl;
}
//execute kernel
const unsigned int totalBlocks=ceil(N*1.0/1024.0);
printf("\ntotal blocks: %d",totalBlocks);
hipLaunchKernelGGL(( vectorAdd), dim3(totalBlocks),dim3(1024), 0, 0, dev_A, dev_B, dev_C);
errCode=hipGetLastError(); // capture any launch error (errCode was never updated after the launch)
if(errCode != hipSuccess){
cout<<"Error after kernel launch "<<errCode<<endl;
}
//copy data from device to host
errCode=hipMemcpy( C, dev_C, sizeof(unsigned int)*N, hipMemcpyDeviceToHost);
if(errCode != hipSuccess) {
cout << "\nError: getting C result form GPU error with code " << errCode << endl;
}
hipDeviceSynchronize();
for (int i=N-10; i<N; i++)
{
printf("\n%d",C[i]);
}
double tend=omp_get_wtime();
printf("\nTotal time (s): %f",tend-tstart);
printf("\n");
return 0;
}
__global__ void vectorAdd(unsigned int * A, unsigned int * B, unsigned int * C) {
unsigned int tid=threadIdx.x+ (blockIdx.x*blockDim.x);
if (tid>=N){ // tid == N would read past the end of the arrays
return;
}
C[tid]=A[tid]+B[tid];
return;
}
__global__ void warmup(unsigned int * tmp) {
if (threadIdx.x==0)
*tmp=555;
return;
}
void warmUpGPU(){
printf("\nWarming up GPU for time trialing...\n");
unsigned int * dev_tmp;
unsigned int * tmp;
tmp=(unsigned int*)malloc(sizeof(unsigned int));
*tmp=0;
hipError_t errCode=hipSuccess;
errCode=hipMalloc((unsigned int**)&dev_tmp, sizeof(unsigned int));
if(errCode != hipSuccess) {
cout << "\nError: dev_tmp error with code " << errCode << endl;
}
hipLaunchKernelGGL(( warmup), dim3(1),dim3(256), 0, 0, dev_tmp);
//copy data from device to host
errCode=hipMemcpy( tmp, dev_tmp, sizeof(unsigned int), hipMemcpyDeviceToHost);
if(errCode != hipSuccess) {
cout << "\nError: getting tmp result form GPU error with code " << errCode << endl;
}
hipDeviceSynchronize();
printf("\ntmp (changed to 555 on GPU): %d",*tmp);
hipFree(dev_tmp);
return;
}
|
6b306bc660a3d86d7b2c3c20758a351de037ff66.cu
|
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <omp.h>
#include <iostream>
#define N 1000000
using namespace std;
void warmUpGPU();
__global__ void vectorAdd(unsigned int * A, unsigned int * B, unsigned int * C);
int main(int argc, char *argv[])
{
warmUpGPU();
//change OpenMP settings:
omp_set_num_threads(1);
unsigned int * A;
unsigned int * B;
unsigned int * C;
A=(unsigned int *)malloc(sizeof(unsigned int)*N);
B=(unsigned int *)malloc(sizeof(unsigned int)*N);
C=(unsigned int *)malloc(sizeof(unsigned int)*N);
printf("\nSize of A+B+C (GiB): %f",(sizeof(unsigned int)*N*3.0)/(1024.0*1024.0*1024.0));
//init:
int i=0;
for (i=0; i<N; i++){
A[i]=i;
B[i]=i;
C[i]=0;
}
double tstart=omp_get_wtime();
//CPU version:
/*
for (int i=0; i<N; i++){
C[i]=A[i]+B[i];
}
*/
//CUDA error code:
cudaError_t errCode=cudaSuccess;
if(errCode != cudaSuccess)
{
cout << "\nLast error: " << errCode << endl;
}
unsigned int * dev_A;
unsigned int * dev_B;
unsigned int * dev_C;
//allocate on the device: A, B, C
errCode=cudaMalloc((unsigned int**)&dev_A, sizeof(unsigned int)*N);
if(errCode != cudaSuccess) {
cout << "\nError: A error with code " << errCode << endl;
}
errCode=cudaMalloc((unsigned int**)&dev_B, sizeof(unsigned int)*N);
if(errCode != cudaSuccess) {
cout << "\nError: B error with code " << errCode << endl;
}
errCode=cudaMalloc((unsigned int**)&dev_C, sizeof(unsigned int)*N);
if(errCode != cudaSuccess) {
cout << "\nError: C error with code " << errCode << endl;
}
//copy A to device
errCode=cudaMemcpy( dev_A, A, sizeof(unsigned int)*N, cudaMemcpyHostToDevice);
if(errCode != cudaSuccess) {
cout << "\nError: A memcpy error with code " << errCode << endl;
}
//copy B to device
errCode=cudaMemcpy( dev_B, B, sizeof(unsigned int)*N, cudaMemcpyHostToDevice);
if(errCode != cudaSuccess) {
cout << "\nError: A memcpy error with code " << errCode << endl;
}
//copy C to device (initialized to 0)
errCode=cudaMemcpy( dev_C, C, sizeof(unsigned int)*N, cudaMemcpyHostToDevice);
if(errCode != cudaSuccess) {
cout << "\nError: A memcpy error with code " << errCode << endl;
}
//execute kernel
const unsigned int totalBlocks=ceil(N*1.0/1024.0);
printf("\ntotal blocks: %d",totalBlocks);
vectorAdd<<<totalBlocks,1024>>>(dev_A, dev_B, dev_C);
errCode=cudaGetLastError(); // capture any launch error (errCode was never updated after the launch)
if(errCode != cudaSuccess){
cout<<"Error after kernel launch "<<errCode<<endl;
}
//copy data from device to host
errCode=cudaMemcpy( C, dev_C, sizeof(unsigned int)*N, cudaMemcpyDeviceToHost);
if(errCode != cudaSuccess) {
cout << "\nError: getting C result form GPU error with code " << errCode << endl;
}
cudaDeviceSynchronize();
for (int i=N-10; i<N; i++)
{
printf("\n%d",C[i]);
}
double tend=omp_get_wtime();
printf("\nTotal time (s): %f",tend-tstart);
printf("\n");
return 0;
}
__global__ void vectorAdd(unsigned int * A, unsigned int * B, unsigned int * C) {
unsigned int tid=threadIdx.x+ (blockIdx.x*blockDim.x);
if (tid>=N){ // tid == N would read past the end of the arrays
return;
}
C[tid]=A[tid]+B[tid];
return;
}
__global__ void warmup(unsigned int * tmp) {
if (threadIdx.x==0)
*tmp=555;
return;
}
void warmUpGPU(){
printf("\nWarming up GPU for time trialing...\n");
unsigned int * dev_tmp;
unsigned int * tmp;
tmp=(unsigned int*)malloc(sizeof(unsigned int));
*tmp=0;
cudaError_t errCode=cudaSuccess;
errCode=cudaMalloc((unsigned int**)&dev_tmp, sizeof(unsigned int));
if(errCode != cudaSuccess) {
cout << "\nError: dev_tmp error with code " << errCode << endl;
}
warmup<<<1,256>>>(dev_tmp);
//copy data from device to host
errCode=cudaMemcpy( tmp, dev_tmp, sizeof(unsigned int), cudaMemcpyDeviceToHost);
if(errCode != cudaSuccess) {
cout << "\nError: getting tmp result form GPU error with code " << errCode << endl;
}
cudaDeviceSynchronize();
printf("\ntmp (changed to 555 on GPU): %d",*tmp);
cudaFree(dev_tmp);
return;
}
|
a2400bbf38feeda8791f815b6e12ec77e7a321b0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "training/graph_group_async.h"
#include "tensors/tensor_operators.h"
#include "functional/functional.h"
namespace marian {
void AsyncGraphGroup::setScheduler(Ptr<Scheduler> scheduler) {
scheduler_ = scheduler;
// optimizer has to be registered last to see changes of learning rate
scheduler_->registerTrainingObserver(scheduler_);
for(auto opt : shardOpt_)
scheduler_->registerTrainingObserver(opt);
}
void AsyncGraphGroup::fetchParams(Tensor oldParams,
const std::vector<Tensor>& params,
int device_id) {
// @TODO read guard on parameters
int pos = 0;
std::vector<std::thread> threads;
for(int idx = 0; idx < devices_.size(); idx++) {
threads.emplace_back(std::thread(
[&](int idx, int pos) {
// individual mutex per-shard
std::lock_guard<std::mutex> guard(shardSync_[idx]);
oldParams->subtensor(pos, params[idx]->size())->copyFrom(params[idx]);
},
idx,
pos));
pos += shardSize_;
}
for(auto&& t : threads) {
t.join();
}
}
void AsyncGraphGroup::pushGradients(Tensor newGrads,
size_t batch_words,
int device_id) {
// add instead of copy?
std::vector<std::thread> threads;
int pos = 0;
for(int idx = 0; idx < devices_.size(); idx++) {
threads.emplace_back(std::thread(
[&](int idx, int pos) {
// individual mutex per-shard
std::lock_guard<std::mutex> guard(shardSync_[idx]);
grads_[idx]->copyFrom(newGrads->subtensor(pos, grads_[idx]->size()));
if(scaleLearningRate_) {
shardOpt_[idx]->update(
params_[idx], grads_[idx], batch_words / avgBatchWords_);
} else {
shardOpt_[idx]->update(params_[idx], grads_[idx]);
}
if(movingAvg_)
updateMovingAverage(
paramsAvg_[idx], params_[idx], scheduler_->numberOfBatches());
},
idx,
pos));
pos += shardSize_;
}
for(auto&& t : threads)
t.join();
}
void AsyncGraphGroup::updateMovingAverage(Tensor paramsAvg,
Tensor params,
size_t batches) {
using namespace functional;
float decay
= ::max(mvDecay_, 1.f - (float)(batches + 1) / (float)(batches + 10));
Element(_1 = ((1.f - decay) * _1) + (decay * _2), paramsAvg, params);
}
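// i.e. an exponential moving average: paramsAvg <- (1 - decay)*paramsAvg + decay*params,
// where decay starts at 0.9 on the first batch and decreases toward the mvDecay_ floor,
// so later parameter updates move the average more slowly.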
void AsyncGraphGroup::init(Ptr<data::Batch> batch) {
// initialize the parameters
{
ThreadPool pool(graphs_.size(), graphs_.size());
for(size_t i = 0; i < graphs_.size(); ++i) {
auto init = [&](size_t i) {
builders_[i]->build(graphs_[i], batch);
graphs_[i]->forward();
};
pool.enqueue(init, i);
}
}
if(params_.size() == 0) {
int totalSize = graphs_[0]->params()->vals()->size();
shardSize_ = ceil(totalSize / (float)devices_.size());
int pos = 0;
// parameter sharding
for(auto graph : graphs_) {
int __size__ = min(shardSize_, totalSize);
totalSize -= __size__;
Tensor param;
Ptr<TensorAllocator> allocator = New<TensorAllocator>(graph->getBackend());
allocator->reserveExact(__size__ * sizeof(float));
allocator->allocate(param, {1, __size__});
paramsAlloc_.push_back(allocator);
param->copyFrom(graphs_[0]->params()->vals()->subtensor(pos, __size__));
params_.push_back(param);
pos += __size__;
}
}
if(grads_.size() == 0) {
int totalSize = graphs_[0]->params()->vals()->size();
for(auto graph : graphs_) {
int __size__ = min(shardSize_, totalSize);
totalSize -= __size__;
Tensor grad_;
Ptr<TensorAllocator> allocator_ = New<TensorAllocator>(graph->getBackend());
allocator_->reserveExact(__size__ * sizeof(float));
allocator_->allocate(grad_, {1, __size__});
gradsAlloc_.push_back(allocator_);
grads_.push_back(grad_);
}
}
if(movingAvg_) {
if(paramsAvg_.size() == 0) {
int totalSize = graphs_[0]->params()->vals()->size();
int i = 0;
for(auto graph : graphs_) {
int __size__ = min(shardSize_, totalSize);
totalSize -= __size__;
Tensor paramAvg;
Ptr<TensorAllocator> allocator = New<TensorAllocator>(graph->getBackend());
allocator->reserveExact(__size__ * sizeof(float));
allocator->allocate(paramAvg, {1, __size__});
paramAvg->copyFrom(params_[i++]);
paramsAllocAvg_.push_back(allocator);
paramsAvg_.push_back(paramAvg);
}
}
}
}
void AsyncGraphGroup::execute(Ptr<data::Batch> batch) {
if(first_) {
init(batch);
first_ = false;
}
auto task = [this](Ptr<data::Batch> batch) {
static size_t i = 0;
thread_local Ptr<ExpressionGraph> graph;
thread_local Ptr<models::ModelBase> builder;
thread_local size_t t = 0;
thread_local size_t num_seen_words = 0;
thread_local int t_id = 0;
thread_local Tensor accGradients;
thread_local Ptr<TensorAllocator> accAlloc;
if(!graph) {
std::lock_guard<std::mutex> lock(sync_);
t_id = i;
graph = graphs_[i];
builder = builders_[i++];
}
auto costNode = builder->build(graph, batch);
if(t % tau_ == 0) {
fetchParams(graph->params()->vals(), params_, t_id);
}
graph->forward();
float cost = costNode->scalar();
graph->backward();
// Get batch stats
size_t batch_words = batch->words();
Tensor gradients;
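// with tau_ > 1 the gradient is accumulated locally over tau_ mini-batches
// and only pushed to the shards every tau_-th step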
if(tau_ > 1) {
if(t == 0) {
accAlloc = New<TensorAllocator>(graph->getBackend());
accAlloc->reserveExact(graph->params()->grads()->memory()->size());
accAlloc->allocate(accGradients, graph->params()->grads()->shape());
accGradients->set(0);
}
using namespace functional;
Element(_1 += _2, accGradients, graph->params()->grads());
gradients = accGradients;
// Keep track of how many words we've calculated the error from
num_seen_words += batch_words;
} else {
gradients = graph->params()->grads();
num_seen_words = batch_words;
}
t++;
if(t % tau_ == 0) {
pushGradients(gradients, num_seen_words, t_id);
// Reset the counter of seen words after gradient update
num_seen_words = 0;
if(tau_ > 1)
gradients->set(0);
}
if(scheduler_) {
std::unique_lock<std::mutex> lock(schedulerMutex_);
// Wait until the thread that wants to do validation is finished.
pool_->wait_for_one(lock);
scheduler_->update(cost, batch);
if(scheduler_->saving() || scheduler_->validating()) {
// Wait with validation or saving until all other threads are done with update.
// We want to reuse the graphs for validation, so they need to be in
// a safe state.
pool_->wait_for_others(lock);
if(movingAvg_)
for(auto g : graphs_)
fetchParams(g->params()->vals(), paramsAvg_, t_id);
if(scheduler_->saving())
this->save(graph);
if(scheduler_->validating())
scheduler_->validate(graphs_);
// Validation or saving is done, tell other threads to continue work.
pool_->notify_others();
}
}
};
pool_->enqueue(task, batch);
}
void AsyncGraphGroup::wait() {
{
std::unique_lock<std::mutex> lock(schedulerMutex_);
pool_->wait_for_others(lock);
pool_->notify_others();
}
}
}
|
a2400bbf38feeda8791f815b6e12ec77e7a321b0.cu
|
#include "training/graph_group_async.h"
#include "tensors/tensor_operators.h"
#include "functional/functional.h"
namespace marian {
void AsyncGraphGroup::setScheduler(Ptr<Scheduler> scheduler) {
scheduler_ = scheduler;
// optimizer has to be registered last to see changes of learning rate
scheduler_->registerTrainingObserver(scheduler_);
for(auto opt : shardOpt_)
scheduler_->registerTrainingObserver(opt);
}
void AsyncGraphGroup::fetchParams(Tensor oldParams,
const std::vector<Tensor>& params,
int device_id) {
// @TODO read guard on parameters
int pos = 0;
std::vector<std::thread> threads;
for(int idx = 0; idx < devices_.size(); idx++) {
threads.emplace_back(std::thread(
[&](int idx, int pos) {
// individual mutex per-shard
std::lock_guard<std::mutex> guard(shardSync_[idx]);
oldParams->subtensor(pos, params[idx]->size())->copyFrom(params[idx]);
},
idx,
pos));
pos += shardSize_;
}
for(auto&& t : threads) {
t.join();
}
}
void AsyncGraphGroup::pushGradients(Tensor newGrads,
size_t batch_words,
int device_id) {
// add instead of copy?
std::vector<std::thread> threads;
int pos = 0;
for(int idx = 0; idx < devices_.size(); idx++) {
threads.emplace_back(std::thread(
[&](int idx, int pos) {
// individual mutex per-shard
std::lock_guard<std::mutex> guard(shardSync_[idx]);
grads_[idx]->copyFrom(newGrads->subtensor(pos, grads_[idx]->size()));
if(scaleLearningRate_) {
shardOpt_[idx]->update(
params_[idx], grads_[idx], batch_words / avgBatchWords_);
} else {
shardOpt_[idx]->update(params_[idx], grads_[idx]);
}
if(movingAvg_)
updateMovingAverage(
paramsAvg_[idx], params_[idx], scheduler_->numberOfBatches());
},
idx,
pos));
pos += shardSize_;
}
for(auto&& t : threads)
t.join();
}
void AsyncGraphGroup::updateMovingAverage(Tensor paramsAvg,
Tensor params,
size_t batches) {
using namespace functional;
float decay
= std::max(mvDecay_, 1.f - (float)(batches + 1) / (float)(batches + 10));
Element(_1 = ((1.f - decay) * _1) + (decay * _2), paramsAvg, params);
}
void AsyncGraphGroup::init(Ptr<data::Batch> batch) {
// initialize the parameters
{
ThreadPool pool(graphs_.size(), graphs_.size());
for(size_t i = 0; i < graphs_.size(); ++i) {
auto init = [&](size_t i) {
builders_[i]->build(graphs_[i], batch);
graphs_[i]->forward();
};
pool.enqueue(init, i);
}
}
if(params_.size() == 0) {
int totalSize = graphs_[0]->params()->vals()->size();
shardSize_ = ceil(totalSize / (float)devices_.size());
int pos = 0;
// parameter sharding
for(auto graph : graphs_) {
int __size__ = min(shardSize_, totalSize);
totalSize -= __size__;
Tensor param;
Ptr<TensorAllocator> allocator = New<TensorAllocator>(graph->getBackend());
allocator->reserveExact(__size__ * sizeof(float));
allocator->allocate(param, {1, __size__});
paramsAlloc_.push_back(allocator);
param->copyFrom(graphs_[0]->params()->vals()->subtensor(pos, __size__));
params_.push_back(param);
pos += __size__;
}
}
if(grads_.size() == 0) {
int totalSize = graphs_[0]->params()->vals()->size();
for(auto graph : graphs_) {
int __size__ = min(shardSize_, totalSize);
totalSize -= __size__;
Tensor grad_;
Ptr<TensorAllocator> allocator_ = New<TensorAllocator>(graph->getBackend());
allocator_->reserveExact(__size__ * sizeof(float));
allocator_->allocate(grad_, {1, __size__});
gradsAlloc_.push_back(allocator_);
grads_.push_back(grad_);
}
}
if(movingAvg_) {
if(paramsAvg_.size() == 0) {
int totalSize = graphs_[0]->params()->vals()->size();
int i = 0;
for(auto graph : graphs_) {
int __size__ = min(shardSize_, totalSize);
totalSize -= __size__;
Tensor paramAvg;
Ptr<TensorAllocator> allocator = New<TensorAllocator>(graph->getBackend());
allocator->reserveExact(__size__ * sizeof(float));
allocator->allocate(paramAvg, {1, __size__});
paramAvg->copyFrom(params_[i++]);
paramsAllocAvg_.push_back(allocator);
paramsAvg_.push_back(paramAvg);
}
}
}
}
void AsyncGraphGroup::execute(Ptr<data::Batch> batch) {
if(first_) {
init(batch);
first_ = false;
}
auto task = [this](Ptr<data::Batch> batch) {
static size_t i = 0;
thread_local Ptr<ExpressionGraph> graph;
thread_local Ptr<models::ModelBase> builder;
thread_local size_t t = 0;
thread_local size_t num_seen_words = 0;
thread_local int t_id = 0;
thread_local Tensor accGradients;
thread_local Ptr<TensorAllocator> accAlloc;
if(!graph) {
std::lock_guard<std::mutex> lock(sync_);
t_id = i;
graph = graphs_[i];
builder = builders_[i++];
}
auto costNode = builder->build(graph, batch);
if(t % tau_ == 0) {
fetchParams(graph->params()->vals(), params_, t_id);
}
graph->forward();
float cost = costNode->scalar();
graph->backward();
// Get batch stats
size_t batch_words = batch->words();
Tensor gradients;
if(tau_ > 1) {
if(t == 0) {
accAlloc = New<TensorAllocator>(graph->getBackend());
accAlloc->reserveExact(graph->params()->grads()->memory()->size());
accAlloc->allocate(accGradients, graph->params()->grads()->shape());
accGradients->set(0);
}
using namespace functional;
Element(_1 += _2, accGradients, graph->params()->grads());
gradients = accGradients;
// Keep track of how many words we've calculated the error from
num_seen_words += batch_words;
} else {
gradients = graph->params()->grads();
num_seen_words = batch_words;
}
t++;
if(t % tau_ == 0) {
pushGradients(gradients, num_seen_words, t_id);
// Reset the counter of seen words after gradient update
num_seen_words = 0;
if(tau_ > 1)
gradients->set(0);
}
if(scheduler_) {
std::unique_lock<std::mutex> lock(schedulerMutex_);
// Wait until the thread that wants to do validation is finished.
pool_->wait_for_one(lock);
scheduler_->update(cost, batch);
if(scheduler_->saving() || scheduler_->validating()) {
// Wait with validation or saving until all other threads are done with update.
// We want to reuse the graphs for validation, so they need to be in
// a safe state.
pool_->wait_for_others(lock);
if(movingAvg_)
for(auto g : graphs_)
fetchParams(g->params()->vals(), paramsAvg_, t_id);
if(scheduler_->saving())
this->save(graph);
if(scheduler_->validating())
scheduler_->validate(graphs_);
// Validation or saving is done, tell other threads to continue work.
pool_->notify_others();
}
}
};
pool_->enqueue(task, batch);
}
void AsyncGraphGroup::wait() {
{
std::unique_lock<std::mutex> lock(schedulerMutex_);
pool_->wait_for_others(lock);
pool_->notify_others();
}
}
}
|
9c2e8893ae0782b8b821f6d9bad3dc7564d677a2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ============================================================================
*
* Authors:
* Hunter McCoy <[email protected]>
*
* ============================================================================
*/
#include <poggers/allocators/bitarray.cuh>
#include <stdio.h>
#include <iostream>
#include <assert.h>
#include <chrono>
#include <hip/hip_cooperative_groups.h>
namespace cg = cooperative_groups;
using namespace poggers::allocators;
#define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
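// Speed test (prefetching variant): each thread first tries the bitmap of the SM it runs on;
// when that is exhausted, the coalesced group allocates metadata from the block's global bit-array
// and lane 0 attaches the leftover bits to the local bitmap for future requests.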
__global__ void malloc_test_kernel_prefetching(bitarr_grouped<4> * global_bitarray, storage_bitmap<4> * local_bitmaps, uint64_t max_mallocs, uint64_t mallocs_per_thread){
uint64_t tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid >=max_mallocs) return;
int my_core = poggers::utils::get_smid();
//printf("%d\n", my_core);
storage_bitmap<4> * my_bitmap = &local_bitmaps[my_core];
//storage_bitmap<4> * my_bitmap = storage_bitmap<4>::get_my_bitmap(local_bitmaps);
uint64_t blockID = blockIdx.x;
// if (threadIdx.x == 0){
// printf("%llu\n", blockID);
// }
for (int i =0; i < mallocs_per_thread; i++){
void * my_allocation = my_bitmap->malloc_from_existing();
//void * my_allocation = nullptr;
if (my_allocation) continue;
// __syncthreads();
// assert(my_bitmap->check_attachment() == 0ULL);
// __syncthreads();
//doesn't need
//my_allocation = nullptr;
bool should_preempt = false;
uint64_t * ext_address = nullptr;
uint64_t remaining_metadata = 0;
//grouped threads is the local group!
//need the bigga one
cg::coalesced_group grouped_threads = global_bitarray[blockID].metadata_malloc(my_allocation, should_preempt, ext_address, remaining_metadata);
// __syncthreads();
// assert(my_bitmap->check_attachment() == 0ULL);
// __syncthreads();
if (grouped_threads.thread_rank() == 0 && remaining_metadata != 0ULL){
//printf("Size: %d, popcount: %d\n", grouped_threads.size(), __popcll(remaining_metadata));
my_bitmap->attach_buffer(ext_address, remaining_metadata);
}
}
if (tid == 0){
printf("%llx %llx\n", global_bitarray[blockID].check_attachment(), my_bitmap->check_attachment());
}
}
__global__ void malloc_test_kernel_split_local(bitarr_grouped<4> * global_bitarray, storage_bitmap<4> * local_bitmaps, uint64_t max_mallocs, uint64_t mallocs_per_thread){
uint64_t tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid >=max_mallocs) return;
int my_core = poggers::utils::get_smid();
//printf("%d\n", my_core);
//storage_bitmap<4> * my_bitmap = storage_bitmap<4>::get_my_bitmap(local_bitmaps);
uint64_t blockID = blockIdx.x;
storage_bitmap<4> * my_bitmap = &local_bitmaps[blockID];
// if (threadIdx.x == 0){
// printf("%llu\n", blockID);
// }
for (int i =0; i < mallocs_per_thread; i++){
void * my_allocation = my_bitmap->malloc_from_existing();
//void * my_allocation = nullptr;
if (my_allocation) continue;
// __syncthreads();
// assert(my_bitmap->check_attachment() == 0ULL);
// __syncthreads();
//doesn't need
//my_allocation = nullptr;
bool should_preempt = false;
uint64_t * ext_address = nullptr;
uint64_t remaining_metadata = 0;
//grouped threads is the local group!
//need the bigga one
cg::coalesced_group grouped_threads = global_bitarray[blockID].metadata_malloc(my_allocation, should_preempt, ext_address, remaining_metadata);
// __syncthreads();
// assert(my_bitmap->check_attachment() == 0ULL);
// __syncthreads();
if (grouped_threads.thread_rank() == 0 && remaining_metadata != 0ULL){
//printf("Size: %d, popcount: %d\n", grouped_threads.size(), __popcll(remaining_metadata));
my_bitmap->attach_buffer(ext_address, remaining_metadata);
}
}
if (tid == 0){
printf("%llx %llx\n", global_bitarray[blockID].check_attachment(), my_bitmap->check_attachment());
}
}
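// Correctness check: every index handed out by malloc must be unique; the per-slot atomic
// counter asserts that each returned index is seen exactly once.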
__global__ void malloc_test_correctness_kernel(bitarr_grouped<4> * global_bitarray, uint64_t * counters, uint64_t max_mallocs, uint64_t mallocs_per_thread){
uint64_t tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid >= max_mallocs) return;
uint64_t blockID = blockIdx.x;
uint64_t my_val = global_bitarray[blockID].malloc(blockID);
uint64_t old_counter = atomicAdd((unsigned long long int *)&counters[my_val], 1ULL);
assert(old_counter == 0);
}
__global__ void malloc_init_kernel(bitarr_grouped<4> * global_bitarray, uint64_t num_bit_arr){
uint64_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= num_bit_arr) return;
global_bitarray[tid].init();
}
__host__ void build_bitarr_test_prefetch(uint64_t max_mallocs, uint64_t mallocs_per_thread, uint64_t block_size){
uint64_t max_blocks = (max_mallocs-1)/block_size+1;
bitarr_grouped<4> * dev_bitarray;
hipMalloc((void **)& dev_bitarray, sizeof(bitarr_grouped<4>)*max_blocks);
hipDeviceSynchronize();
assert(dev_bitarray != nullptr);
hipLaunchKernelGGL(( malloc_init_kernel), dim3((max_blocks -1)/512+1), dim3(512), 0, 0, dev_bitarray, max_blocks);
hipDeviceSynchronize();
storage_bitmap<4> * local_bitmaps = storage_bitmap<4>::generate_buffers();
printf("Done with init\n");
auto bitarr_start = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( malloc_test_kernel_prefetching), dim3((max_mallocs -1)/block_size+1), dim3(block_size), 0, 0, dev_bitarray, local_bitmaps, max_mallocs, mallocs_per_thread);
hipDeviceSynchronize();
auto bitarr_end = std::chrono::high_resolution_clock::now();
printf("Done with speed test\n");
std::chrono::duration<double> bit_diff = bitarr_end - bitarr_start;
std::cout << "bitarr Malloced " << max_mallocs*mallocs_per_thread << " in " << bit_diff.count() << " seconds, " << block_size << " max block size\n";
printf("%f allocs per second\n", ((double) max_mallocs*mallocs_per_thread)/ bit_diff.count());
hipDeviceSynchronize();
hipFree(dev_bitarray);
hipDeviceSynchronize();
uint64_t * max_counters;
hipMalloc((void ** )&max_counters, sizeof(uint64_t)*max_blocks*4096);
assert(max_counters != nullptr);
hipMemset(max_counters, 0, sizeof(uint64_t)*max_blocks*4096);
//and boot correctness test
hipMalloc((void **)& dev_bitarray, sizeof(bitarr_grouped<4>)*max_blocks);
assert(dev_bitarray != nullptr);
hipDeviceSynchronize();
hipLaunchKernelGGL(( malloc_init_kernel), dim3((max_blocks -1)/512+1), dim3(512), 0, 0, dev_bitarray, max_blocks);
hipDeviceSynchronize();
//malloc_test_correctness_kernel<<<(max_mallocs -1)/block_size+1, block_size>>>(dev_bitarray, max_counters, max_mallocs, mallocs_per_thread);
hipDeviceSynchronize();
hipFree(max_counters);
hipFree(dev_bitarray);
}
__host__ void build_bitarr_test_split(uint64_t max_mallocs, uint64_t mallocs_per_thread, uint64_t block_size){
uint64_t max_blocks = (max_mallocs-1)/block_size+1;
bitarr_grouped<4> * dev_bitarray;
hipMalloc((void **)& dev_bitarray, sizeof(bitarr_grouped<4>)*max_blocks);
hipDeviceSynchronize();
assert(dev_bitarray != nullptr);
hipLaunchKernelGGL(( malloc_init_kernel), dim3((max_blocks -1)/512+1), dim3(512), 0, 0, dev_bitarray, max_blocks);
hipDeviceSynchronize();
storage_bitmap<4> * local_bitmaps = storage_bitmap<4>::generate_buffers_blocks(max_blocks);
printf("Done with init\n");
auto bitarr_start = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( malloc_test_kernel_split_local), dim3((max_mallocs -1)/block_size+1), dim3(block_size), 0, 0, dev_bitarray, local_bitmaps, max_mallocs, mallocs_per_thread);
hipDeviceSynchronize();
auto bitarr_end = std::chrono::high_resolution_clock::now();
printf("Done with speed test\n");
std::chrono::duration<double> bit_diff = bitarr_end - bitarr_start;
std::cout << "bitarr Malloced " << max_mallocs*mallocs_per_thread << " in " << bit_diff.count() << " seconds, " << block_size << " max block size\n";
printf("%f allocs per second\n", ((double) max_mallocs*mallocs_per_thread)/ bit_diff.count());
hipDeviceSynchronize();
hipFree(dev_bitarray);
hipDeviceSynchronize();
uint64_t * max_counters;
hipMalloc((void ** )&max_counters, sizeof(uint64_t)*max_blocks*4096);
assert(max_counters != nullptr);
hipMemset(max_counters, 0, sizeof(uint64_t)*max_blocks*4096);
//and boot correctness test
hipMalloc((void **)& dev_bitarray, sizeof(bitarr_grouped<4>)*max_blocks);
assert(dev_bitarray != nullptr);
hipDeviceSynchronize();
hipLaunchKernelGGL(( malloc_init_kernel), dim3((max_blocks -1)/512+1), dim3(512), 0, 0, dev_bitarray, max_blocks);
hipDeviceSynchronize();
//malloc_test_correctness_kernel<<<(max_mallocs -1)/block_size+1, block_size>>>(dev_bitarray, max_counters, max_mallocs, mallocs_per_thread);
hipDeviceSynchronize();
hipFree(max_counters);
hipFree(dev_bitarray);
}
int main(int argc, char** argv) {
//small smoke test: 512 threads, 2 mallocs per thread
build_bitarr_test_prefetch(512, 2, 512);
//10 mil
//build_bitarr_test_prefetch(10000000, 1, 512);
// //100 mil
// build_bitarr_test_prefetch(100000000, 1, 512);
// //1 mil
// build_bitarr_test_prefetch(1000000, 1, 1024);
// //10 mil
// build_bitarr_test_prefetch(10000000, 1, 1024);
// //100 mil
// build_bitarr_test_prefetch(100000000, 1, 1024);
return 0;
}
|
9c2e8893ae0782b8b821f6d9bad3dc7564d677a2.cu
|
/*
* ============================================================================
*
* Authors:
* Hunter McCoy <[email protected]>
*
* ============================================================================
*/
#include <poggers/allocators/bitarray.cuh>
#include <stdio.h>
#include <iostream>
#include <assert.h>
#include <chrono>
#include <cooperative_groups.h>
namespace cg = cooperative_groups;
using namespace poggers::allocators;
#define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void malloc_test_kernel_prefetching(bitarr_grouped<4> * global_bitarray, storage_bitmap<4> * local_bitmaps, uint64_t max_mallocs, uint64_t mallocs_per_thread){
uint64_t tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid >=max_mallocs) return;
int my_core = poggers::utils::get_smid();
//printf("%d\n", my_core);
storage_bitmap<4> * my_bitmap = &local_bitmaps[my_core];
//storage_bitmap<4> * my_bitmap = storage_bitmap<4>::get_my_bitmap(local_bitmaps);
uint64_t blockID = blockIdx.x;
// if (threadIdx.x == 0){
// printf("%llu\n", blockID);
// }
for (int i =0; i < mallocs_per_thread; i++){
void * my_allocation = my_bitmap->malloc_from_existing();
//void * my_allocation = nullptr;
if (my_allocation) continue;
// __syncthreads();
// assert(my_bitmap->check_attachment() == 0ULL);
// __syncthreads();
//doesn't need
//my_allocation = nullptr;
bool should_preempt = false;
uint64_t * ext_address = nullptr;
uint64_t remaining_metadata = 0;
//grouped threads is the local group!
//need the bigga one
cg::coalesced_group grouped_threads = global_bitarray[blockID].metadata_malloc(my_allocation, should_preempt, ext_address, remaining_metadata);
// __syncthreads();
// assert(my_bitmap->check_attachment() == 0ULL);
// __syncthreads();
if (grouped_threads.thread_rank() == 0 && remaining_metadata != 0ULL){
//printf("Size: %d, popcount: %d\n", grouped_threads.size(), __popcll(remaining_metadata));
my_bitmap->attach_buffer(ext_address, remaining_metadata);
}
}
if (tid == 0){
printf("%llx %llx\n", global_bitarray[blockID].check_attachment(), my_bitmap->check_attachment());
}
}
__global__ void malloc_test_kernel_split_local(bitarr_grouped<4> * global_bitarray, storage_bitmap<4> * local_bitmaps, uint64_t max_mallocs, uint64_t mallocs_per_thread){
uint64_t tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid >=max_mallocs) return;
int my_core = poggers::utils::get_smid();
//printf("%d\n", my_core);
//storage_bitmap<4> * my_bitmap = storage_bitmap<4>::get_my_bitmap(local_bitmaps);
uint64_t blockID = blockIdx.x;
storage_bitmap<4> * my_bitmap = &local_bitmaps[blockID];
// if (threadIdx.x == 0){
// printf("%llu\n", blockID);
// }
for (int i =0; i < mallocs_per_thread; i++){
void * my_allocation = my_bitmap->malloc_from_existing();
//void * my_allocation = nullptr;
if (my_allocation) continue;
// __syncthreads();
// assert(my_bitmap->check_attachment() == 0ULL);
// __syncthreads();
//doesn't need
//my_allocation = nullptr;
bool should_preempt = false;
uint64_t * ext_address = nullptr;
uint64_t remaining_metadata = 0;
//grouped threads is the local group!
//need the bigga one
cg::coalesced_group grouped_threads = global_bitarray[blockID].metadata_malloc(my_allocation, should_preempt, ext_address, remaining_metadata);
// __syncthreads();
// assert(my_bitmap->check_attachment() == 0ULL);
// __syncthreads();
if (grouped_threads.thread_rank() == 0 && remaining_metadata != 0ULL){
//printf("Size: %d, popcount: %d\n", grouped_threads.size(), __popcll(remaining_metadata));
my_bitmap->attach_buffer(ext_address, remaining_metadata);
}
}
if (tid == 0){
printf("%llx %llx\n", global_bitarray[blockID].check_attachment(), my_bitmap->check_attachment());
}
}
__global__ void malloc_test_correctness_kernel(bitarr_grouped<4> * global_bitarray, uint64_t * counters, uint64_t max_mallocs, uint64_t mallocs_per_thread){
uint64_t tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid >= max_mallocs) return;
uint64_t blockID = blockIdx.x;
uint64_t my_val = global_bitarray[blockID].malloc(blockID);
uint64_t old_counter = atomicAdd((unsigned long long int *)&counters[my_val], 1ULL);
assert(old_counter == 0);
}
__global__ void malloc_init_kernel(bitarr_grouped<4> * global_bitarray, uint64_t num_bit_arr){
uint64_t tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= num_bit_arr) return;
global_bitarray[tid].init();
}
__host__ void build_bitarr_test_prefetch(uint64_t max_mallocs, uint64_t mallocs_per_thread, uint64_t block_size){
uint64_t max_blocks = (max_mallocs-1)/block_size+1;
bitarr_grouped<4> * dev_bitarray;
cudaMalloc((void **)& dev_bitarray, sizeof(bitarr_grouped<4>)*max_blocks);
cudaDeviceSynchronize();
assert(dev_bitarray != nullptr);
malloc_init_kernel<<<(max_blocks -1)/512+1, 512>>>(dev_bitarray, max_blocks);
cudaDeviceSynchronize();
storage_bitmap<4> * local_bitmaps = storage_bitmap<4>::generate_buffers();
printf("Done with init\n");
auto bitarr_start = std::chrono::high_resolution_clock::now();
malloc_test_kernel_prefetching<<<(max_mallocs -1)/block_size+1, block_size>>>(dev_bitarray, local_bitmaps, max_mallocs, mallocs_per_thread);
cudaDeviceSynchronize();
auto bitarr_end = std::chrono::high_resolution_clock::now();
printf("Done with speed test\n");
std::chrono::duration<double> bit_diff = bitarr_end - bitarr_start;
std::cout << "bitarr Malloced " << max_mallocs*mallocs_per_thread << " in " << bit_diff.count() << " seconds, " << block_size << " max block size\n";
printf("%f allocs per second\n", ((double) max_mallocs*mallocs_per_thread)/ bit_diff.count());
cudaDeviceSynchronize();
cudaFree(dev_bitarray);
cudaDeviceSynchronize();
uint64_t * max_counters;
cudaMalloc((void ** )&max_counters, sizeof(uint64_t)*max_blocks*4096);
assert(max_counters != nullptr);
cudaMemset(max_counters, 0, sizeof(uint64_t)*max_blocks*4096);
//and boot correctness test
cudaMalloc((void **)& dev_bitarray, sizeof(bitarr_grouped<4>)*max_blocks);
assert(dev_bitarray != nullptr);
cudaDeviceSynchronize();
malloc_init_kernel<<<(max_blocks -1)/512+1, 512>>>(dev_bitarray, max_blocks);
cudaDeviceSynchronize();
//malloc_test_correctness_kernel<<<(max_mallocs -1)/block_size+1, block_size>>>(dev_bitarray, max_counters, max_mallocs, mallocs_per_thread);
cudaDeviceSynchronize();
cudaFree(max_counters);
cudaFree(dev_bitarray);
}
__host__ void build_bitarr_test_split(uint64_t max_mallocs, uint64_t mallocs_per_thread, uint64_t block_size){
uint64_t max_blocks = (max_mallocs-1)/block_size+1;
bitarr_grouped<4> * dev_bitarray;
cudaMalloc((void **)& dev_bitarray, sizeof(bitarr_grouped<4>)*max_blocks);
cudaDeviceSynchronize();
assert(dev_bitarray != nullptr);
malloc_init_kernel<<<(max_blocks -1)/512+1, 512>>>(dev_bitarray, max_blocks);
cudaDeviceSynchronize();
storage_bitmap<4> * local_bitmaps = storage_bitmap<4>::generate_buffers_blocks(max_blocks);
printf("Done with init\n");
auto bitarr_start = std::chrono::high_resolution_clock::now();
malloc_test_kernel_split_local<<<(max_mallocs -1)/block_size+1, block_size>>>(dev_bitarray, local_bitmaps, max_mallocs, mallocs_per_thread);
cudaDeviceSynchronize();
auto bitarr_end = std::chrono::high_resolution_clock::now();
printf("Done with speed test\n");
std::chrono::duration<double> bit_diff = bitarr_end - bitarr_start;
std::cout << "bitarr Malloced " << max_mallocs*mallocs_per_thread << " in " << bit_diff.count() << " seconds, " << block_size << " max block size\n";
printf("%f allocs per second\n", ((double) max_mallocs*mallocs_per_thread)/ bit_diff.count());
cudaDeviceSynchronize();
cudaFree(dev_bitarray);
cudaDeviceSynchronize();
uint64_t * max_counters;
cudaMalloc((void ** )&max_counters, sizeof(uint64_t)*max_blocks*4096);
assert(max_counters != nullptr);
cudaMemset(max_counters, 0, sizeof(uint64_t)*max_blocks*4096);
//and boot correctness test
cudaMalloc((void **)& dev_bitarray, sizeof(bitarr_grouped<4>)*max_blocks);
assert(dev_bitarray != nullptr);
cudaDeviceSynchronize();
malloc_init_kernel<<<(max_blocks -1)/512+1, 512>>>(dev_bitarray, max_blocks);
cudaDeviceSynchronize();
//malloc_test_correctness_kernel<<<(max_mallocs -1)/block_size+1, block_size>>>(dev_bitarray, max_counters, max_mallocs, mallocs_per_thread);
cudaDeviceSynchronize();
cudaFree(max_counters);
cudaFree(dev_bitarray);
}
int main(int argc, char** argv) {
//small smoke test: 512 threads, 2 mallocs per thread
build_bitarr_test_prefetch(512, 2, 512);
//10 mil
//build_bitarr_test_prefetch(10000000, 1, 512);
// //100 mil
// build_bitarr_test_prefetch(100000000, 1, 512);
// //1 mil
// build_bitarr_test_prefetch(1000000, 1, 1024);
// //10 mil
// build_bitarr_test_prefetch(10000000, 1, 1024);
// //100 mil
// build_bitarr_test_prefetch(100000000, 1, 1024);
return 0;
}
|
08d98be508af88e7f06379e5a4e497592bde1d2d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _CUDA_TENSOR_SAMPLING_CU_
#define _CUDA_TENSOR_SAMPLING_CU_
#include "cuda_tensor.cuh"
#include "rand/cuda_rand.cuh"
#include "rand/cuda_sampling.cuh"
namespace apex_tensor{
namespace cuda_tensor{
// sample binary using prob
template<int st_m,int block_dim_bits>
__global__ void sample_binary_kernel( float *elem_dst , const float *elem_src,
unsigned int pitch_dst, unsigned int pitch_src,
int y_max , int x_max,
const float *rnd ){
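// each thread handles one element of the pitched 2D tensor: tid is decomposed into a row and
// column using the aligned line width, and rnd supplies one uniform random number per thread
// for the binary sample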
const int tid = (blockIdx.x << block_dim_bits) + threadIdx.x;
const int x_mm= get_align_width( x_max );
const int y = tid / x_mm;
const int x = tid % x_mm;
elem_dst = get_line ( elem_dst, y, pitch_dst );
elem_src = get_line_const( elem_src, y, pitch_src );
if( y < y_max && x < x_max ){
float val = cuda_rand::sample_binary( elem_src[x], cuda_rand::get_rand( rnd, tid ) - 1.0f );
store_method::__store<st_m>( elem_dst[x], val );
}
}
template<int st_m,typename T>
inline void sample_binary( T &dst, const T &src ){
int stride = get_align_width( dst.x_max );
int y_max = num_line( dst );
int x_max = dst.x_max;
int num_block = (y_max*stride + BASE_THREAD_NUM-1)/BASE_THREAD_NUM;
dim3 dimBlock( BASE_THREAD_NUM, 1, 1 );
dim3 dimGrid ( num_block , 1, 1 );
const float *rnd = cuda_rand::rand_singles( dimGrid.x * dimBlock.x );
hipLaunchKernelGGL(( sample_binary_kernel<st_m,BASE_THREAD_BITS>) , dim3(dimGrid),dim3(dimBlock), 0, 0,
dst.elem, src.elem, dst.pitch, src.pitch, y_max, x_max, rnd );
}
template<int st_m,int block_dim_bits>
__global__ void sample_recified_linear_kernel( float *elem_dst , const float *elem_src,
unsigned int pitch_dst, unsigned int pitch_src,
int y_max , int x_max,
const float *rnd ){
__shared__ float s_rnd[ 1<<block_dim_bits ];
const int tid = (blockIdx.x << block_dim_bits) + threadIdx.x;
const float r = cuda_rand::sample_gaussian<block_dim_bits>( cuda_rand::get_rand(rnd,tid), threadIdx.x, s_rnd );
const int x_mm= get_align_width( x_max );
const int y = tid / x_mm;
const int x = tid % x_mm;
elem_dst = get_line ( elem_dst, y, pitch_dst );
elem_src = get_line_const( elem_src, y, pitch_src );
if( y < y_max && x < x_max ){
float ans = elem_src[x] + r / ( 1.0f + expf( -elem_src[x] ) );
if( ans < 0.0f ) ans = 0.0f;
store_method::__store<st_m>( elem_dst[x], ans );
}
}
template<int st_m,typename T>
inline void sample_recified_linear( T &dst, const T &src ){
int stride = get_align_width( dst.x_max );
int y_max = num_line( dst );
int x_max = dst.x_max;
int num_block = (y_max*stride + BASE_THREAD_NUM-1)/BASE_THREAD_NUM;
dim3 dimBlock( BASE_THREAD_NUM, 1, 1 );
dim3 dimGrid ( num_block , 1, 1 );
const float *rnd = cuda_rand::rand_singles( dimGrid.x * dimBlock.x );
hipLaunchKernelGGL(( sample_recified_linear_kernel<st_m,BASE_THREAD_BITS>) , dim3(dimGrid),dim3(dimBlock), 0, 0,
dst.elem, src.elem, dst.pitch, src.pitch, y_max, x_max, rnd );
}
// sample gaussian with given mean and sd
template<int st_m,int block_dim_bits>
__global__ void sample_gaussian_kernel( float *elem_dst , const float *elem_src,
unsigned int pitch_dst, unsigned int pitch_src,
int y_max , int x_max,
const float *rnd, float sd ){
__shared__ float s_rnd[ 1<<block_dim_bits ];
const int tid = (blockIdx.x << block_dim_bits) + threadIdx.x;
const float r = cuda_rand::sample_gaussian<block_dim_bits>( cuda_rand::get_rand(rnd,tid), threadIdx.x, s_rnd ) * sd;
const int x_mm= get_align_width( x_max );
const int y = tid / x_mm;
const int x = tid % x_mm;
elem_dst = get_line ( elem_dst, y, pitch_dst );
elem_src = get_line_const( elem_src, y, pitch_src );
if( y < y_max && x < x_max ){
store_method::__store<st_m>( elem_dst[x], elem_src[x] + r );
}
}
template<int st_m,typename T>
inline void sample_gaussian( T &dst, const T &src, float sd ){
int stride = get_align_width( dst.x_max );
int y_max = num_line( dst );
int x_max = dst.x_max;
int num_block = (y_max*stride + BASE_THREAD_NUM-1)/BASE_THREAD_NUM;
dim3 dimBlock( BASE_THREAD_NUM, 1, 1 );
dim3 dimGrid ( num_block , 1, 1 );
const float *rnd = cuda_rand::rand_singles( dimGrid.x * dimBlock.x );
hipLaunchKernelGGL(( sample_gaussian_kernel<st_m,BASE_THREAD_BITS>) , dim3(dimGrid),dim3(dimBlock), 0, 0,
dst.elem, src.elem, dst.pitch, src.pitch, y_max, x_max, rnd, sd );
}
// sample gaussian
template<int st_m,int block_dim_bits>
__global__ void sample_gaussian_kernel( float *elem_dst ,
unsigned int pitch_dst,
int y_max , int x_max,
const float *rnd, float sd ){
__shared__ float s_rnd[ 1<<block_dim_bits ];
const int tid = (blockIdx.x << block_dim_bits) + threadIdx.x;
const float r = cuda_rand::sample_gaussian<block_dim_bits>( cuda_rand::get_rand(rnd,tid), threadIdx.x, s_rnd ) * sd;
const int x_mm= get_align_width( x_max );
const int y = tid / x_mm;
const int x = tid % x_mm;
elem_dst = get_line( elem_dst, y, pitch_dst );
if( y < y_max && x < x_max ){
store_method::__store<st_m>( elem_dst[x], r );
}
}
template<int st_m,typename T>
inline void sample_gaussian( T &dst, float sd ){
int stride = get_align_width( dst.x_max );
int y_max = num_line( dst );
int x_max = dst.x_max;
int num_block = (y_max*stride + BASE_THREAD_NUM-1)/BASE_THREAD_NUM;
dim3 dimBlock( BASE_THREAD_NUM, 1, 1 );
dim3 dimGrid ( num_block , 1, 1 );
const float *rnd = cuda_rand::rand_singles( dimGrid.x * dimBlock.x );
hipLaunchKernelGGL(( sample_gaussian_kernel<st_m,BASE_THREAD_BITS>) , dim3(dimGrid),dim3(dimBlock), 0, 0,
dst.elem, dst.pitch, y_max, x_max, rnd, sd );
}
/*
sample maxpooling with pool_size = 2^pool_bits
with block shape < Y_UNIT , X_UNIT >
*/
template<int st_m,int pool_bits>
__device__ void __sample_maxpooling_procedure_rec( int block_y,
int block_x,
float s_mm[Y_UNIT][MEM_UNIT],
__GT2D dst,
const __GT2D prob,
const float *rnd ){
float r = cuda_rand::get_rand( rnd, (threadIdx.y<<MEM_UNIT_BITS) + threadIdx.x ) - 1.0f;
// load from src
for( int y = 0 ; y < (1<<pool_bits) ; y ++ )
for( int x = 0 ; x < (1<<pool_bits) ; x ++ ){
int y_idx = block_y * (Y_UNIT << pool_bits) + (y<<Y_UNIT_BITS) + threadIdx.y;
int x_idx = block_x * (MEM_UNIT << pool_bits) + (x<<MEM_UNIT_BITS) + threadIdx.x;
// we don't need to sync here since each thread always uses the same position
//__syncthreads();
// load data into memory
if( y_idx < prob.y_max && x_idx < prob.x_max ) {
s_mm[ threadIdx.y ][ threadIdx.x ] = prob[ y_idx ][ x_idx ];
}else{
s_mm[ threadIdx.y ][ threadIdx.x ] = 0.0f;
}
__syncthreads();
// if the thread is in this range
if( y == ((threadIdx.y<<pool_bits)>>Y_UNIT_BITS) && x == ((threadIdx.x<<pool_bits)>>MEM_UNIT_BITS) ){
// no bank conflict within the same pool, since we only access banks in the same row
cuda_rand::sample_maxpooling<pool_bits,MEM_UNIT>( (threadIdx.y<<pool_bits) &Y_UNIT_MASK,
(threadIdx.x<<pool_bits) &MEM_UNIT_MASK,
s_mm, r );
}
__syncthreads();
if( y_idx < dst.y_max && x_idx < dst.x_max ) {
float s = s_mm[ threadIdx.y ][ threadIdx.x ];
store_method::__store<st_m>( dst[y_idx][x_idx], s );
}
}
}
/* pooling kernel, using 3DGrid */
template<int st_m, int pool_bits>
__global__ void __sample_maxpooling_rec_kernel_3DGrid( int grid_width,
__GT3D dst,
const __GT3D prob,
const float *rnd ){
const int block_z = blockIdx.y;
const int block_y = blockIdx.x / grid_width;
const int block_x = blockIdx.x % grid_width;
__shared__ float s_mm[ Y_UNIT ][ MEM_UNIT ];
__sample_maxpooling_procedure_rec<st_m,pool_bits>
( block_y, block_x, s_mm, dst[block_z], prob[block_z], rnd +
block_z*(gridDim.x<<(MEM_UNIT_BITS+Y_UNIT_BITS)) + (blockIdx.x<<(MEM_UNIT_BITS+Y_UNIT_BITS)) );
}
template<int st_m, int pool_bits>
inline void __sample_maxpooling_rec( GTensor3D &dst, const GTensor3D &prob ){
dim3 dimBlock( MEM_UNIT , Y_UNIT );
const int d_y_max = (prob.y_max + (1<<pool_bits) - 1) >> pool_bits;
const int d_x_max = (prob.x_max + (1<<pool_bits) - 1) >> pool_bits;
int grid_height= (d_y_max+Y_UNIT-1 ) >> Y_UNIT_BITS ;
int grid_width = (d_x_max+MEM_UNIT-1) >> MEM_UNIT_BITS;
dim3 dimGrid( grid_width*grid_height, prob.z_max );
const float *rnd = cuda_rand::rand_singles( (dimGrid.y*dimGrid.x)<<(MEM_UNIT_BITS+Y_UNIT_BITS) );
hipLaunchKernelGGL(( __sample_maxpooling_rec_kernel_3DGrid<st_m,pool_bits>), dim3(dimGrid),dim3(dimBlock), 0, 0, grid_width, __GT(dst), __GT(prob), rnd );
}
/*
sample maxpooling with pool_size
with block shape < pool_size , 16*pool_size >
*/
template<int st_m,int pool_size>
__device__ void __sample_maxpooling_procedure_ord( int block_y,
int block_x,
float s_mm[pool_size][MEM_UNIT*pool_size],
__GT2D dst,
const __GT2D prob,
const float *rnd ){
float r = cuda_rand::get_rand( rnd, (threadIdx.y*pool_size*MEM_UNIT) + threadIdx.x ) - 1.0f;
// load from src
for( int y = 0 ; y < pool_size ; y ++ )
for( int x = 0 ; x < pool_size ; x ++ ){
int y_idx = block_y*pool_size*pool_size + y*pool_size + threadIdx.y;
int x_idx = block_x*pool_size*pool_size*MEM_UNIT + x*pool_size*MEM_UNIT + threadIdx.x;
// we don't need to sync here since each thread always uses the same position
//__syncthreads();
// load data into memory
if( y_idx < prob.y_max && x_idx < prob.x_max ) {
s_mm[ threadIdx.y ][ threadIdx.x ] = prob[ y_idx ][ x_idx ];
}else{
s_mm[ threadIdx.y ][ threadIdx.x ] = 0.0f;
}
__syncthreads();
// if the thread is in this range
if( y == threadIdx.y && x == (threadIdx.x>>MEM_UNIT_BITS) ){
// no bank conflict within the same pool, since we only access banks in the same row
cuda_rand::sample_maxpooling_ord<pool_size,MEM_UNIT>( 0,
(threadIdx.x & MEM_UNIT_MASK) * pool_size,
s_mm, r );
}
__syncthreads();
if( y_idx < dst.y_max && x_idx < dst.x_max ) {
float s = s_mm[ threadIdx.y ][ threadIdx.x ];
store_method::__store<st_m>( dst[y_idx][x_idx], s );
}
}
}
template<int st_m, int pool_size>
__global__ void __sample_maxpooling_ord_kernel_3DGrid( int grid_width,
__GT3D dst,
const __GT3D prob,
const float *rnd ){
const int block_z = blockIdx.y;
const int block_y = blockIdx.x / grid_width;
const int block_x = blockIdx.x % grid_width;
__shared__ float s_mm[ pool_size ][ pool_size*MEM_UNIT ];
__sample_maxpooling_procedure_ord<st_m,pool_size>
( block_y, block_x, s_mm, dst[block_z], prob[block_z],
rnd + block_z*(gridDim.x*pool_size*pool_size*MEM_UNIT) + (blockIdx.x*pool_size*pool_size*MEM_UNIT) );
}
template<int st_m, int pool_size>
inline void __sample_maxpooling_ord( GTensor3D &dst, const GTensor3D &prob ){
dim3 dimBlock( pool_size*MEM_UNIT, pool_size );
const int d_y_max = (prob.y_max + pool_size-1) / pool_size;
const int d_x_max = (prob.x_max + pool_size-1) / pool_size;
int grid_height= (d_y_max+pool_size -1) / pool_size;
int grid_width = (d_x_max+pool_size*MEM_UNIT-1) / (pool_size*MEM_UNIT);
dim3 dimGrid( grid_width*grid_height, prob.z_max );
const float *rnd = cuda_rand::rand_singles( (dimGrid.y*dimGrid.x)*(pool_size*pool_size*MEM_UNIT) );
hipLaunchKernelGGL(( __sample_maxpooling_ord_kernel_3DGrid<st_m,pool_size>), dim3(dimGrid),dim3(dimBlock), 0, 0, grid_width, __GT(dst), __GT(prob), rnd );
}
/* pooling data up */
template<int st_m>
inline void sample_maxpooling( GTensor3D &dst, const GTensor3D &prob, int pool_size ){
switch( pool_size ){
case 1: sample_binary<st_m>( dst, prob ); break;
case 2: __sample_maxpooling_rec<st_m,1>( dst, prob ); break;
case 3: __sample_maxpooling_ord <st_m,3>( dst, prob ); break;
case 4: __sample_maxpooling_rec<st_m,2>( dst, prob ); break;
case 8: __sample_maxpooling_rec<st_m,3>( dst, prob ); break;
default: error("pooling size not supported");
}
}
};
};
#endif
|
08d98be508af88e7f06379e5a4e497592bde1d2d.cu
|
#ifndef _CUDA_TENSOR_SAMPLING_CU_
#define _CUDA_TENSOR_SAMPLING_CU_
#include "cuda_tensor.cuh"
#include "rand/cuda_rand.cuh"
#include "rand/cuda_sampling.cuh"
namespace apex_tensor{
namespace cuda_tensor{
// sample binary using prob
template<int st_m,int block_dim_bits>
__global__ void sample_binary_kernel( float *elem_dst , const float *elem_src,
unsigned int pitch_dst, unsigned int pitch_src,
int y_max , int x_max,
const float *rnd ){
const int tid = (blockIdx.x << block_dim_bits) + threadIdx.x;
const int x_mm= get_align_width( x_max );
const int y = tid / x_mm;
const int x = tid % x_mm;
elem_dst = get_line ( elem_dst, y, pitch_dst );
elem_src = get_line_const( elem_src, y, pitch_src );
if( y < y_max && x < x_max ){
float val = cuda_rand::sample_binary( elem_src[x], cuda_rand::get_rand( rnd, tid ) - 1.0f );
store_method::__store<st_m>( elem_dst[x], val );
}
}
template<int st_m,typename T>
inline void sample_binary( T &dst, const T &src ){
int stride = get_align_width( dst.x_max );
int y_max = num_line( dst );
int x_max = dst.x_max;
int num_block = (y_max*stride + BASE_THREAD_NUM-1)/BASE_THREAD_NUM;
dim3 dimBlock( BASE_THREAD_NUM, 1, 1 );
dim3 dimGrid ( num_block , 1, 1 );
const float *rnd = cuda_rand::rand_singles( dimGrid.x * dimBlock.x );
sample_binary_kernel<st_m,BASE_THREAD_BITS> <<<dimGrid,dimBlock>>>
( dst.elem, src.elem, dst.pitch, src.pitch, y_max, x_max, rnd );
}
template<int st_m,int block_dim_bits>
__global__ void sample_recified_linear_kernel( float *elem_dst , const float *elem_src,
unsigned int pitch_dst, unsigned int pitch_src,
int y_max , int x_max,
const float *rnd ){
__shared__ float s_rnd[ 1<<block_dim_bits ];
const int tid = (blockIdx.x << block_dim_bits) + threadIdx.x;
const float r = cuda_rand::sample_gaussian<block_dim_bits>( cuda_rand::get_rand(rnd,tid), threadIdx.x, s_rnd );
const int x_mm= get_align_width( x_max );
const int y = tid / x_mm;
const int x = tid % x_mm;
elem_dst = get_line ( elem_dst, y, pitch_dst );
elem_src = get_line_const( elem_src, y, pitch_src );
if( y < y_max && x < x_max ){
float ans = elem_src[x] + r / ( 1.0f + expf( -elem_src[x] ) );
if( ans < 0.0f ) ans = 0.0f;
store_method::__store<st_m>( elem_dst[x], ans );
}
}
template<int st_m,typename T>
inline void sample_recified_linear( T &dst, const T &src ){
int stride = get_align_width( dst.x_max );
int y_max = num_line( dst );
int x_max = dst.x_max;
int num_block = (y_max*stride + BASE_THREAD_NUM-1)/BASE_THREAD_NUM;
dim3 dimBlock( BASE_THREAD_NUM, 1, 1 );
dim3 dimGrid ( num_block , 1, 1 );
const float *rnd = cuda_rand::rand_singles( dimGrid.x * dimBlock.x );
sample_recified_linear_kernel<st_m,BASE_THREAD_BITS> <<<dimGrid,dimBlock>>>
( dst.elem, src.elem, dst.pitch, src.pitch, y_max, x_max, rnd );
}
// sample gaussian with given mean and sd
template<int st_m,int block_dim_bits>
__global__ void sample_gaussian_kernel( float *elem_dst , const float *elem_src,
unsigned int pitch_dst, unsigned int pitch_src,
int y_max , int x_max,
const float *rnd, float sd ){
__shared__ float s_rnd[ 1<<block_dim_bits ];
const int tid = (blockIdx.x << block_dim_bits) + threadIdx.x;
const float r = cuda_rand::sample_gaussian<block_dim_bits>( cuda_rand::get_rand(rnd,tid), threadIdx.x, s_rnd ) * sd;
const int x_mm= get_align_width( x_max );
const int y = tid / x_mm;
const int x = tid % x_mm;
elem_dst = get_line ( elem_dst, y, pitch_dst );
elem_src = get_line_const( elem_src, y, pitch_src );
if( y < y_max && x < x_max ){
store_method::__store<st_m>( elem_dst[x], elem_src[x] + r );
}
}
template<int st_m,typename T>
inline void sample_gaussian( T &dst, const T &src, float sd ){
int stride = get_align_width( dst.x_max );
int y_max = num_line( dst );
int x_max = dst.x_max;
int num_block = (y_max*stride + BASE_THREAD_NUM-1)/BASE_THREAD_NUM;
dim3 dimBlock( BASE_THREAD_NUM, 1, 1 );
dim3 dimGrid ( num_block , 1, 1 );
const float *rnd = cuda_rand::rand_singles( dimGrid.x * dimBlock.x );
sample_gaussian_kernel<st_m,BASE_THREAD_BITS> <<<dimGrid,dimBlock>>>
( dst.elem, src.elem, dst.pitch, src.pitch, y_max, x_max, rnd, sd );
}
// sample gaussian
template<int st_m,int block_dim_bits>
__global__ void sample_gaussian_kernel( float *elem_dst ,
unsigned int pitch_dst,
int y_max , int x_max,
const float *rnd, float sd ){
__shared__ float s_rnd[ 1<<block_dim_bits ];
const int tid = (blockIdx.x << block_dim_bits) + threadIdx.x;
const float r = cuda_rand::sample_gaussian<block_dim_bits>( cuda_rand::get_rand(rnd,tid), threadIdx.x, s_rnd ) * sd;
const int x_mm= get_align_width( x_max );
const int y = tid / x_mm;
const int x = tid % x_mm;
elem_dst = get_line( elem_dst, y, pitch_dst );
if( y < y_max && x < x_max ){
store_method::__store<st_m>( elem_dst[x], r );
}
}
template<int st_m,typename T>
inline void sample_gaussian( T &dst, float sd ){
int stride = get_align_width( dst.x_max );
int y_max = num_line( dst );
int x_max = dst.x_max;
int num_block = (y_max*stride + BASE_THREAD_NUM-1)/BASE_THREAD_NUM;
dim3 dimBlock( BASE_THREAD_NUM, 1, 1 );
dim3 dimGrid ( num_block , 1, 1 );
const float *rnd = cuda_rand::rand_singles( dimGrid.x * dimBlock.x );
sample_gaussian_kernel<st_m,BASE_THREAD_BITS> <<<dimGrid,dimBlock>>>
( dst.elem, dst.pitch, y_max, x_max, rnd, sd );
}
/*
sample maxpooling with pool_size = 2^pool_bits
with block shape < Y_UNIT , X_UNIT >
*/
template<int st_m,int pool_bits>
__device__ void __sample_maxpooling_procedure_rec( int block_y,
int block_x,
float s_mm[Y_UNIT][MEM_UNIT],
__GT2D dst,
const __GT2D prob,
const float *rnd ){
float r = cuda_rand::get_rand( rnd, (threadIdx.y<<MEM_UNIT_BITS) + threadIdx.x ) - 1.0f;
// load from src
for( int y = 0 ; y < (1<<pool_bits) ; y ++ )
for( int x = 0 ; x < (1<<pool_bits) ; x ++ ){
int y_idx = block_y * (Y_UNIT << pool_bits) + (y<<Y_UNIT_BITS) + threadIdx.y;
int x_idx = block_x * (MEM_UNIT << pool_bits) + (x<<MEM_UNIT_BITS) + threadIdx.x;
// we don't need to sync here since each thread always uses the same position
//__syncthreads();
// load data into memory
if( y_idx < prob.y_max && x_idx < prob.x_max ) {
s_mm[ threadIdx.y ][ threadIdx.x ] = prob[ y_idx ][ x_idx ];
}else{
s_mm[ threadIdx.y ][ threadIdx.x ] = 0.0f;
}
__syncthreads();
// if the thread is in this range
if( y == ((threadIdx.y<<pool_bits)>>Y_UNIT_BITS) && x == ((threadIdx.x<<pool_bits)>>MEM_UNIT_BITS) ){
// no bank conflict within the same pool, since we only access banks in the same row
cuda_rand::sample_maxpooling<pool_bits,MEM_UNIT>( (threadIdx.y<<pool_bits) &Y_UNIT_MASK,
(threadIdx.x<<pool_bits) &MEM_UNIT_MASK,
s_mm, r );
}
__syncthreads();
if( y_idx < dst.y_max && x_idx < dst.x_max ) {
float s = s_mm[ threadIdx.y ][ threadIdx.x ];
store_method::__store<st_m>( dst[y_idx][x_idx], s );
}
}
}
/* pooling kernel, using 3DGrid */
template<int st_m, int pool_bits>
__global__ void __sample_maxpooling_rec_kernel_3DGrid( int grid_width,
__GT3D dst,
const __GT3D prob,
const float *rnd ){
const int block_z = blockIdx.y;
const int block_y = blockIdx.x / grid_width;
const int block_x = blockIdx.x % grid_width;
__shared__ float s_mm[ Y_UNIT ][ MEM_UNIT ];
__sample_maxpooling_procedure_rec<st_m,pool_bits>
( block_y, block_x, s_mm, dst[block_z], prob[block_z], rnd +
block_z*(gridDim.x<<(MEM_UNIT_BITS+Y_UNIT_BITS)) + (blockIdx.x<<(MEM_UNIT_BITS+Y_UNIT_BITS)) );
}
template<int st_m, int pool_bits>
inline void __sample_maxpooling_rec( GTensor3D &dst, const GTensor3D &prob ){
dim3 dimBlock( MEM_UNIT , Y_UNIT );
const int d_y_max = (prob.y_max + (1<<pool_bits) - 1) >> pool_bits;
const int d_x_max = (prob.x_max + (1<<pool_bits) - 1) >> pool_bits;
int grid_height= (d_y_max+Y_UNIT-1 ) >> Y_UNIT_BITS ;
int grid_width = (d_x_max+MEM_UNIT-1) >> MEM_UNIT_BITS;
dim3 dimGrid( grid_width*grid_height, prob.z_max );
const float *rnd = cuda_rand::rand_singles( (dimGrid.y*dimGrid.x)<<(MEM_UNIT_BITS+Y_UNIT_BITS) );
__sample_maxpooling_rec_kernel_3DGrid<st_m,pool_bits><<<dimGrid,dimBlock>>>( grid_width, __GT(dst), __GT(prob), rnd );
}
/*
sample maxpooling with pool_size
with block shape < pool_size , 16*pool_size >
*/
template<int st_m,int pool_size>
__device__ void __sample_maxpooling_procedure_ord( int block_y,
int block_x,
float s_mm[pool_size][MEM_UNIT*pool_size],
__GT2D dst,
const __GT2D prob,
const float *rnd ){
float r = cuda_rand::get_rand( rnd, (threadIdx.y*pool_size*MEM_UNIT) + threadIdx.x ) - 1.0f;
// load from src
for( int y = 0 ; y < pool_size ; y ++ )
for( int x = 0 ; x < pool_size ; x ++ ){
int y_idx = block_y*pool_size*pool_size + y*pool_size + threadIdx.y;
int x_idx = block_x*pool_size*pool_size*MEM_UNIT + x*pool_size*MEM_UNIT + threadIdx.x;
// we don't need to sync here since each thread always uses the same position
//__syncthreads();
// load data into memory
if( y_idx < prob.y_max && x_idx < prob.x_max ) {
s_mm[ threadIdx.y ][ threadIdx.x ] = prob[ y_idx ][ x_idx ];
}else{
s_mm[ threadIdx.y ][ threadIdx.x ] = 0.0f;
}
__syncthreads();
// if the thread is in this range
if( y == threadIdx.y && x == (threadIdx.x>>MEM_UNIT_BITS) ){
// no bank conflict within the same pool, since we only access banks in the same row
cuda_rand::sample_maxpooling_ord<pool_size,MEM_UNIT>( 0,
(threadIdx.x & MEM_UNIT_MASK) * pool_size,
s_mm, r );
}
__syncthreads();
if( y_idx < dst.y_max && x_idx < dst.x_max ) {
float s = s_mm[ threadIdx.y ][ threadIdx.x ];
store_method::__store<st_m>( dst[y_idx][x_idx], s );
}
}
}
template<int st_m, int pool_size>
__global__ void __sample_maxpooling_ord_kernel_3DGrid( int grid_width,
__GT3D dst,
const __GT3D prob,
const float *rnd ){
const int block_z = blockIdx.y;
const int block_y = blockIdx.x / grid_width;
const int block_x = blockIdx.x % grid_width;
__shared__ float s_mm[ pool_size ][ pool_size*MEM_UNIT ];
__sample_maxpooling_procedure_ord<st_m,pool_size>
( block_y, block_x, s_mm, dst[block_z], prob[block_z],
rnd + block_z*(gridDim.x*pool_size*pool_size*MEM_UNIT) + (blockIdx.x*pool_size*pool_size*MEM_UNIT) );
}
template<int st_m, int pool_size>
inline void __sample_maxpooling_ord( GTensor3D &dst, const GTensor3D &prob ){
dim3 dimBlock( pool_size*MEM_UNIT, pool_size );
const int d_y_max = (prob.y_max + pool_size-1) / pool_size;
const int d_x_max = (prob.x_max + pool_size-1) / pool_size;
int grid_height= (d_y_max+pool_size -1) / pool_size;
int grid_width = (d_x_max+pool_size*MEM_UNIT-1) / (pool_size*MEM_UNIT);
dim3 dimGrid( grid_width*grid_height, prob.z_max );
const float *rnd = cuda_rand::rand_singles( (dimGrid.y*dimGrid.x)*(pool_size*pool_size*MEM_UNIT) );
__sample_maxpooling_ord_kernel_3DGrid<st_m,pool_size><<<dimGrid,dimBlock>>>( grid_width, __GT(dst), __GT(prob), rnd );
}
/* pooling data up */
template<int st_m>
inline void sample_maxpooling( GTensor3D &dst, const GTensor3D &prob, int pool_size ){
switch( pool_size ){
case 1: sample_binary<st_m>( dst, prob ); break;
case 2: __sample_maxpooling_rec<st_m,1>( dst, prob ); break;
case 3: __sample_maxpooling_ord <st_m,3>( dst, prob ); break;
case 4: __sample_maxpooling_rec<st_m,2>( dst, prob ); break;
case 8: __sample_maxpooling_rec<st_m,3>( dst, prob ); break;
default: error("pooling size not supported");
}
}
};
};
#endif
|
360b2d53db827384c9510a531291cffb4449b452.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
#define BLOCK_SIZE 4
#define N 32
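// Tiled matrix multiply: each block computes one BLOCK_SIZE x BLOCK_SIZE tile of C by staging
// matching tiles of A and B in shared memory and accumulating partial dot products over the N/BLOCK_SIZE tile steps.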
__global__ void prod(int *A, int *B, int *C)
{
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
int Cvalue = 0;
int row = threadIdx.y;
int col = threadIdx.x;
for (int m = 0; m < (N / BLOCK_SIZE); ++m)
{
__shared__ int slice_A[BLOCK_SIZE * BLOCK_SIZE];
__shared__ int slice_B[BLOCK_SIZE * BLOCK_SIZE];
slice_A[row * BLOCK_SIZE + col] = A[(BLOCK_SIZE * blockRow + row) * N + m * BLOCK_SIZE + col];
slice_B[row * BLOCK_SIZE + col] = B[(BLOCK_SIZE * m + row) * N + blockCol * BLOCK_SIZE + col];
__syncthreads();
for (int e = 0; e < BLOCK_SIZE; ++e)
Cvalue += slice_A[row * BLOCK_SIZE + e] * slice_B[e * BLOCK_SIZE + col];
__syncthreads();
}
C[(BLOCK_SIZE * blockRow + row) * N + BLOCK_SIZE * blockCol + col] = Cvalue;
}
int main( void )
{
int a[N * N], b[N * N], c[N * N];
int *dev_a, *dev_b, *dev_c;
for (int i = 0; i < N * N; ++i)
{
a[i] = 1;
b[i] = 1;
}
hipMalloc((void**)&dev_a, N * N * sizeof(int));
hipMalloc((void**)&dev_b, N * N * sizeof(int));
hipMalloc((void**)&dev_c, N * N * sizeof(int));
hipMemcpy(dev_a, a, N * N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, N * N * sizeof(int), hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(N / dimBlock.x, N / dimBlock.y);
hipLaunchKernelGGL(( prod), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_a, dev_b, dev_c);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float worktime;
hipEventElapsedTime(&worktime, start, stop);
printf("Time = %f ms \n", worktime);
hipEventDestroy(start);
hipEventDestroy(stop);
hipMemcpy(c, dev_c, N * N * sizeof(int), hipMemcpyDeviceToHost);
printf("c[0][0] = %d\n", c[0]);
printf("c[N-1][N-1] = %d\n", c[N * N - 1]);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
|
360b2d53db827384c9510a531291cffb4449b452.cu
|
#include "stdio.h"
#define BLOCK_SIZE 4
#define N 32
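// Tiled matrix multiply: each block computes one BLOCK_SIZE x BLOCK_SIZE tile of C by staging
// matching tiles of A and B in shared memory and accumulating partial dot products over the N/BLOCK_SIZE tile steps.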
__global__ void prod(int *A, int *B, int *C)
{
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
int Cvalue = 0;
int row = threadIdx.y;
int col = threadIdx.x;
for (int m = 0; m < (N / BLOCK_SIZE); ++m)
{
__shared__ int slice_A[BLOCK_SIZE * BLOCK_SIZE];
__shared__ int slice_B[BLOCK_SIZE * BLOCK_SIZE];
slice_A[row * BLOCK_SIZE + col] = A[(BLOCK_SIZE * blockRow + row) * N + m * BLOCK_SIZE + col];
slice_B[row * BLOCK_SIZE + col] = B[(BLOCK_SIZE * m + row) * N + blockCol * BLOCK_SIZE + col];
__syncthreads();
for (int e = 0; e < BLOCK_SIZE; ++e)
Cvalue += slice_A[row * BLOCK_SIZE + e] * slice_B[e * BLOCK_SIZE + col];
__syncthreads();
}
C[(BLOCK_SIZE * blockRow + row) * N + BLOCK_SIZE * blockCol + col] = Cvalue;
}
int main( void )
{
int a[N * N], b[N * N], c[N * N];
int *dev_a, *dev_b, *dev_c;
for (int i = 0; i < N * N; ++i)
{
a[i] = 1;
b[i] = 1;
}
cudaMalloc((void**)&dev_a, N * N * sizeof(int));
cudaMalloc((void**)&dev_b, N * N * sizeof(int));
cudaMalloc((void**)&dev_c, N * N * sizeof(int));
cudaMemcpy(dev_a, a, N * N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N * N * sizeof(int), cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(N / dimBlock.x, N / dimBlock.y);
prod<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float worktime;
cudaEventElapsedTime(&worktime, start, stop);
printf("Time = %f ms \n", worktime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaMemcpy(c, dev_c, N * N * sizeof(int), cudaMemcpyDeviceToHost);
printf("c[0][0] = %d\n", c[0]);
printf("c[N-1][N-1] = %d\n", c[N * N - 1]);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
16240279c37549061d07256a145848a3c12baaaa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime_api.h>
//#include <cutil.h>
#include <hip/hip_runtime.h>
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 4096
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
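// Builds a pointer-chasing structure: a single thread fills ptr_array so that each slot points
// 16 elements ahead within its warp's partition of the array, then mirrors those pointer values
// into the flat array so later kernels can chase the chain.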
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
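// Latency kernel: threads whose lane id is below `divergence` repeatedly walk the pointer chain
// built by init_memory; the chased loads are data-dependent, so their latency cannot be overlapped,
// which is what the benchmark measures.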
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
tmp_ptr = (void **)(&(ptr_array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
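// Only the first 'divergence' lanes of each warp perform the pointer chase; the rest idle, which sets the degree of intra-warp divergence being measured.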
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
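// Illustrative invocation (values are examples only): ./binary 1 256 10 32 1 runs 1 block of 256 threads for 10 iterations with all 32 lanes active and a stride of 1.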
void parametric_measure_shared(int N, int iterations, int stride) {
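// The host builds a preliminary chain in h_a (each element stores the address of the element 1 + stride ahead), but init_memory rebuilds the chain with device addresses before timing, so the host chain only populates the buffers.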
hipProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
hipError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 1 is %s\n", hipGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 2 is %s\n", hipGetErrorString(error_id));
}
hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipProfilerStart();
hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
///hipDeviceSynchronize ();
hipProfilerStop();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 3 is %s\n", hipGetErrorString(error_id));
}
/* copy results from GPU to CPU */
hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
hipFree(d_a);
hipFree(d_ptr_a);
hipFree(duration);
hipDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
|
16240279c37549061d07256a145848a3c12baaaa.cu
|
#include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 4096
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
tmp_ptr = (void **)(&(ptr_array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
cudaProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
cudaError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 1 is %s\n", cudaGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 2 is %s\n", cudaGetErrorString(error_id));
}
init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaProfilerStart();
cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1);
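// The kernel uses no shared memory, so prefer a larger L1 cache for the pointer-chasing loads.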
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
///cudaThreadSynchronize ();
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 3 is %s\n", cudaGetErrorString(error_id));
}
/* copy results from GPU to CPU */
cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost);
cudaThreadSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
cudaFree(d_a);
cudaFree(d_ptr_a);
cudaFree(duration);
cudaThreadSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
|
626a01be9d491a40839f40f7349de8e3d751aac8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Solves the Panfilov model using an explicit numerical scheme.
* Based on code orginally provided by Xing Cai, Simula Research Laboratory
* and reimplementation by Scott B. Baden, UCSD
*
* Modified and restructured by Didem Unat, Koc University
*
*/
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <string.h>
#include <math.h>
#include <sys/time.h>
using namespace std;
void checkCUDAError(const char *msg);
// Utilities
//
// Timer
// Make successive calls and take a difference to get the elapsed time.
static const double kMicro = 1.0e-6;
static const int BLOCKSIZE = 16;
double getTime()
{
struct timeval TV;
struct timezone TZ;
const int RC = gettimeofday(&TV, &TZ);
if(RC == -1) {
cerr << "ERROR: Bad call to gettimeofday" << endl;
return(-1);
}
return( ((double)TV.tv_sec) + kMicro * ((double)TV.tv_usec) );
} // end getTime()
// Allocate a 2D array
double **alloc2D(int m,int n){
double **E;
int nx=n, ny=m;
E = (double**)malloc(sizeof(double*)*ny + sizeof(double)*nx*ny);
assert(E);
int j;
for(j=0;j<ny;j++)
E[j] = (double*)(E+ny) + j*nx;
return(E);
}
double *flatten(double **array, int m, int n)
{
double *a;
a = (double*)malloc(sizeof(double)*(m+2)*(n+2));
int i, j;
for(j=0;j<=m + 1; j++){
for (i = 0; i <= n + 1; i++) {
a[(j * (n+2)) + i] = array[j][i];
}
}
return a;
}
// Reports statistics about the computation
// These values should not vary (except to within roundoff)
// when we use different numbers of processes to solve the problem
double stats(double *E, int m, int n, double *_mx){
double mx = -1;
double l2norm = 0;
int i, j;
for (j=1; j<=m; j++)
for (i=1; i<=n; i++) {
l2norm += E[(j * (n+2)) + i]*E[(j * (n+2)) + i];
if (E[(j * (n+2)) + i] > mx)
mx = E[(j * (n+2)) + i];
}
*_mx = mx;
l2norm /= (double) ((m)*(n));
l2norm = sqrt(l2norm);
return l2norm;
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
// External functions
extern "C" {
void splot(double **E, double T, int niter, int m, int n);
}
void cmdLine(int argc, char *argv[], double& T, int& n, int& px, int& py, int& plot_freq, int& no_comm, int&num_threads);
__global__
void vecODEKernel(double* R, double* E, double epsilon, double M1, double M2, double dt, double kk, double a, double b, int n)
{
int row = blockIdx.y*blockDim.y+threadIdx.y+1;
int col = blockIdx.x*blockDim.x+threadIdx.x+1;
if((row < n) && (col < n)) {
row = row * (n+2);
E[row + col] = E[row + col] -dt*(kk* E[row + col]*(E[row + col] - a)*(E[row + col]-1)+ E[row + col] *R[row + col]);
R[row + col] = R[row + col] + dt*(epsilon+M1* R[row + col]/( E[row + col]+M2))*(-R[row + col]-kk* E[row + col]*(E[row + col]-b-1));
}
}
__global__
void boundaryKernel(double *E_prev, int m, int n)
{
int row = blockIdx.y*blockDim.y+threadIdx.y+1;
int col = blockIdx.x*blockDim.x+threadIdx.x+1;
row = row * (n+2);
E_prev[row] = E_prev[row + 2];
E_prev[row + n + 1] = E_prev[row + (n-1)];
E_prev[col] = E_prev[col+2];
E_prev[(m+1)*(n+2) + col] = E_prev[(m-1)*(n+2) + col];
}
__global__
void matAllKernel(double alpha, double* E, double* E_prev, double* R, int n, int m, double epsilon, double M1, double M2, double dt, double kk, double a, double b)
{
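// Each thread first mirrors the ghost cells on the domain boundary, then (after the barrier) applies the explicit 5-point Laplacian update to E followed by the local ODE updates for E and R.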
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
int row_m = row * (n+2);
// Mirror boundary setup
if(col == 0 || col == (n+1)) {
E_prev[row_m] = E_prev[row_m + 2];
E_prev[row_m + n + 1] = E_prev[row_m + (n-1)];
}
if(row == 0 || row == (n+1)) {
E_prev[col] = E_prev[col+2];
E_prev[(m+1)*(n+2) + col] = E_prev[(m-1)*(n+2) + col];
}
__syncthreads();
row = row + 1;
col = col + 1;
if((row < n) && (col < n)) {
row = row * (n+2);
//PDE
E[row + col] = E_prev[row + col]+alpha*(E_prev[row + col + 1]+E_prev[row + col -1]-4*E_prev[row + col]+E_prev[row + col + (n+2)]+E_prev[row + col - (n+2)]);
//ODE
E[row + col] = E[row + col] -dt*(kk* E[row + col]*(E[row + col] - a)*(E[row + col]-1)+ E[row + col] *R[row + col]);
R[row + col] = R[row + col] + dt*(epsilon+M1* R[row + col]/( E[row + col]+M2))*(-R[row + col]-kk* E[row + col]*(E[row + col]-b-1));
}
}
void simulate (double* E, double* E_prev,double* R,
const double alpha, const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b)
{
dim3 DimBlock(BLOCKSIZE,BLOCKSIZE,1);
dim3 DimGrid(ceil((double)n/DimBlock.x), ceil((double)n/DimBlock.y));
hipLaunchKernelGGL(( matAllKernel), dim3(DimGrid), dim3(DimBlock), 0, 0, alpha, E, E_prev, R, n, m, epsilon, M1, M2, dt, kk, a, b);
}
// Main program
int main (int argc, char** argv)
{
/*
* Solution arrays
* E is the "Excitation" variable, a voltage
* R is the "Recovery" variable
* E_prev is the Excitation variable for the previous timestep,
* and is used in time integration
*/
double **E, **R, **E_prev;
// Various constants - these definitions shouldn't change
const double a=0.1, b=0.1, kk=8.0, M1= 0.07, M2=0.3, epsilon=0.01, d=5e-5;
double T=1000.0;
int m=200,n=200;
int plot_freq = 0;
int px = 1, py = 1;
int no_comm = 0;
int num_threads=1;
cmdLine( argc, argv, T, n,px, py, plot_freq, no_comm, num_threads);
m = n;
// Allocate contiguous memory for solution arrays
// The computational box is defined on [1:m+1,1:n+1]
// We pad the arrays in order to facilitate differencing on the
// boundaries of the computation box
E = alloc2D(m+2,n+2);
E_prev = alloc2D(m+2,n+2);
R = alloc2D(m+2,n+2);
int i,j;
// Initialization
for (j=1; j<=m; j++)
for (i=1; i<=n; i++)
E_prev[j][i] = R[j][i] = 0;
for (j=1; j<=m; j++)
for (i=n/2+1; i<=n; i++)
E_prev[j][i] = 1.0;
for (j=m/2+1; j<=m; j++)
for (i=1; i<=n; i++)
R[j][i] = 1.0;
double *Ef, *Rf, *E_prevf;
Ef = flatten(E, m, n);
Rf = flatten(R, m, n);
E_prevf = flatten(E_prev, m, n);
double dx = 1.0/n;
// For time integration, these values shouldn't change
double rp= kk*(b+1)*(b+1)/4;
double dte=(dx*dx)/(d*4+((dx*dx))*(rp+kk));
double dtr=1/(epsilon+((M1/M2)*rp));
double dt = (dte<dtr) ? 0.95*dte : 0.95*dtr;
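// dt takes 95% of the tighter of the diffusive (dte) and reaction (dtr) stability limits; alpha = d*dt/dx^2 is the diffusion number used by the stencil.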
double alpha = d*dt/(dx*dx);
cout << "Grid Size : " << n << endl;
cout << "Duration of Sim : " << T << endl;
cout << "Time step dt : " << dt << endl;
cout << "Process geometry: " << px << " x " << py << endl;
if (no_comm)
cout << "Communication : DISABLED" << endl;
cout << endl;
// Integer timestep number
int niter=0;
int size = ((n+2)*(m+2) * sizeof(double));
double *d_E, *d_E_prev, *d_R;
// allocate memory for the devices
hipMalloc((void **) &d_E, size);
hipMalloc((void **) &d_E_prev, size);
hipMalloc((void **) &d_R, size);
checkCUDAError("Error allocating device memory arrays");
// copy all arrays to device
hipMemcpy(d_R, Rf, size, hipMemcpyHostToDevice);
checkCUDAError("Unable to copy to device, R");
hipMemcpy(d_E_prev, E_prevf, size, hipMemcpyHostToDevice);
checkCUDAError("Unable to copy to device, E_prev");
hipMemcpy(d_E, Ef, size, hipMemcpyHostToDevice);
checkCUDAError("Unable to copy to device, E");
// Simulated time is different from the integer timestep number
// Simulated time
double t = 0.0;
// Start the timer
double t0 = getTime();
while (t<T) {
t += dt;
niter++;
simulate(d_E, d_E_prev, d_R, alpha, n, m, kk, dt, a, epsilon, M1, M2, b);
//swap current E with previous E
double *tmp = d_E; d_E = d_E_prev; d_E_prev = tmp;
if (plot_freq){
int k = (int)(t/plot_freq);
if ((t - k * plot_freq) < dt){
splot(E,t,niter,m+2,n+2);
}
}
}//end of while loop
double time_elapsed = getTime() - t0;
// copy back all arrays
hipMemcpy(E_prevf, d_E_prev, size, hipMemcpyDeviceToHost);
checkCUDAError("Unable to retrieve result from device, E_prev");
hipMemcpy(Rf, d_R, size, hipMemcpyDeviceToHost);
checkCUDAError("Unable to retrieve result from device, R");
hipMemcpy(Ef, d_E, size, hipMemcpyDeviceToHost);
checkCUDAError("Unable to retrieve result from device, E");
// free memory
hipFree(d_R); hipFree(d_E); hipFree(d_E_prev);
double Gflops = (double)(niter * (1E-9 * n * n ) * 28.0) / time_elapsed ;
double BW = (double)(niter * 1E-9 * (n * n * sizeof(double) * 4.0 ))/time_elapsed;
cout << "Number of Iterations : " << niter << endl;
cout << "Elapsed Time (sec) : " << time_elapsed << endl;
cout << "Sustained Gflops Rate : " << Gflops << endl;
cout << "Sustained Bandwidth (GB/sec): " << BW << endl << endl;
double mx;
double l2norm = stats(E_prevf,m,n,&mx);
cout << "Max: " << mx << " L2norm: "<< l2norm << endl;
if (plot_freq){
cout << "\n\nEnter any input to close the program and the plot..." << endl;
getchar();
}
free (E);
free (E_prev);
free (R);
return 0;
}
|
626a01be9d491a40839f40f7349de8e3d751aac8.cu
|
/*
* Solves the Panfilov model using an explicit numerical scheme.
* Based on code orginally provided by Xing Cai, Simula Research Laboratory
* and reimplementation by Scott B. Baden, UCSD
*
* Modified and restructured by Didem Unat, Koc University
*
*/
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <string.h>
#include <math.h>
#include <sys/time.h>
using namespace std;
void checkCUDAError(const char *msg);
// Utilities
//
// Timer
// Make successive calls and take a difference to get the elapsed time.
static const double kMicro = 1.0e-6;
static const int BLOCKSIZE = 16;
double getTime()
{
struct timeval TV;
struct timezone TZ;
const int RC = gettimeofday(&TV, &TZ);
if(RC == -1) {
cerr << "ERROR: Bad call to gettimeofday" << endl;
return(-1);
}
return( ((double)TV.tv_sec) + kMicro * ((double)TV.tv_usec) );
} // end getTime()
// Allocate a 2D array
double **alloc2D(int m,int n){
double **E;
int nx=n, ny=m;
E = (double**)malloc(sizeof(double*)*ny + sizeof(double)*nx*ny);
assert(E);
int j;
for(j=0;j<ny;j++)
E[j] = (double*)(E+ny) + j*nx;
return(E);
}
double *flatten(double **array, int m, int n)
{
double *a;
a = (double*)malloc(sizeof(double)*(m+2)*(n+2));
int i, j;
for(j=0;j<=m + 1; j++){
for (i = 0; i <= n + 1; i++) {
a[(j * (n+2)) + i] = array[j][i];
}
}
return a;
}
// Reports statistics about the computation
// These values should not vary (except to within roundoff)
// when we use different numbers of processes to solve the problem
double stats(double *E, int m, int n, double *_mx){
double mx = -1;
double l2norm = 0;
int i, j;
for (j=1; j<=m; j++)
for (i=1; i<=n; i++) {
l2norm += E[(j * (n+2)) + i]*E[(j * (n+2)) + i];
if (E[(j * (n+2)) + i] > mx)
mx = E[(j * (n+2)) + i];
}
*_mx = mx;
l2norm /= (double) ((m)*(n));
l2norm = sqrt(l2norm);
return l2norm;
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
// External functions
extern "C" {
void splot(double **E, double T, int niter, int m, int n);
}
void cmdLine(int argc, char *argv[], double& T, int& n, int& px, int& py, int& plot_freq, int& no_comm, int&num_threads);
__global__
void vecODEKernel(double* R, double* E, double epsilon, double M1, double M2, double dt, double kk, double a, double b, int n)
{
int row = blockIdx.y*blockDim.y+threadIdx.y+1;
int col = blockIdx.x*blockDim.x+threadIdx.x+1;
if((row < n) && (col < n)) {
row = row * (n+2);
E[row + col] = E[row + col] -dt*(kk* E[row + col]*(E[row + col] - a)*(E[row + col]-1)+ E[row + col] *R[row + col]);
R[row + col] = R[row + col] + dt*(epsilon+M1* R[row + col]/( E[row + col]+M2))*(-R[row + col]-kk* E[row + col]*(E[row + col]-b-1));
}
}
__global__
void boundaryKernel(double *E_prev, int m, int n)
{
int row = blockIdx.y*blockDim.y+threadIdx.y+1;
int col = blockIdx.x*blockDim.x+threadIdx.x+1;
row = row * (n+2);
E_prev[row] = E_prev[row + 2];
E_prev[row + n + 1] = E_prev[row + (n-1)];
E_prev[col] = E_prev[col+2];
E_prev[(m+1)*(n+2) + col] = E_prev[(m-1)*(n+2) + col];
}
__global__
void matAllKernel(double alpha, double* E, double* E_prev, double* R, int n, int m, double epsilon, double M1, double M2, double dt, double kk, double a, double b)
{
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
int row_m = row * (n+2);
// Mirror boundary setup
if(col == 0 || col == (n+1)) {
E_prev[row_m] = E_prev[row_m + 2];
E_prev[row_m + n + 1] = E_prev[row_m + (n-1)];
}
if(row == 0 || row == (n+1)) {
E_prev[col] = E_prev[col+2];
E_prev[(m+1)*(n+2) + col] = E_prev[(m-1)*(n+2) + col];
}
__syncthreads();
row = row + 1;
col = col + 1;
if((row < n) && (col < n)) {
row = row * (n+2);
//PDE
E[row + col] = E_prev[row + col]+alpha*(E_prev[row + col + 1]+E_prev[row + col -1]-4*E_prev[row + col]+E_prev[row + col + (n+2)]+E_prev[row + col - (n+2)]);
//ODE
E[row + col] = E[row + col] -dt*(kk* E[row + col]*(E[row + col] - a)*(E[row + col]-1)+ E[row + col] *R[row + col]);
R[row + col] = R[row + col] + dt*(epsilon+M1* R[row + col]/( E[row + col]+M2))*(-R[row + col]-kk* E[row + col]*(E[row + col]-b-1));
}
}
void simulate (double* E, double* E_prev,double* R,
const double alpha, const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b)
{
dim3 DimBlock(BLOCKSIZE,BLOCKSIZE,1);
dim3 DimGrid(ceil((double)n/DimBlock.x), ceil((double)n/DimBlock.y));
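// E_prev holds the current state and E receives the update; the caller swaps the two pointers after each timestep.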
matAllKernel<<<DimGrid, DimBlock>>>(alpha, E, E_prev, R, n, m, epsilon, M1, M2, dt, kk, a, b);
}
// Main program
int main (int argc, char** argv)
{
/*
* Solution arrays
* E is the "Excitation" variable, a voltage
* R is the "Recovery" variable
* E_prev is the Excitation variable for the previous timestep,
* and is used in time integration
*/
double **E, **R, **E_prev;
// Various constants - these definitions shouldn't change
const double a=0.1, b=0.1, kk=8.0, M1= 0.07, M2=0.3, epsilon=0.01, d=5e-5;
double T=1000.0;
int m=200,n=200;
int plot_freq = 0;
int px = 1, py = 1;
int no_comm = 0;
int num_threads=1;
cmdLine( argc, argv, T, n,px, py, plot_freq, no_comm, num_threads);
m = n;
// Allocate contiguous memory for solution arrays
// The computational box is defined on [1:m+1,1:n+1]
// We pad the arrays in order to facilitate differencing on the
// boundaries of the computation box
E = alloc2D(m+2,n+2);
E_prev = alloc2D(m+2,n+2);
R = alloc2D(m+2,n+2);
int i,j;
// Initialization
for (j=1; j<=m; j++)
for (i=1; i<=n; i++)
E_prev[j][i] = R[j][i] = 0;
for (j=1; j<=m; j++)
for (i=n/2+1; i<=n; i++)
E_prev[j][i] = 1.0;
for (j=m/2+1; j<=m; j++)
for (i=1; i<=n; i++)
R[j][i] = 1.0;
double *Ef, *Rf, *E_prevf;
Ef = flatten(E, m, n);
Rf = flatten(R, m, n);
E_prevf = flatten(E_prev, m, n);
double dx = 1.0/n;
// For time integration, these values shouldn't change
double rp= kk*(b+1)*(b+1)/4;
double dte=(dx*dx)/(d*4+((dx*dx))*(rp+kk));
double dtr=1/(epsilon+((M1/M2)*rp));
double dt = (dte<dtr) ? 0.95*dte : 0.95*dtr;
double alpha = d*dt/(dx*dx);
cout << "Grid Size : " << n << endl;
cout << "Duration of Sim : " << T << endl;
cout << "Time step dt : " << dt << endl;
cout << "Process geometry: " << px << " x " << py << endl;
if (no_comm)
cout << "Communication : DISABLED" << endl;
cout << endl;
// Integer timestep number
int niter=0;
int size = ((n+2)*(m+2) * sizeof(double));
double *d_E, *d_E_prev, *d_R;
// allocate memory for the devices
cudaMalloc((void **) &d_E, size);
cudaMalloc((void **) &d_E_prev, size);
cudaMalloc((void **) &d_R, size);
checkCUDAError("Error allocating device memory arrays");
// copy all arrays to device
cudaMemcpy(d_R, Rf, size, cudaMemcpyHostToDevice);
checkCUDAError("Unable to copy to device, R");
cudaMemcpy(d_E_prev, E_prevf, size, cudaMemcpyHostToDevice);
checkCUDAError("Unable to copy to device, E_prev");
cudaMemcpy(d_E, Ef, size, cudaMemcpyHostToDevice);
checkCUDAError("Unable to copy to device, E");
// Simulated time is different from the integer timestep number
// Simulated time
double t = 0.0;
// Start the timer
double t0 = getTime();
while (t<T) {
t += dt;
niter++;
simulate(d_E, d_E_prev, d_R, alpha, n, m, kk, dt, a, epsilon, M1, M2, b);
//swap current E with previous E
double *tmp = d_E; d_E = d_E_prev; d_E_prev = tmp;
if (plot_freq){
int k = (int)(t/plot_freq);
if ((t - k * plot_freq) < dt){
splot(E,t,niter,m+2,n+2);
}
}
}//end of while loop
double time_elapsed = getTime() - t0;
// copy back all arrays
cudaMemcpy(E_prevf, d_E_prev, size, cudaMemcpyDeviceToHost);
checkCUDAError("Unable to retrieve result from device, E_prev");
cudaMemcpy(Rf, d_R, size, cudaMemcpyDeviceToHost);
checkCUDAError("Unable to retrieve result from device, R");
cudaMemcpy(Ef, d_E, size, cudaMemcpyDeviceToHost);
checkCUDAError("Unable to retrieve result from device, E");
// free memory
cudaFree(d_R); cudaFree(d_E); cudaFree(d_E_prev);
double Gflops = (double)(niter * (1E-9 * n * n ) * 28.0) / time_elapsed ;
double BW = (double)(niter * 1E-9 * (n * n * sizeof(double) * 4.0 ))/time_elapsed;
cout << "Number of Iterations : " << niter << endl;
cout << "Elapsed Time (sec) : " << time_elapsed << endl;
cout << "Sustained Gflops Rate : " << Gflops << endl;
cout << "Sustained Bandwidth (GB/sec): " << BW << endl << endl;
double mx;
double l2norm = stats(E_prevf,m,n,&mx);
cout << "Max: " << mx << " L2norm: "<< l2norm << endl;
if (plot_freq){
cout << "\n\nEnter any input to close the program and the plot..." << endl;
getchar();
}
free (E);
free (E_prev);
free (R);
return 0;
}
|
e98b05d51f5d1670d680904b037972940de40037.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
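// Grid-stride loop: a fixed-size launch covers any nthreads because each thread advances through the index space in steps of blockDim.x * gridDim.x.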
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
const int zsize,
T y,
T x,
T z,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width || z < -1.0 || z > zsize) {
//empty
return 0;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
if (z <= 0) z = 0;
int y_low = (int) y;
int x_low = (int) x;
int z_low = (int) z;
int y_high;
int x_high;
int z_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
if (z_low >= zsize - 1) {
z_high = z_low = zsize - 1;
z = (T) z_low;
} else {
z_high = z_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T lz = z - z_low;
T hy = 1. - ly, hx = 1. - lx, hz = 1. - lz;
// trilinear interpolation over the 8 surrounding voxels
T v1 = bottom_data[y_low * width * zsize + x_low * zsize + z_low];
T v2 = bottom_data[y_low * width * zsize + x_high * zsize + z_low];
T v3 = bottom_data[y_high * width * zsize + x_low * zsize + z_low];
T v4 = bottom_data[y_high * width * zsize + x_high * zsize + z_low];
T v5 = bottom_data[y_low * width * zsize + x_low * zsize + z_high];
T v6 = bottom_data[y_low * width * zsize + x_high * zsize + z_high];
T v7 = bottom_data[y_high * width * zsize + x_low * zsize + z_high];
T v8 = bottom_data[y_high * width * zsize + x_high * zsize + z_high];
T w1 = hy * hx * hz, w2 = hy * lx * hz, w3 = ly * hx * hz, w4 = ly * lx * hz;
T w5 = hy * hx * lz, w6 = hy * lx * lz, w7 = ly * hx * lz, w8 = ly * lx * lz;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4 + w5 * v5 + w6 * v6 + w7 * v7 + w8 * v8);
return val;
}
//**********************************************************************************************
template <typename T>
__global__ void RoIAlignRotated3DForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int zsize,
const int pooled_height,
const int pooled_width,
const int pooled_zsize,
const int sampling_ratio,
const T* bottom_rois,
T* top_data ) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw, pz) is an element in the pooled output
int pz = index % pooled_zsize;
int pw = (index / pooled_zsize) % pooled_width;
int ph = (index / pooled_zsize/ pooled_width) % pooled_height;
int c = (index / pooled_zsize/ pooled_width / pooled_height) % channels;
int n = index / pooled_zsize/ pooled_width / pooled_height / channels;
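// Each RoI is 8 values: batch index, center (x, y, z), size (w, h, zsize), and an in-plane rotation angle in degrees.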
const T* offset_bottom_rois = bottom_rois + n * 8;
int roi_batch_ind = offset_bottom_rois[0];
// Do not round
T roi_center_w = offset_bottom_rois[1] * spatial_scale;
T roi_center_h = offset_bottom_rois[2] * spatial_scale;
T roi_center_z = offset_bottom_rois[3] * spatial_scale;
T roi_width = offset_bottom_rois[4] * spatial_scale;
T roi_height = offset_bottom_rois[5] * spatial_scale;
T roi_zsize = offset_bottom_rois[6] * spatial_scale;
T theta = offset_bottom_rois[7] * M_PI / 180.0;
// Force malformed ROIs to be 1x1
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
roi_zsize = max(roi_zsize, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T bin_size_z = static_cast<T>(roi_zsize) / static_cast<T>(pooled_zsize);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width * zsize;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
int roi_bin_grid_z =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_zsize / pooled_zsize);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
T roi_start_z = -roi_zsize / 2.0;
T cosTheta = cos(theta);
T sinTheta = sin(theta);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w * roi_bin_grid_z; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T xx = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
for (int iz = 0; iz < roi_bin_grid_z; iz++) {
const T zz = roi_start_z + pz * bin_size_z + static_cast<T>(iz + .5f) * bin_size_z / static_cast<T>(roi_bin_grid_z);
// Rotate by theta around the center and translate
T x = xx * cosTheta + yy * sinTheta + roi_center_w;
T y = yy * cosTheta - xx * sinTheta + roi_center_h;
T z = zz + roi_center_z;
T val = bilinear_interpolate(
offset_bottom_data, height, width, zsize, y, x, z, index);
output_val += val;
}
}
}
output_val /= count;
top_data[index] = output_val;
}
}
//**********************************************************************************************
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height, const int width, const int zsize,
T y, T x, T z,
T & w1, T & w2, T & w3, T & w4, T & w5, T & w6, T & w7, T & w8,
int & x_low, int & x_high, int & y_low, int & y_high, int & z_low, int & z_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width || z < -1.0 || z > zsize) {
//empty
w1 = w2 = w3 = w4 = w5 = w6 = w7 = w8 = 0.;
x_low = x_high = y_low = y_high = z_low = z_high = -1;
return;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
if (z <= 0) z = 0;
y_low = (int) y;
x_low = (int) x;
z_low = (int) z;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
if (z_low >= zsize - 1) {
z_high = z_low = zsize - 1;
z = (T) z_low;
} else {
z_high = z_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T lz = z - z_low;
T hy = 1. - ly, hx = 1. - lx, hz = 1. - lz;
w1 = hy * hx * hz, w2 = hy * lx * hz, w3 = ly * hx * hz, w4 = ly * lx * hz;
w5 = hy * hx * lz, w6 = hy * lx * lz, w7 = ly * hx * lz, w8 = ly * lx * lz;
return;
}
template <typename T>
__global__ void RoIAlignRotated3DBackwardFeature(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int zsize,
const int pooled_height,
const int pooled_width,
const int pooled_zsize,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw, pz) is an element in the pooled output
int pz = index % pooled_zsize;
int pw = (index / pooled_zsize) % pooled_width;
int ph = (index / pooled_zsize/ pooled_width) % pooled_height;
int c = (index / pooled_zsize/ pooled_width / pooled_height) % channels;
int n = index / pooled_zsize/ pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 8;
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_center_w = offset_bottom_rois[1] * spatial_scale;
T roi_center_h = offset_bottom_rois[2] * spatial_scale;
T roi_center_z = offset_bottom_rois[3] * spatial_scale;
T roi_width = offset_bottom_rois[4] * spatial_scale;
T roi_height = offset_bottom_rois[5] * spatial_scale;
T roi_zsize = offset_bottom_rois[6] * spatial_scale;
T theta = offset_bottom_rois[7] * M_PI / 180.0;
// T roi_center_w = round(offset_bottom_rois[1] * spatial_scale);
// T roi_center_h = round(offset_bottom_rois[2] * spatial_scale);
// T roi_width = round(offset_bottom_rois[3] * spatial_scale);
// T roi_height = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
roi_zsize = max(roi_zsize, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T bin_size_z = static_cast<T>(roi_zsize) / static_cast<T>(pooled_zsize);
T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width * zsize;
int top_offset = (n * channels + c) * pooled_height * pooled_width * pooled_zsize;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width * pooled_zsize + pw * pooled_zsize + pz];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
int roi_bin_grid_z = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_zsize / pooled_zsize);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
T roi_start_z = -roi_zsize / 2.0;
T cosTheta = cos(theta);
T sinTheta = sin(theta);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w * roi_bin_grid_z; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++)
{
const T xx = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
for (int iz = 0; iz < roi_bin_grid_z; iz ++)
{
const T zz = roi_start_z + pz * bin_size_z + static_cast<T>(iz + .5f) * bin_size_z / static_cast<T>(roi_bin_grid_z);
// Rotate by theta around the center and translate
T x = xx * cosTheta + yy * sinTheta + roi_center_w;
T y = yy * cosTheta - xx * sinTheta + roi_center_h;
T z = zz + roi_center_z;
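// The rotation is applied only in the x-y plane; the z coordinate is translated but not rotated.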
T w1, w2, w3, w4, w5, w6, w7, w8;
int x_low, x_high, y_low, y_high, z_low, z_high;
bilinear_interpolate_gradient(height, width, zsize, y, x, z,
w1, w2, w3, w4, w5, w6, w7, w8,
x_low, x_high, y_low, y_high, z_low, z_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
T g5 = top_diff_this_bin * w5 / count;
T g6 = top_diff_this_bin * w6 / count;
T g7 = top_diff_this_bin * w7 / count;
T g8 = top_diff_this_bin * w8 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0 && z_low >= 0 && z_high >= 0)
{
atomicAdd(offset_bottom_diff + y_low * width * zsize + x_low * zsize + z_low, static_cast<T>(g1));
atomicAdd(offset_bottom_diff + y_low * width * zsize + x_high * zsize + z_low, static_cast<T>(g2));
atomicAdd(offset_bottom_diff + y_high * width * zsize + x_low * zsize + z_low, static_cast<T>(g3));
atomicAdd(offset_bottom_diff + y_high * width * zsize + x_high * zsize + z_low, static_cast<T>(g4));
atomicAdd(offset_bottom_diff + y_low * width * zsize + x_low * zsize + z_high, static_cast<T>(g5));
atomicAdd(offset_bottom_diff + y_low * width * zsize + x_high * zsize + z_high, static_cast<T>(g6));
atomicAdd(offset_bottom_diff + y_high * width * zsize + x_low * zsize + z_high, static_cast<T>(g7));
atomicAdd(offset_bottom_diff + y_high * width * zsize + x_high * zsize + z_high, static_cast<T>(g8));
} // if
}
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignRotated3DBackwardFeature
at::Tensor ROIAlignRotated3D_forward_cuda(const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int pooled_zsize,
const int sampling_ratio) {
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto zsize = input.size(4);
auto output = at::empty({num_rois, channels, pooled_height, pooled_width, pooled_zsize}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * pooled_zsize * channels;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(THCCeilDiv((long)output_size, 512L), 4096L));
dim3 block(512);
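// Launch config: at most 4096 blocks of 512 threads; CUDA_1D_KERNEL_LOOP is a grid-stride
// loop, so every output element is still processed when output_size exceeds grid * block.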
if (output.numel() == 0) {
THCudaCheck(hipGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlignRotated3D_forward", [&] {
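// hipLaunchKernelGGL is HIP's replacement for the CUDA <<<grid, block, shmem, stream>>> launch
// syntax; the arguments are kernel, grid, block, shared-memory bytes, stream, then kernel args.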
hipLaunchKernelGGL(( RoIAlignRotated3DForward<scalar_t>), dim3(grid), dim3(block), 0, stream,
output_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height,
width,
zsize,
pooled_height,
pooled_width,
pooled_zsize,
sampling_ratio,
rois.contiguous().data<scalar_t>(),
output.data<scalar_t>());
});
THCudaCheck(hipGetLastError());
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor ROIAlignRotated3D_backward_cuda(const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int pooled_zsize,
const int batch_size,
const int channels,
const int height,
const int width,
const int zsize,
const int sampling_ratio) {
AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto grad_input = at::zeros({batch_size, channels, height, width, zsize}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(THCCeilDiv((long)grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
THCudaCheck(hipGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlignRotated3D_backward", [&] {
hipLaunchKernelGGL(( RoIAlignRotated3DBackwardFeature<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad.contiguous().data<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
width,
zsize,
pooled_height,
pooled_width,
pooled_zsize,
sampling_ratio,
grad_input.data<scalar_t>(),
rois.contiguous().data<scalar_t>());
});
THCudaCheck(hipGetLastError());
return grad_input;
}
|
e98b05d51f5d1670d680904b037972940de40037.cu
|
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
const int zsize,
T y,
T x,
T z,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width || z < -1.0 || z > zsize) {
//empty
return 0;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
if (z <= 0) z = 0;
int y_low = (int) y;
int x_low = (int) x;
int z_low = (int) z;
int y_high;
int x_high;
int z_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
if (z_low >= zsize - 1) {
z_high = z_low = zsize - 1;
z = (T) z_low;
} else {
z_high = z_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T lz = z - z_low;
T hy = 1. - ly, hx = 1. - lx, hz = 1. - lz;
// do bilinear interpolation
T v1 = bottom_data[y_low * width * zsize + x_low * zsize + z_low];
T v2 = bottom_data[y_low * width * zsize + x_high * zsize + z_low];
T v3 = bottom_data[y_high * width * zsize + x_low * zsize + z_low];
T v4 = bottom_data[y_high * width * zsize + x_high * zsize + z_low];
T v5 = bottom_data[y_low * width * zsize + x_low * zsize + z_high];
T v6 = bottom_data[y_low * width * zsize + x_high * zsize + z_high];
T v7 = bottom_data[y_high * width * zsize + x_low * zsize + z_high];
T v8 = bottom_data[y_high * width * zsize + x_high * zsize + z_high];
T w1 = hy * hx * hz, w2 = hy * lx * hz, w3 = ly * hx * hz, w4 = ly * lx * hz;
T w5 = hy * hx * lz, w6 = hy * lx * lz, w7 = ly * hx * lz, w8 = ly * lx * lz;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4 + w5 * v5 + w6 * v6 + w7 * v7 + w8 * v8);
return val;
}
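// Despite the 2D-style name, this performs trilinear interpolation over the 8 corners of the
// enclosing voxel; the weights w1..w8 are products of the per-axis fractions and sum to 1.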
//**********************************************************************************************
template <typename T>
__global__ void RoIAlignRotated3DForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int zsize,
const int pooled_height,
const int pooled_width,
const int pooled_zsize,
const int sampling_ratio,
const T* bottom_rois,
T* top_data ) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw, pz) is an element in the pooled output
int pz = index % pooled_zsize;
int pw = (index / pooled_zsize) % pooled_width;
int ph = (index / pooled_zsize/ pooled_width) % pooled_height;
int c = (index / pooled_zsize/ pooled_width / pooled_height) % channels;
int n = index / pooled_zsize/ pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 8;
int roi_batch_ind = offset_bottom_rois[0];
// Do not round
T roi_center_w = offset_bottom_rois[1] * spatial_scale;
T roi_center_h = offset_bottom_rois[2] * spatial_scale;
T roi_center_z = offset_bottom_rois[3] * spatial_scale;
T roi_width = offset_bottom_rois[4] * spatial_scale;
T roi_height = offset_bottom_rois[5] * spatial_scale;
T roi_zsize = offset_bottom_rois[6] * spatial_scale;
T theta = offset_bottom_rois[7] * M_PI / 180.0;
// Force malformed ROIs to be 1x1
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
roi_zsize = max(roi_zsize, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T bin_size_z = static_cast<T>(roi_zsize) / static_cast<T>(pooled_zsize);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width * zsize;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
int roi_bin_grid_z =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_zsize / pooled_zsize);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
T roi_start_z = -roi_zsize / 2.0;
T cosTheta = cos(theta);
T sinTheta = sin(theta);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w * roi_bin_grid_z; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T xx = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
for (int iz = 0; iz < roi_bin_grid_z; iz++) {
const T zz = roi_start_z + pz * bin_size_z + static_cast<T>(iz + .5f) * bin_size_z / static_cast<T>(roi_bin_grid_z);
// Rotate by theta around the center and translate
T x = xx * cosTheta + yy * sinTheta + roi_center_w;
T y = yy * cosTheta - xx * sinTheta + roi_center_h;
T z = zz + roi_center_z;
T val = bilinear_interpolate(
offset_bottom_data, height, width, zsize, y, x, z, index);
output_val += val;
}
}
}
output_val /= count;
top_data[index] = output_val;
}
}
//**********************************************************************************************
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height, const int width, const int zsize,
T y, T x, T z,
T & w1, T & w2, T & w3, T & w4, T & w5, T & w6, T & w7, T & w8,
int & x_low, int & x_high, int & y_low, int & y_high, int & z_low, int & z_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width || z < -1.0 || z > zsize) {
//empty
w1 = w2 = w3 = w4 = w5 = w6 = w7 = w8 = 0.; // zero all eight corner weights
x_low = x_high = y_low = y_high = z_low = z_high = -1;
return;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
if (z <= 0) z = 0;
y_low = (int) y;
x_low = (int) x;
z_low = (int) z;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
if (z_low >= zsize - 1) {
z_high = z_low = zsize - 1;
z = (T) z_low;
} else {
z_high = z_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T lz = z - z_low;
T hy = 1. - ly, hx = 1. - lx, hz = 1. - lz;
w1 = hy * hx * hz, w2 = hy * lx * hz, w3 = ly * hx * hz, w4 = ly * lx * hz;
w5 = hy * hx * lz, w6 = hy * lx * lz, w7 = ly * hx * lz, w8 = ly * lx * lz;
return;
}
template <typename T>
__global__ void RoIAlignRotated3DBackwardFeature(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int zsize,
const int pooled_height,
const int pooled_width,
const int pooled_zsize,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw, pz) is an element in the pooled output
int pz = index % pooled_zsize;
int pw = (index / pooled_zsize) % pooled_width;
int ph = (index / pooled_zsize/ pooled_width) % pooled_height;
int c = (index / pooled_zsize/ pooled_width / pooled_height) % channels;
int n = index / pooled_zsize/ pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 8;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_center_w = offset_bottom_rois[1] * spatial_scale;
T roi_center_h = offset_bottom_rois[2] * spatial_scale;
T roi_center_z = offset_bottom_rois[3] * spatial_scale;
T roi_width = offset_bottom_rois[4] * spatial_scale;
T roi_height = offset_bottom_rois[5] * spatial_scale;
T roi_zsize = offset_bottom_rois[6] * spatial_scale;
T theta = offset_bottom_rois[7] * M_PI / 180.0;
// T roi_center_w = round(offset_bottom_rois[1] * spatial_scale);
// T roi_center_h = round(offset_bottom_rois[2] * spatial_scale);
// T roi_width = round(offset_bottom_rois[3] * spatial_scale);
// T roi_height = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
roi_zsize = max(roi_zsize, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T bin_size_z = static_cast<T>(roi_zsize) / static_cast<T>(pooled_zsize);
T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width * zsize;
int top_offset = (n * channels + c) * pooled_height * pooled_width * pooled_zsize;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width * pooled_zsize + pw * pooled_zsize + pz];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
int roi_bin_grid_z = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_zsize / pooled_zsize);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
T roi_start_z = -roi_zsize / 2.0;
T cosTheta = cos(theta);
T sinTheta = sin(theta);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w * roi_bin_grid_z; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++)
{
const T xx = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
for (int iz = 0; iz < roi_bin_grid_z; iz ++)
{
const T zz = roi_start_z + pz * bin_size_z + static_cast<T>(iz + .5f) * bin_size_z / static_cast<T>(roi_bin_grid_z);
// Rotate by theta around the center and translate
T x = xx * cosTheta + yy * sinTheta + roi_center_w;
T y = yy * cosTheta - xx * sinTheta + roi_center_h;
T z = zz + roi_center_z;
T w1, w2, w3, w4, w5, w6, w7, w8;
int x_low, x_high, y_low, y_high, z_low, z_high;
bilinear_interpolate_gradient(height, width, zsize, y, x, z,
w1, w2, w3, w4, w5, w6, w7, w8,
x_low, x_high, y_low, y_high, z_low, z_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
T g5 = top_diff_this_bin * w5 / count;
T g6 = top_diff_this_bin * w6 / count;
T g7 = top_diff_this_bin * w7 / count;
T g8 = top_diff_this_bin * w8 / count;
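// Scatter this bin's gradient to the 8 corner voxels. atomicAdd is required because
// overlapping ROIs and neighbouring bins may accumulate into the same bottom_diff element.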
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0 && z_low >= 0 && z_high >= 0)
{
atomicAdd(offset_bottom_diff + y_low * width * zsize + x_low * zsize + z_low, static_cast<T>(g1));
atomicAdd(offset_bottom_diff + y_low * width * zsize + x_high * zsize + z_low, static_cast<T>(g2));
atomicAdd(offset_bottom_diff + y_high * width * zsize + x_low * zsize + z_low, static_cast<T>(g3));
atomicAdd(offset_bottom_diff + y_high * width * zsize + x_high * zsize + z_low, static_cast<T>(g4));
atomicAdd(offset_bottom_diff + y_low * width * zsize + x_low * zsize + z_high, static_cast<T>(g5));
atomicAdd(offset_bottom_diff + y_low * width * zsize + x_high * zsize + z_high, static_cast<T>(g6));
atomicAdd(offset_bottom_diff + y_high * width * zsize + x_low * zsize + z_high, static_cast<T>(g7));
atomicAdd(offset_bottom_diff + y_high * width * zsize + x_high * zsize + z_high, static_cast<T>(g8));
} // if
}
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignRotated3DBackwardFeature
at::Tensor ROIAlignRotated3D_forward_cuda(const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int pooled_zsize,
const int sampling_ratio) {
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto zsize = input.size(4);
auto output = at::empty({num_rois, channels, pooled_height, pooled_width, pooled_zsize}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * pooled_zsize * channels;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(THCCeilDiv((long)output_size, 512L), 4096L));
dim3 block(512);
if (output.numel() == 0) {
THCudaCheck(cudaGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlignRotated3D_forward", [&] {
RoIAlignRotated3DForward<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height,
width,
zsize,
pooled_height,
pooled_width,
pooled_zsize,
sampling_ratio,
rois.contiguous().data<scalar_t>(),
output.data<scalar_t>());
});
THCudaCheck(cudaGetLastError());
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor ROIAlignRotated3D_backward_cuda(const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int pooled_zsize,
const int batch_size,
const int channels,
const int height,
const int width,
const int zsize,
const int sampling_ratio) {
AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto grad_input = at::zeros({batch_size, channels, height, width, zsize}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(THCCeilDiv((long)grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
THCudaCheck(cudaGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlignRotated3D_backward", [&] {
RoIAlignRotated3DBackwardFeature<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
width,
zsize,
pooled_height,
pooled_width,
pooled_zsize,
sampling_ratio,
grad_input.data<scalar_t>(),
rois.contiguous().data<scalar_t>());
});
THCudaCheck(cudaGetLastError());
return grad_input;
}
|
24fc7b2e4b18fbb95220b8a6fe5661016ca24dec.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/roi_align_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
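// NumBlocks caps the grid at kNumMaxinumNumBlocks; CUDA_KERNEL_LOOP (a grid-stride loop in
// Paddle's cuda_primitives.h) lets the kernels below cover elements beyond blocks * threads.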
template <class T>
__device__ T BilinearInterpolate(const T* input_data, const int height,
const int width, T y, T x) {
if (y < -1.0 || y > height || x < -1.0 || x > width) {
return 0;
}
y = y <= 0 ? 0 : y;
x = x <= 0 ? 0 : x;
int y_low = static_cast<int>(y);
int x_low = static_cast<int>(x);
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = static_cast<T>(y_low);
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = static_cast<T>(x_low);
} else {
x_high = x_low + 1;
}
T ly = y - y_low, lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
T v1 = input_data[y_low * width + x_low];
T v2 = input_data[y_low * width + x_high];
T v3 = input_data[y_high * width + x_low];
T v4 = input_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <class T>
__device__ void BilinearInterpolateGradient(const int height, const int width,
T y, T x, T* w1, T* w2, T* w3,
T* w4, int* x_low, int* x_high,
int* y_low, int* y_high) {
if (y < -1.0 || y > height || x < -1.0 || x > width) {
return;
}
y = y <= 0 ? 0 : y;
x = x <= 0 ? 0 : x;
*y_low = static_cast<int>(y);
*x_low = static_cast<int>(x);
if (*y_low >= height - 1) {
*y_high = *y_low = height - 1;
y = static_cast<T>(*y_low);
} else {
*y_high = *y_low + 1;
}
if (*x_low >= width - 1) {
*x_high = *x_low = width - 1;
x = static_cast<T>(*x_low);
} else {
*x_high = *x_low + 1;
}
T ly = y - *y_low, lx = x - *x_low;
T hy = 1. - ly, hx = 1. - lx;
*w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx;
return;
}
template <class T>
__global__ void GPUROIAlignForward(
const int nthreads, const T* input_data, const T* input_rois,
const float spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int sampling_ratio, int* roi_batch_id_data, T* output_data) {
CUDA_KERNEL_LOOP(i, nthreads) {
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
const T* offset_input_rois = input_rois + n * kROISize;
int roi_batch_ind = roi_batch_id_data[n];
T roi_xmin = offset_input_rois[0] * spatial_scale;
T roi_ymin = offset_input_rois[1] * spatial_scale;
T roi_xmax = offset_input_rois[2] * spatial_scale;
T roi_ymax = offset_input_rois[3] * spatial_scale;
T roi_width = max(roi_xmax - roi_xmin, static_cast<T>(1.));
T roi_height = max(roi_ymax - roi_ymin, static_cast<T>(1.));
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_input_data =
input_data + (roi_batch_ind * channels + c) * height * width;
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height);
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = roi_bin_grid_h * roi_bin_grid_w;
T output_val = 0;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
const T y = roi_ymin + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_xmin + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = BilinearInterpolate(offset_input_data, height, width, y, x);
output_val += val;
}
}
output_val /= count;
output_data[i] = output_val;
}
}
template <typename T>
__global__ void GPUROIAlignBackward(const int nthreads, const T* input_rois,
const T* out_grad, const int num_rois,
const float spatial_scale,
const int channels, const int height,
const int width, const int pooled_height,
const int pooled_width,
const int sampling_ratio,
int* roi_batch_id_data, T* input_grad) {
CUDA_KERNEL_LOOP(i, nthreads) {
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
const T* offset_input_rois = input_rois + n * kROISize;
int roi_batch_ind = roi_batch_id_data[n];
T roi_xmin = offset_input_rois[0] * spatial_scale;
T roi_ymin = offset_input_rois[1] * spatial_scale;
T roi_xmax = offset_input_rois[2] * spatial_scale;
T roi_ymax = offset_input_rois[3] * spatial_scale;
T roi_width = max(roi_xmax - roi_xmin, static_cast<T>(1.));
T roi_height = max(roi_ymax - roi_ymin, static_cast<T>(1.));
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_input_grad =
input_grad + (roi_batch_ind * channels + c) * height * width;
const T* offset_out_grad =
out_grad + (n * channels + c) * pooled_height * pooled_width;
const T out_grad_this_bin = offset_out_grad[ph * pooled_width + pw];
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height);
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = roi_bin_grid_h * roi_bin_grid_w;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
const T y = roi_ymin + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_xmin + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1 = 0, w2 = 0, w3 = 0, w4 = 0;
int x_low = -1, x_high = -1, y_low = -1, y_high = -1;
BilinearInterpolateGradient(height, width, y, x, &w1, &w2, &w3, &w4,
&x_low, &x_high, &y_low, &y_high);
T diff1 = out_grad_this_bin * w1 / count;
T diff2 = out_grad_this_bin * w2 / count;
T diff3 = out_grad_this_bin * w3 / count;
T diff4 = out_grad_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_low,
diff1);
platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_high,
diff2);
platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_low,
diff3);
platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_high,
diff4);
}
}
}
}
}
template <typename Place, typename T>
class GPUROIAlignOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out = ctx.Output<Tensor>("Out");
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto sampling_ratio = ctx.Attr<int>("sampling_ratio");
auto in_dims = in->dims();
int batch_size = in_dims[0];
int channels = in_dims[1];
int height = in_dims[2];
int width = in_dims[3];
int rois_num = rois->dims()[0];
if (rois_num == 0) return;
int output_size = out->numel();
int blocks = NumBlocks(output_size);
int threads = kNumCUDAThreads;
Tensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto& dev_ctx = ctx.cuda_device_context();
auto gplace = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
if (ctx.HasInput("RoisNum")) {
auto* rois_num_t = ctx.Input<Tensor>("RoisNum");
int rois_batch_size = rois_num_t->numel();
PADDLE_ENFORCE_EQ(
rois_batch_size, batch_size,
platform::errors::InvalidArgument(
"The rois_batch_size and imgs "
"batch_size must be the same. But received rois_batch_size = %d, "
"batch_size = %d",
rois_batch_size, batch_size));
std::vector<int> rois_num_list(rois_batch_size);
memory::Copy(cplace, rois_num_list.data(), gplace,
rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0);
int start = 0;
for (int n = 0; n < rois_batch_size; ++n) {
for (int i = start; i < start + rois_num_list[n]; ++i) {
roi_batch_id_data[i] = n;
}
start += rois_num_list[n];
}
} else {
auto lod = rois->lod();
PADDLE_ENFORCE_EQ(
lod.empty(), false,
platform::errors::InvalidArgument("Input(ROIs) in ROIAlignOp does "
"not contain LoD information."));
auto rois_lod = lod.back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch_size,
platform::errors::InvalidArgument(
"The batch size of rois and batch size "
"of images must be the same. But received rois batch size = %d, "
"and images batch size = %d",
rois_batch_size, batch_size));
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(
rois_num, rois_num_with_lod,
platform::errors::InvalidArgument(
"The actual number of rois and the number of rois "
"provided from Input(RoIsLoD) in RoIAlign must be the same."
" But received actual number of rois is %d, and the number "
"of rois from RoIsLoD is %d",
rois_num, rois_num_with_lod));
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
}
int bytes = roi_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
dev_ctx.stream());
hipLaunchKernelGGL(( GPUROIAlignForward<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
output_size, in->data<T>(), rois->data<T>(), spatial_scale, channels,
height, width, pooled_height, pooled_width, sampling_ratio, roi_id_data,
out->mutable_data<T>(ctx.GetPlace()));
}
};
template <typename Place, typename T>
class GPUROIAlignGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* in_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto sampling_ratio = ctx.Attr<int>("sampling_ratio");
int rois_num = rois->dims()[0];
int channels = in->dims()[1];
int height = in->dims()[2];
int width = in->dims()[3];
if (!in_grad) {
return;
}
Tensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto& dev_ctx = ctx.cuda_device_context();
auto gplace = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
if (ctx.HasInput("RoisNum")) {
auto* rois_num_t = ctx.Input<Tensor>("RoisNum");
int rois_batch_size = rois_num_t->numel();
std::vector<int> rois_num_list(rois_batch_size);
memory::Copy(cplace, rois_num_list.data(), gplace,
rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0);
int start = 0;
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = start; i < start + rois_num_list[n]; ++i) {
roi_batch_id_data[i] = n;
}
start += rois_num_list[n];
}
} else {
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
}
auto roi_ptr =
memory::Alloc(dev_ctx, roi_batch_id_list.numel() * sizeof(int));
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
int bytes = roi_batch_id_list.numel() * sizeof(int);
memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
dev_ctx.stream());
in_grad->mutable_data<T>(ctx.GetPlace());
math::SetConstant<Place, T> set_zero;
set_zero(dev_ctx, in_grad, static_cast<T>(0));
int output_grad_size = out_grad->numel();
int blocks = NumBlocks(output_grad_size);
int threads = kNumCUDAThreads;
if (output_grad_size > 0) {
hipLaunchKernelGGL(( GPUROIAlignBackward<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
output_grad_size, rois->data<T>(), out_grad->data<T>(), rois_num,
spatial_scale, channels, height, width, pooled_height, pooled_width,
sampling_ratio, roi_id_data,
in_grad->mutable_data<T>(ctx.GetPlace()));
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
roi_align,
ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
roi_align_grad,
ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, double>);
|
24fc7b2e4b18fbb95220b8a6fe5661016ca24dec.cu
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/roi_align_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
template <class T>
__device__ T BilinearInterpolate(const T* input_data, const int height,
const int width, T y, T x) {
if (y < -1.0 || y > height || x < -1.0 || x > width) {
return 0;
}
y = y <= 0 ? 0 : y;
x = x <= 0 ? 0 : x;
int y_low = static_cast<int>(y);
int x_low = static_cast<int>(x);
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = static_cast<T>(y_low);
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = static_cast<T>(x_low);
} else {
x_high = x_low + 1;
}
T ly = y - y_low, lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
T v1 = input_data[y_low * width + x_low];
T v2 = input_data[y_low * width + x_high];
T v3 = input_data[y_high * width + x_low];
T v4 = input_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <class T>
__device__ void BilinearInterpolateGradient(const int height, const int width,
T y, T x, T* w1, T* w2, T* w3,
T* w4, int* x_low, int* x_high,
int* y_low, int* y_high) {
if (y < -1.0 || y > height || x < -1.0 || x > width) {
return;
}
y = y <= 0 ? 0 : y;
x = x <= 0 ? 0 : x;
*y_low = static_cast<int>(y);
*x_low = static_cast<int>(x);
if (*y_low >= height - 1) {
*y_high = *y_low = height - 1;
y = static_cast<T>(*y_low);
} else {
*y_high = *y_low + 1;
}
if (*x_low >= width - 1) {
*x_high = *x_low = width - 1;
x = static_cast<T>(*x_low);
} else {
*x_high = *x_low + 1;
}
T ly = y - *y_low, lx = x - *x_low;
T hy = 1. - ly, hx = 1. - lx;
*w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx;
return;
}
template <class T>
__global__ void GPUROIAlignForward(
const int nthreads, const T* input_data, const T* input_rois,
const float spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int sampling_ratio, int* roi_batch_id_data, T* output_data) {
CUDA_KERNEL_LOOP(i, nthreads) {
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
const T* offset_input_rois = input_rois + n * kROISize;
int roi_batch_ind = roi_batch_id_data[n];
T roi_xmin = offset_input_rois[0] * spatial_scale;
T roi_ymin = offset_input_rois[1] * spatial_scale;
T roi_xmax = offset_input_rois[2] * spatial_scale;
T roi_ymax = offset_input_rois[3] * spatial_scale;
T roi_width = max(roi_xmax - roi_xmin, static_cast<T>(1.));
T roi_height = max(roi_ymax - roi_ymin, static_cast<T>(1.));
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_input_data =
input_data + (roi_batch_ind * channels + c) * height * width;
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height);
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = roi_bin_grid_h * roi_bin_grid_w;
T output_val = 0;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
const T y = roi_ymin + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_xmin + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = BilinearInterpolate(offset_input_data, height, width, y, x);
output_val += val;
}
}
output_val /= count;
output_data[i] = output_val;
}
}
template <typename T>
__global__ void GPUROIAlignBackward(const int nthreads, const T* input_rois,
const T* out_grad, const int num_rois,
const float spatial_scale,
const int channels, const int height,
const int width, const int pooled_height,
const int pooled_width,
const int sampling_ratio,
int* roi_batch_id_data, T* input_grad) {
CUDA_KERNEL_LOOP(i, nthreads) {
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int c = (i / pooled_width / pooled_height) % channels;
int n = i / pooled_width / pooled_height / channels;
const T* offset_input_rois = input_rois + n * kROISize;
int roi_batch_ind = roi_batch_id_data[n];
T roi_xmin = offset_input_rois[0] * spatial_scale;
T roi_ymin = offset_input_rois[1] * spatial_scale;
T roi_xmax = offset_input_rois[2] * spatial_scale;
T roi_ymax = offset_input_rois[3] * spatial_scale;
T roi_width = max(roi_xmax - roi_xmin, static_cast<T>(1.));
T roi_height = max(roi_ymax - roi_ymin, static_cast<T>(1.));
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_input_grad =
input_grad + (roi_batch_ind * channels + c) * height * width;
const T* offset_out_grad =
out_grad + (n * channels + c) * pooled_height * pooled_width;
const T out_grad_this_bin = offset_out_grad[ph * pooled_width + pw];
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height);
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = roi_bin_grid_h * roi_bin_grid_w;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
const T y = roi_ymin + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_xmin + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1 = 0, w2 = 0, w3 = 0, w4 = 0;
int x_low = -1, x_high = -1, y_low = -1, y_high = -1;
BilinearInterpolateGradient(height, width, y, x, &w1, &w2, &w3, &w4,
&x_low, &x_high, &y_low, &y_high);
T diff1 = out_grad_this_bin * w1 / count;
T diff2 = out_grad_this_bin * w2 / count;
T diff3 = out_grad_this_bin * w3 / count;
T diff4 = out_grad_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_low,
diff1);
platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_high,
diff2);
platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_low,
diff3);
platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_high,
diff4);
}
}
}
}
}
template <typename Place, typename T>
class GPUROIAlignOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out = ctx.Output<Tensor>("Out");
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto sampling_ratio = ctx.Attr<int>("sampling_ratio");
auto in_dims = in->dims();
int batch_size = in_dims[0];
int channels = in_dims[1];
int height = in_dims[2];
int width = in_dims[3];
int rois_num = rois->dims()[0];
if (rois_num == 0) return;
int output_size = out->numel();
int blocks = NumBlocks(output_size);
int threads = kNumCUDAThreads;
Tensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto& dev_ctx = ctx.cuda_device_context();
auto gplace = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
if (ctx.HasInput("RoisNum")) {
auto* rois_num_t = ctx.Input<Tensor>("RoisNum");
int rois_batch_size = rois_num_t->numel();
PADDLE_ENFORCE_EQ(
rois_batch_size, batch_size,
platform::errors::InvalidArgument(
"The rois_batch_size and imgs "
"batch_size must be the same. But received rois_batch_size = %d, "
"batch_size = %d",
rois_batch_size, batch_size));
std::vector<int> rois_num_list(rois_batch_size);
memory::Copy(cplace, rois_num_list.data(), gplace,
rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0);
int start = 0;
for (int n = 0; n < rois_batch_size; ++n) {
for (int i = start; i < start + rois_num_list[n]; ++i) {
roi_batch_id_data[i] = n;
}
start += rois_num_list[n];
}
} else {
auto lod = rois->lod();
PADDLE_ENFORCE_EQ(
lod.empty(), false,
platform::errors::InvalidArgument("Input(ROIs) in ROIAlignOp does "
"not contain LoD information."));
auto rois_lod = lod.back();
int rois_batch_size = rois_lod.size() - 1;
PADDLE_ENFORCE_EQ(
rois_batch_size, batch_size,
platform::errors::InvalidArgument(
"The batch size of rois and batch size "
"of images must be the same. But received rois batch size = %d, "
"and images batch size = %d",
rois_batch_size, batch_size));
int rois_num_with_lod = rois_lod[rois_batch_size];
PADDLE_ENFORCE_EQ(
rois_num, rois_num_with_lod,
platform::errors::InvalidArgument(
"The actual number of rois and the number of rois "
"provided from Input(RoIsLoD) in RoIAlign must be the same."
" But received actual number of rois is %d, and the number "
"of rois from RoIsLoD is %d",
rois_num, rois_num_with_lod));
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
}
int bytes = roi_batch_id_list.numel() * sizeof(int);
auto roi_ptr = memory::Alloc(dev_ctx, bytes);
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
dev_ctx.stream());
GPUROIAlignForward<T><<<blocks, threads, 0, dev_ctx.stream()>>>(
output_size, in->data<T>(), rois->data<T>(), spatial_scale, channels,
height, width, pooled_height, pooled_width, sampling_ratio, roi_id_data,
out->mutable_data<T>(ctx.GetPlace()));
}
};
template <typename Place, typename T>
class GPUROIAlignGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<Tensor>("X");
auto* rois = ctx.Input<LoDTensor>("ROIs");
auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* in_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto pooled_height = ctx.Attr<int>("pooled_height");
auto pooled_width = ctx.Attr<int>("pooled_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto sampling_ratio = ctx.Attr<int>("sampling_ratio");
int rois_num = rois->dims()[0];
int channels = in->dims()[1];
int height = in->dims()[2];
int width = in->dims()[3];
if (!in_grad) {
return;
}
Tensor roi_batch_id_list;
roi_batch_id_list.Resize({rois_num});
auto cplace = platform::CPUPlace();
int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
auto& dev_ctx = ctx.cuda_device_context();
auto gplace = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
if (ctx.HasInput("RoisNum")) {
auto* rois_num_t = ctx.Input<Tensor>("RoisNum");
int rois_batch_size = rois_num_t->numel();
std::vector<int> rois_num_list(rois_batch_size);
memory::Copy(cplace, rois_num_list.data(), gplace,
rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0);
int start = 0;
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = start; i < start + rois_num_list[n]; ++i) {
roi_batch_id_data[i] = n;
}
start += rois_num_list[n];
}
} else {
auto rois_lod = rois->lod().back();
int rois_batch_size = rois_lod.size() - 1;
for (int n = 0; n < rois_batch_size; ++n) {
for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
roi_batch_id_data[i] = n;
}
}
}
auto roi_ptr =
memory::Alloc(dev_ctx, roi_batch_id_list.numel() * sizeof(int));
int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
int bytes = roi_batch_id_list.numel() * sizeof(int);
memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
dev_ctx.stream());
in_grad->mutable_data<T>(ctx.GetPlace());
math::SetConstant<Place, T> set_zero;
set_zero(dev_ctx, in_grad, static_cast<T>(0));
int output_grad_size = out_grad->numel();
int blocks = NumBlocks(output_grad_size);
int threads = kNumCUDAThreads;
if (output_grad_size > 0) {
GPUROIAlignBackward<T><<<blocks, threads, 0, dev_ctx.stream()>>>(
output_grad_size, rois->data<T>(), out_grad->data<T>(), rois_num,
spatial_scale, channels, height, width, pooled_height, pooled_width,
sampling_ratio, roi_id_data,
in_grad->mutable_data<T>(ctx.GetPlace()));
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
roi_align,
ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
roi_align_grad,
ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, double>);
|
ef4db680d18f66dcdbb61a50c3bf52052bc1485b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <math.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define IND(i, j) ((i) * (N + 2) + (j))
enum {
N = 1024,
ITERS_MAX = 1,
BLOCK_1D_SIZE = 1024,
BLOCK_2D_SIZE = 32
};
typedef uint8_t cell_t;
double wtime()
{
struct timeval t;
gettimeofday(&t, NULL);
return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}
__global__ void copy_ghost_rows(cell_t *grid, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i <= n + 1) {
// Bottom ghost row: [N + 1][0..N + 1] <== [1][0..N + 1]
grid[IND(N + 1, i)] = grid[IND(1, i)];
// Top ghost row: [0][0..N + 1] <== [N][0..N + 1]
grid[IND(0, i)] = grid[IND(N, i)];
}
}
__global__ void copy_ghost_cols(cell_t *grid, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x + 1;
if (i <= n) {
// Right ghost column: [1..N][N + 1] <== [1..N][1]
grid[IND(i, N + 1)] = grid[IND(i, 1)];
// Left ghost column: [1..N][1] <== [1..N][N]
grid[IND(i, 0)] = grid[IND(i, N)];
}
}
__constant__ int states[2][9] = {
{0, 0, 0, 1, 0, 0, 0, 0, 0}, /* New states for a dead cell */
{0, 0, 1, 1, 0, 0, 0, 0, 0} /* New states for an alive cell */
};
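// Lookup table for Conway's rules, indexed as states[current state][live neighbour count]:
// a dead cell is born with exactly 3 neighbours, a live cell survives with 2 or 3.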
__global__ void update_cells(cell_t *grid, cell_t *newgrid, int n)
{
int iy = blockIdx.y * (blockDim.y - 2) + threadIdx.y;
int ix = blockIdx.x * (blockDim.x - 2) + threadIdx.x;
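// Blocks overlap by one cell on each side: each block updates (blockDim - 2)^2 interior cells,
// while its border threads only stage halo cells into shared memory.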
int i = threadIdx.y;
int j = threadIdx.x;
// Copy cells of the block into shared memory
__shared__ cell_t s_grid[BLOCK_2D_SIZE][BLOCK_2D_SIZE];
if (ix <= n + 1 && iy <= n + 1)
s_grid[i][j] = grid[IND(iy, ix)];
__syncthreads();
if (ix <= n && iy <= n) {
if (i > 0 && i != blockDim.y - 1 && j > 0 && j != blockDim.x - 1) {
int nneibs = s_grid[i + 1][j] + s_grid[i - 1][j] + s_grid[i][j + 1] + s_grid[i][j - 1] +
s_grid[i + 1][j + 1] + s_grid[i - 1][j - 1] +
s_grid[i - 1][j + 1] + s_grid[i + 1][j - 1];
cell_t state = s_grid[i][j];
newgrid[IND(iy, ix)] = states[state][nneibs];
}
}
}
int main(int argc, char* argv[])
{
// Grid with periodic boundary conditions (ghost cells)
size_t ncells = (N + 2) * (N + 2);
size_t size = sizeof(cell_t) * ncells;
cell_t *grid = (cell_t *)malloc(size);
// Initial population
srand(0);
for (int i = 1; i <= N; i++)
for (int j = 1; j <= N; j++)
grid[IND(i, j)] = rand() % 2;
cell_t *d_grid, *d_newgrid;
double tmem = -wtime();
hipMalloc((void **)&d_grid, size);
hipMalloc((void **)&d_newgrid, size);
hipMemcpy(d_grid, grid, size, hipMemcpyHostToDevice);
tmem += wtime();
// 1d grids for copying ghost cells
dim3 block(BLOCK_1D_SIZE, 1, 1);
dim3 cols_grid((N + block.x - 1) / block.x, 1, 1);
dim3 rows_grid((N + 2 + block.x - 1) / block.x, 1, 1);
// 2d grid for updating cells: one thread per cell
dim3 block2d(BLOCK_2D_SIZE, BLOCK_2D_SIZE, 1);
// Boundary threads in block only for loading ghost data
int nblocks = (int)ceilf((float)N / (BLOCK_2D_SIZE - 2)); // float division so the last partial block is counted
dim3 grid2d(nblocks, nblocks, 1);
double t = wtime();
int iter = 0;
for (iter = 0; iter < ITERS_MAX; iter++) {
// Copy ghost cells
hipLaunchKernelGGL(( copy_ghost_cols), dim3(cols_grid), dim3(block), 0, 0, d_grid, N);
hipLaunchKernelGGL(( copy_ghost_rows), dim3(rows_grid), dim3(block), 0, 0, d_grid, N);
// Update cells
hipLaunchKernelGGL(( update_cells), dim3(grid2d), dim3(block2d), 0, 0, d_grid, d_newgrid, N);
// Swap grids
cell_t *p = d_grid;
d_grid = d_newgrid;
d_newgrid = p;
}
hipDeviceSynchronize();
t = wtime() - t;
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
tmem -= wtime();
hipMemcpy(grid, d_grid, size, hipMemcpyDeviceToHost);
tmem += wtime();
/*
for (int i = 0; i < N + 2; i++) {
for (int j = 0; j < N + 2; j++)
printf("%1d ", grid[IND(i, j)]);
printf("\n");
}
*/
size_t total = 0;
for (int i = 1; i <= N; i++) {
for (int j = 1; j <= N; j++)
total += grid[IND(i, j)];
}
printf("Game of Life: N = %d, iterations = %d\n", N, iter);
printf("Total alive cells: %lu\n", total);
printf("Iterations time (sec.): %.6f\n", t);
printf("GPU memory ops. time (sec.): %.6f\n", tmem);
printf("Iters per sec.: %.2f\n", iter / t);
printf("Total time (sec.): %.6f\n", t + tmem);
free(grid);
hipFree(d_grid);
hipFree(d_newgrid);
return 0;
}
|
ef4db680d18f66dcdbb61a50c3bf52052bc1485b.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <math.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#define IND(i, j) ((i) * (N + 2) + (j))
enum {
N = 1024,
ITERS_MAX = 1,
BLOCK_1D_SIZE = 1024,
BLOCK_2D_SIZE = 32
};
typedef uint8_t cell_t;
double wtime()
{
struct timeval t;
gettimeofday(&t, NULL);
return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}
__global__ void copy_ghost_rows(cell_t *grid, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i <= n + 1) {
// Bottom ghost row: [N + 1][0..N + 1] <== [1][0..N + 1]
grid[IND(N + 1, i)] = grid[IND(1, i)];
// Top ghost row: [0][0..N + 1] <== [N][0..N + 1]
grid[IND(0, i)] = grid[IND(N, i)];
}
}
__global__ void copy_ghost_cols(cell_t *grid, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x + 1;
if (i <= n) {
// Right ghost column: [1..N][N + 1] <== [1..N][1]
grid[IND(i, N + 1)] = grid[IND(i, 1)];
// Left ghost column: [1..N][1] <== [1..N][N]
grid[IND(i, 0)] = grid[IND(i, N)];
}
}
__constant__ int states[2][9] = {
{0, 0, 0, 1, 0, 0, 0, 0, 0}, /* New states for a dead cell */
{0, 0, 1, 1, 0, 0, 0, 0, 0} /* New states for an alive cell */
};
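// Lookup table for Conway's rules, indexed as states[current state][live neighbour count]:
// a dead cell is born with exactly 3 neighbours, a live cell survives with 2 or 3.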
__global__ void update_cells(cell_t *grid, cell_t *newgrid, int n)
{
int iy = blockIdx.y * (blockDim.y - 2) + threadIdx.y;
int ix = blockIdx.x * (blockDim.x - 2) + threadIdx.x;
int i = threadIdx.y;
int j = threadIdx.x;
// Copy cells of the block into shared memory
__shared__ cell_t s_grid[BLOCK_2D_SIZE][BLOCK_2D_SIZE];
if (ix <= n + 1 && iy <= n + 1)
s_grid[i][j] = grid[IND(iy, ix)];
__syncthreads();
if (ix <= n && iy <= n) {
if (i > 0 && i != blockDim.y - 1 && j > 0 && j != blockDim.x - 1) {
int nneibs = s_grid[i + 1][j] + s_grid[i - 1][j] + s_grid[i][j + 1] + s_grid[i][j - 1] +
s_grid[i + 1][j + 1] + s_grid[i - 1][j - 1] +
s_grid[i - 1][j + 1] + s_grid[i + 1][j - 1];
cell_t state = s_grid[i][j];
newgrid[IND(iy, ix)] = states[state][nneibs];
}
}
}
int main(int argc, char* argv[])
{
// Grid with periodic boundary conditions (ghost cells)
size_t ncells = (N + 2) * (N + 2);
size_t size = sizeof(cell_t) * ncells;
cell_t *grid = (cell_t *)malloc(size);
// Initial population
srand(0);
for (int i = 1; i <= N; i++)
for (int j = 1; j <= N; j++)
grid[IND(i, j)] = rand() % 2;
cell_t *d_grid, *d_newgrid;
double tmem = -wtime();
cudaMalloc((void **)&d_grid, size);
cudaMalloc((void **)&d_newgrid, size);
cudaMemcpy(d_grid, grid, size, cudaMemcpyHostToDevice);
tmem += wtime();
// 1d grids for copying ghost cells
dim3 block(BLOCK_1D_SIZE, 1, 1);
dim3 cols_grid((N + block.x - 1) / block.x, 1, 1);
dim3 rows_grid((N + 2 + block.x - 1) / block.x, 1, 1);
// 2d grid for updating cells: one thread per cell
dim3 block2d(BLOCK_2D_SIZE, BLOCK_2D_SIZE, 1);
// Boundary threads in block only for loading ghost data
int nblocks = (int)ceilf((float)N / (BLOCK_2D_SIZE - 2)); // float division so the last partial block is counted
dim3 grid2d(nblocks, nblocks, 1);
double t = wtime();
int iter = 0;
for (iter = 0; iter < ITERS_MAX; iter++) {
// Copy ghost cells
copy_ghost_cols<<<cols_grid, block>>>(d_grid, N);
copy_ghost_rows<<<rows_grid, block>>>(d_grid, N);
// Update cells
update_cells<<<grid2d, block2d>>>(d_grid, d_newgrid, N);
// Swap grids
cell_t *p = d_grid;
d_grid = d_newgrid;
d_newgrid = p;
}
cudaDeviceSynchronize();
t = wtime() - t;
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
printf("CUDA error: %s\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
tmem -= wtime();
cudaMemcpy(grid, d_grid, size, cudaMemcpyDeviceToHost);
tmem += wtime();
/*
for (int i = 0; i < N + 2; i++) {
for (int j = 0; j < N + 2; j++)
printf("%1d ", grid[IND(i, j)]);
printf("\n");
}
*/
size_t total = 0;
for (int i = 1; i <= N; i++) {
for (int j = 1; j <= N; j++)
total += grid[IND(i, j)];
}
printf("Game of Life: N = %d, iterations = %d\n", N, iter);
printf("Total alive cells: %lu\n", total);
printf("Iterations time (sec.): %.6f\n", t);
printf("GPU memory ops. time (sec.): %.6f\n", tmem);
printf("Iters per sec.: %.2f\n", iter / t);
printf("Total time (sec.): %.6f\n", t + tmem);
free(grid);
cudaFree(d_grid);
cudaFree(d_newgrid);
return 0;
}
|
c360986763de157b97722ed5d90a3bc3378d98a7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "FluidGPU.cuh"
#include <cmath>
#include <hip/hip_runtime.h>
#include <iostream>
#include <thrust/sort.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
float kernel(float r) {
if (r >= 0 && r <= cutoff) {
return 1. / 3.14159 / (powf(cutoff, 3))*(1 - 3. / 2. * powf((r / cutoff), 2) + 3. / 4. * powf((r / cutoff), 3));
}
else if (r > cutoff && r < (2 * cutoff)) {
return 1. / 3.14159 / (powf(cutoff, 3)) * 1 / 4. * powf(2 - (r / cutoff), 3);
}
else {
return 0;
}
}
float kernel_test(float r) {
if (r >= 0 && r <= cutoff) {
return 1. / 3.14159 / (powf(cutoff, 4))*(1 - 3. * powf((r / cutoff), 1) + 9. / 4. * powf((r / cutoff), 2));
}
else if (r > cutoff && r < (2 * cutoff)) {
return -1. / 3.14159 / (powf(cutoff, 4)) * 1 / 2. * powf(2 - (r / cutoff), 2);
}
else {
return 0;
}
}
float kernel_derivative(float r) {
if (r < cutoff) {
return -45.0 / 3.14159 / powf(cutoff, 6)*powf((cutoff - r), 2);
}
else {
return 0;
}
}
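// These appear to be standard SPH smoothing kernels with smoothing length `cutoff`:
// kernel() matches the cubic-spline (M4) form with support 2*cutoff, and kernel_derivative()
// matches the gradient magnitude of the "spiky" kernel often used for pressure forces.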
//Dot product
inline float dot_prod(float x1, float y1, float z1, float x2, float y2, float z2) {
return x1*x2 + y1*y2 + z1*z2;
}
//Cross products
inline float cross_prod_x(float x1, float y1, float z1, float x2, float y2, float z2) {
return y1*z2 - z1*y2;
}
inline float cross_prod_y(float x1, float y1, float z1, float x2, float y2, float z2) {
return -x1*z2 + z1*x2;
}
inline float cross_prod_z(float x1, float y1, float z1, float x2, float y2, float z2) {
return x1*y2 - y1*x2;
}
__device__ int morton(unsigned int x, unsigned int y, unsigned int z) {
//int x = (bidx / GRIDSIZE / GRIDSIZE);
//int y = (bidx / GRIDSIZE % GRIDSIZE);
//int z = (bidx % GRIDSIZE);
x = (x | (x << 16)) & 0x030000FF;
x = (x | (x << 8)) & 0x0300F00F;
x = (x | (x << 4)) & 0x030C30C3;
x = (x | (x << 2)) & 0x09249249;
y = (y | (y << 16)) & 0x030000FF;
y = (y | (y << 8)) & 0x0300F00F;
y = (y | (y << 4)) & 0x030C30C3;
y = (y | (y << 2)) & 0x09249249;
z = (z | (z << 16)) & 0x030000FF;
z = (z | (z << 8)) & 0x0300F00F;
z = (z | (z << 4)) & 0x030C30C3;
z = (z | (z << 2)) & 0x09249249;
return x | (y << 1) | (z << 2);
}
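// Worked example of the 3-D Morton (Z-order) encoding above: morton(1, 2, 3)
// interleaves x = 001b, y = 010b, z = 011b into (z1 y1 x1)(z0 y0 x0) = 110101b = 53,
// and demorton(53, b) with b = 0, 1, 2 recovers 1, 2 and 3 respectively.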
__device__ inline int demorton(unsigned int x, int b) {
//b should be 0 for x, 1 for y, 2 for z
switch (b) {
case 0: break;
case 1: x = (x >> 1);
break;
case 2: x = (x >> 2);
break;
}
x &= 0x09249249; // x = ---- 9--8 --7- -6-- 5--4 --3- -2-- 1--0
x = (x | (x >> 2)) & 0x030c30c3; // x = ---- --98 ---- 76-- --54 ---- 32-- --10
x = (x | (x >> 4)) & 0x0300f00f; // x = ---- --98 ---- ---- 7654 ---- ---- 3210
x = (x | (x >> 8)) & 0xff0000ff; // x = ---- --98 ---- ---- ---- ---- 7654 3210
x = (x | (x >> 16)) & 0x000003ff; // x = ---- ---- ---- ---- ---- --98 7654 3210
return x;
}
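// findneighbours assumes the particles have already been sorted by cell index
// (cell[] non-decreasing, e.g. via thrust::sort) and records the first and last
// particle index of every occupied cell. For example, cell = {0, 0, 2, 2, 2, 5}
// yields start[0]=0, end[0]=1, start[2]=2, end[2]=4, start[5]=5, end[5]=5;
// unoccupied cells keep the -1 written by mykernel2.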
__global__ void findneighbours(int *cell, int *start, int *end, int nspts) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < nspts) {
if (idx == 0 || cell[idx] != cell[idx - 1]) { // test idx first so cell[-1] is never read
start[cell[idx]] = idx;
}
if (idx == nspts - 1 || cell[idx] != cell[idx + 1]) { // test idx first so cell[nspts] is never read
end[cell[idx]] = idx;
}
}
}
__global__ void mykernel(Particle *SPptr, int *cell, int *start, int *end, int nspts) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int bidx = blockIdx.x;
int tidx = threadIdx.x;
int nb[27] = { -GRIDSIZE*GRIDSIZE - GRIDSIZE - 1, -GRIDSIZE*GRIDSIZE - GRIDSIZE,-GRIDSIZE*GRIDSIZE - GRIDSIZE + 1, -GRIDSIZE*GRIDSIZE - 1, -GRIDSIZE*GRIDSIZE, -GRIDSIZE*GRIDSIZE + 1, -GRIDSIZE*GRIDSIZE + GRIDSIZE - 1, -GRIDSIZE*GRIDSIZE + GRIDSIZE, -GRIDSIZE*GRIDSIZE + GRIDSIZE + 1,
-GRIDSIZE - 1, -GRIDSIZE,-GRIDSIZE + 1, -1, 0, +1, GRIDSIZE - 1, GRIDSIZE, GRIDSIZE + 1,
GRIDSIZE*GRIDSIZE - GRIDSIZE - 1, GRIDSIZE*GRIDSIZE - GRIDSIZE,GRIDSIZE*GRIDSIZE - GRIDSIZE + 1, GRIDSIZE*GRIDSIZE - 1, GRIDSIZE*GRIDSIZE, GRIDSIZE*GRIDSIZE + 1, GRIDSIZE*GRIDSIZE + GRIDSIZE - 1, GRIDSIZE*GRIDSIZE + GRIDSIZE, GRIDSIZE*GRIDSIZE + GRIDSIZE + 1 };
//__shared__ int nb[27];
//if (tidx < 27) {
// int x = demorton(bidx, 0);
// int y = demorton(bidx, 1);
// int z = demorton(bidx, 2);
// nb[tidx] = morton(x + tidx/9-1, y + (tidx/3)%3-1, z + tidx%3-1);
//}
//__syncthreads();
__shared__ short int p[27];
__shared__ short int pidx[27];
int __shared__ sum[64];// = 0;
int __shared__ jj[64];// = 0;
volatile __shared__ int total;
volatile __shared__ int blockpop;
if (idx < 64) {
sum[idx] = 650;
jj[idx] = 650;
}
__syncthreads();
//__shared__ short int sum[27];
//__shared__ short int j[27];
//if (idx <nspts) { printf("%d, %d \n", idx, SPptr[idx].cellnumber); }
if (tidx < 27) { p[tidx] = 0; }
if (start[bidx] >= 0) {
//if (bidx == 0) { printf("%d\n", start[bidx]); }
///////////count and sort population of neighbour cells//////////////
if (tidx < 27 && bidx+ nb[tidx] >= 0 && bidx + nb[tidx] < NUMCELLS && start[bidx + nb[tidx]] >= 0 && end[bidx + nb[tidx]] >= 0 && start[bidx + nb[tidx]] < nspts && 1 + end[bidx + nb[tidx]] - start[bidx + nb[tidx]] > 0 ) {
p[tidx] = 1 + end[bidx + nb[tidx]] - start[bidx + nb[tidx]]; //count population of neighbour cells so we know how many threads to use
pidx[tidx] = tidx;
}
if (tidx == 13) { blockpop = p[tidx]; }
}
else {
if (tidx == 13) { blockpop = 0; }
}
__syncthreads();
//if (bidx == 21641 && tidx==0) { printf("%d %d %d \n", p[13], nb[13], start[nb[13]]); }
if (start[bidx] >= 0) {
if (tidx == 0) {
total = 0;
for (int i = 0; i < 27; i++) {
if (p[i] < 64 && p[i]>0 && bidx + nb[i] >= 0 && bidx + nb[i] < NUMCELLS && start[bidx + nb[i]] >= 0 && end[bidx + nb[i]] >= 0 && start[bidx + nb[i]] < nspts) { total += p[i]; }
}
}
}
else {
if (tidx == 0) {total = 0; }
}
__syncthreads();
if (start[bidx] >= 0) {
if (tidx == 0) {
int count = 0;
for (int i = 0; i < 27; i++) {
if (p[i] != 0) {
p[count++] = p[i];
pidx[count - 1] = pidx[i]; //sort
}
}
while (count < 27) {
p[count++] = 0; //need to reset popidx in a future kernel
pidx[count - 1] = 0;
}
}
}
__syncthreads();
if (start[bidx] >= 0) {
if (tidx < total) {
sum[tidx] = 0;
jj[tidx] = 0;
while (tidx + 1 > sum[tidx]) {
sum[tidx] += p[jj[tidx]];
jj[tidx]++;
}
}
}
__syncthreads();
//if (bidx== 34624 && tidx < total) { printf("tidx: %d, cell#:%d, jj:%d, sum:%d \n", tidx, bidx + nb[pidx[jj[tidx] - 1]], jj[tidx],p[jj[tidx]]); }
//__syncthreads();
// __shared__ float k[8];
// __shared__ float rabx[8];
// __shared__ float raby[8];
// __shared__ float rabz[8];
// __shared__ float vabx[8];
// __shared__ float vaby[8];
//__shared__ float vabz[8];
if (start[bidx] >= 0) {
if (tidx < total && bidx + nb[pidx[jj[tidx] - 1]] >= 0 && bidx + nb[pidx[jj[tidx] - 1]] < NUMCELLS) {
///////////////////////////////////////////////////////////////////
volatile int j = start[bidx + nb[pidx[jj[tidx] - 1]]] + sum[tidx] - (tidx + 1);
if (start[bidx + nb[pidx[jj[tidx] - 1]]] >= 0 && j < nspts && j >= 0) {
//int i = start[bidx] + tidx / total;
for (volatile int i = start[bidx]; i <= end[bidx]; i++) {
float ds = (SPptr[i]).distance((SPptr[j]));
if (ds <= (2 * cutoff) && ds > 0) {
volatile float k = kernel(ds);
volatile float rabx = (SPptr[i]).rab_x((SPptr[j]));
volatile float raby = (SPptr[i]).rab_y((SPptr[j]));
volatile float rabz = (SPptr[i]).rab_z((SPptr[j]));
volatile float vabx = (SPptr[i]).vab_x((SPptr[j]));
volatile float vaby = (SPptr[i]).vab_y((SPptr[j]));
volatile float vabz = (SPptr[i]).vab_z((SPptr[j]));
volatile float dkx = kernel_derivative(ds)*rabx / ds;
volatile float dky = kernel_derivative(ds)*raby / ds;
volatile float dkz = kernel_derivative(ds)*rabz / ds;
//float dkxtest = kernel_test(ds)*rabx / ds;
//float dkytest = kernel_test(ds)*raby / ds;
//float dkztest = kernel_test(ds)*rabz / ds;
volatile float d = dot_prod(vabx, vaby, vabz, rabx, raby, rabz);
volatile float d2 = powf(ds, 2);
volatile float s = (ALPHA_FLUID * SOUND * (cutoff * (d / (d2 + 0.01*powf(cutoff, 2))) + 50 * 1.0 / SOUND*powf(cutoff * (d / (d2 + 0.01*powf(cutoff, 2))), 2)) / (((SPptr[i]).dens + (SPptr[j]).dens) / 2.0)) *(d < 0)*(1 + (!(SPptr[i]).boundary)*((SPptr[j]).boundary) * ALPHA_BOUNDARY);
//float s2 = ALPHA_LAMINAR_FLUID * SOUND * cutoff / ((SPptr[i]).dens + (SPptr[j]).dens)*d*(d < 0) / (d2 + 0.01*pow(cutoff, 2))*(1 + (!(SPptr[i]).boundary)*((SPptr[j]).boundary) *ALPHA_LAMINAR_BOUNDARY); //laminar
volatile float dpx = ((SPptr[j]).press / powf((SPptr[j]).dens, 2) + (SPptr[i]).press / powf((SPptr[i]).dens, 2) + s)*dkx;
volatile float dpy = ((SPptr[j]).press / powf((SPptr[j]).dens, 2) + (SPptr[i]).press / powf((SPptr[i]).dens, 2) + s)*dky;
volatile float dpz = ((SPptr[j]).press / powf((SPptr[j]).dens, 2) + (SPptr[i]).press / powf((SPptr[i]).dens, 2) + s)*dkz;
//(SPptr[i]).vel_grad[0][0] += -vabx*dkxtest / (SPptr[i]).dens;
//(SPptr[i]).vel_grad[0][1] += -vaby*dkxtest / (SPptr[i]).dens;
//(SPptr[i]).vel_grad[0][2] += -vabz*dkxtest / (SPptr[i]).dens;
//(SPptr[i]).vel_grad[1][0] += -vabx*dkytest / (SPptr[i]).dens;
//(SPptr[i]).vel_grad[1][1] += -vaby*dkytest / (SPptr[i]).dens;
//(SPptr[i]).vel_grad[1][2] += -vabz*dkytest / (SPptr[i]).dens;
//(SPptr[i]).vel_grad[2][0] += -vabx*dkztest / (SPptr[i]).dens;
//(SPptr[i]).vel_grad[2][1] += -vaby*dkztest / (SPptr[i]).dens;
//(SPptr[i]).vel_grad[2][2] += -vabz*dkztest / (SPptr[i]).dens;
///(SPptr[i]).stress_accel[0] += ((SPptr[i]).stress_tensor[0][0] * dkxtest + (SPptr[i]).stress_tensor[0][1] * dkytest + (SPptr[i]).stress_tensor[0][2] * dkztest) / pow((SPptr[i]).dens, 2) + ((SPptr[i]).stress_tensor[0][0] * dkxtest + (SPptr[i]).stress_tensor[0][1] * dkytest + (SPptr[i]).stress_tensor[0][2] * dkztest) / pow((SPptr[i]).dens, 2);
///(SPptr[i]).stress_accel[1] += ((SPptr[i]).stress_tensor[1][0] * dkxtest + (SPptr[i]).stress_tensor[1][1] * dkytest + (SPptr[i]).stress_tensor[1][2] * dkztest) / pow((SPptr[i]).dens, 2) + ((SPptr[i]).stress_tensor[1][0] * dkxtest + (SPptr[i]).stress_tensor[1][1] * dkytest + (SPptr[i]).stress_tensor[1][2] * dkztest) / pow((SPptr[i]).dens, 2);
///(SPptr[i]).stress_accel[2] += ((SPptr[i]).stress_tensor[2][0] * dkxtest + (SPptr[i]).stress_tensor[2][1] * dkytest + (SPptr[i]).stress_tensor[2][2] * dkztest) / pow((SPptr[i]).dens, 2) + ((SPptr[i]).stress_tensor[2][0] * dkxtest + (SPptr[i]).stress_tensor[2][1] * dkytest + (SPptr[i]).stress_tensor[2][2] * dkztest) / pow((SPptr[i]).dens, 2);
atomicAdd(&(SPptr[i].newdens), k *(1 + float(!(SPptr[i]).boundary)*float((SPptr[j]).boundary)*BDENSFACTOR));
atomicAdd(&(SPptr[i].newdelpressx), dpx);
atomicAdd(&(SPptr[i].newdelpressy), dpy);
atomicAdd(&(SPptr[i].newdelpressz), dpz);
// (no barrier here: the atomicAdd()s above need no synchronization, and a __syncthreads() inside this divergent branch would be undefined behavior)
}
}
}
}
}
/*
float tempdens = 0;
float tempdelpressx = 0;
float tempdelpressy = 0;
float tempdelpressz = 0;
//float tempdiffusionx = 0;
//float tempdiffusiony = 0;
//float tempdiffusionz = 0;
if (idx<nspts){
for (int i = 0; i < nspts; i++) {
//if (idx != i && SPptr[idx].cellnumber == SPptr[i].cellnumber) { printf("%d, %d, %d \n", SPptr[idx].cellnumber, SPptr[i].cellnumber,neighbours[SPptr[idx].cellnumber*nspts + i]); }
if (neighbours[SPptr[idx].cellnumber*nspts + i]) {
//printf("%d, %d \n", SPptr[idx].cellnumber, SPptr[i].cellnumber);
float ds = (SPptr[idx]).distance((SPptr[i]));
if (ds <= (2 * cutoff) && ds > 0) {
float k = kernel(ds);
float rabx = (SPptr[idx]).rab_x((SPptr[i]));
float raby = (SPptr[idx]).rab_y((SPptr[i]));
float rabz = (SPptr[idx]).rab_z((SPptr[i]));
float vabx = (SPptr[idx]).vab_x((SPptr[i]));
float vaby = (SPptr[idx]).vab_y((SPptr[i]));
float vabz = (SPptr[idx]).vab_z((SPptr[i]));
float dkx = kernel_derivative(ds)*rabx / ds;
float dky = kernel_derivative(ds)*raby / ds;
float dkz = kernel_derivative(ds)*rabz / ds;
float dkxtest = kernel_test(ds)*rabx / ds;
float dkytest = kernel_test(ds)*raby / ds;
float dkztest = kernel_test(ds)*rabz / ds;
float d = dot_prod(vabx, vaby, vabz, rabx, raby, rabz);
float d2 = pow(ds, 2);
float s = (ALPHA_FLUID * SOUND * (cutoff * (d / (d2 + 0.01*pow(cutoff, 2))) + 50 * 1.0 / SOUND*pow(cutoff * (d / (d2 + 0.01*pow(cutoff, 2))), 2)) / (((SPptr[idx]).dens + (SPptr[i]).dens) / 2.0)) *(d < 0)*(1 + (!(SPptr[idx]).boundary)*((SPptr[i]).boundary) * ALPHA_BOUNDARY);
float s2 = ALPHA_LAMINAR_FLUID * SOUND * cutoff / ((SPptr[idx]).dens + (SPptr[i]).dens)*d*(d < 0) / (d2 + 0.01*pow(cutoff, 2))*(1 + (!(SPptr[idx]).boundary)*((SPptr[i]).boundary) *ALPHA_LAMINAR_BOUNDARY); //laminar
float dpx = ((SPptr[i]).press / pow((SPptr[i]).dens, 2) + (SPptr[idx]).press / pow((SPptr[idx]).dens, 2) + s + s2)*dkx;
float dpy = ((SPptr[i]).press / pow((SPptr[i]).dens, 2) + (SPptr[idx]).press / pow((SPptr[idx]).dens, 2) + s + s2)*dky;
float dpz = ((SPptr[i]).press / pow((SPptr[i]).dens, 2) + (SPptr[idx]).press / pow((SPptr[idx]).dens, 2) + s + s2)*dkz;
//(SPptr[index]).vel_grad[0][0] += -vabx*dkxtest / (SPptr[i]).dens;
//(SPptr[index]).vel_grad[0][1] += -vaby*dkxtest / (SPptr[i]).dens;
//(SPptr[index]).vel_grad[0][2] += -vabz*dkxtest / (SPptr[i]).dens;
//(SPptr[index]).vel_grad[1][0] += -vabx*dkytest / (SPptr[i]).dens;
//(SPptr[index]).vel_grad[1][1] += -vaby*dkytest / (SPptr[i]).dens;
//(SPptr[index]).vel_grad[1][2] += -vabz*dkytest / (SPptr[i]).dens;
//(SPptr[index]).vel_grad[2][0] += -vabx*dkztest / (SPptr[i]).dens;
//(SPptr[index]).vel_grad[2][1] += -vaby*dkztest / (SPptr[i]).dens;
//(SPptr[index]).vel_grad[2][2] += -vabz*dkztest / (SPptr[i]).dens;
///(SPptr[index]).stress_accel[0] += ((SPptr[index]).stress_tensor[0][0] * dkxtest + (SPptr[index]).stress_tensor[0][1] * dkytest + (SPptr[index]).stress_tensor[0][2] * dkztest) / pow((SPptr[index]).dens, 2) + ((SPptr[i]).stress_tensor[0][0] * dkxtest + (SPptr[i]).stress_tensor[0][1] * dkytest + (SPptr[i]).stress_tensor[0][2] * dkztest) / pow((SPptr[i]).dens, 2);
///(SPptr[index]).stress_accel[1] += ((SPptr[index]).stress_tensor[1][0] * dkxtest + (SPptr[index]).stress_tensor[1][1] * dkytest + (SPptr[index]).stress_tensor[1][2] * dkztest) / pow((SPptr[index]).dens, 2) + ((SPptr[i]).stress_tensor[1][0] * dkxtest + (SPptr[i]).stress_tensor[1][1] * dkytest + (SPptr[i]).stress_tensor[1][2] * dkztest) / pow((SPptr[i]).dens, 2);
///(SPptr[index]).stress_accel[2] += ((SPptr[index]).stress_tensor[2][0] * dkxtest + (SPptr[index]).stress_tensor[2][1] * dkytest + (SPptr[index]).stress_tensor[2][2] * dkztest) / pow((SPptr[index]).dens, 2) + ((SPptr[i]).stress_tensor[2][0] * dkxtest + (SPptr[i]).stress_tensor[2][1] * dkytest + (SPptr[i]).stress_tensor[2][2] * dkztest) / pow((SPptr[i]).dens, 2);
tempdens += k*(1 + float(!(SPptr[idx]).boundary)*float((SPptr[i]).boundary)*BDENSFACTOR);
tempdelpressx += dpx;
tempdelpressy += dpy;
tempdelpressz += dpz;
///tempdiffusionx += 1 / (SPptr[i]).dens*dkx;
///tempdiffusiony += 1 / (SPptr[i]).dens*dky;
///tempdiffusionz += 1 / (SPptr[i]).dens*dkz;
}
}
}
(SPptr[idx]).newdens = (tempdens);
(SPptr[idx]).newdelpressx = tempdelpressx;
(SPptr[idx]).newdelpressy = tempdelpressy;
(SPptr[idx]).newdelpressz = tempdelpressz;
//(SPptr[idx]).diffusionx = tempdiffusionx;
//(SPptr[idx]).diffusiony = tempdiffusiony;
//(SPptr[idx]).diffusionz = tempdiffusionz;
/*if ((SPptr[index]).solid) {
float tr = 0; //trace of strain rate
float tr2 = 0; //trace of stress tensor
float tr3 = 0; //double dot of stress tensor
float tr4 = 0; //trace of stress tensor times strain rate
float tr5 = 0; //double dot of strain rate
for (int p = 0; p < 3; p++) {
for (int q = 0; q < 3; q++) {
(SPptr[index]).strain_rate[p][q] = 0.5*((SPptr[index]).vel_grad[p][q] + (SPptr[index]).vel_grad[q][p]);
(SPptr[index]).stress_tensor_squared[p][q] = pow((SPptr[index]).stress_tensor[p][q], 2);
tr3 += 0.5*(SPptr[index]).stress_tensor_squared[p][q];
(SPptr[index]).strain_rate_squared[p][q] = pow((SPptr[index]).strain_rate[p][q], 2);
tr5 += (SPptr[index]).strain_rate_squared[p][q];
tr4 += (SPptr[index]).stress_tensor[p][q] * (SPptr[index]).strain_rate[q][p];
}
tr += (SPptr[index]).strain_rate[p][p];
tr2 += (SPptr[index]).stress_tensor[p][p];
}
// std::cout << (SPptr[index]).press << "\n";
for (int p = 0; p < 3; p++) {
for (int q = 0; q < 3; q++) {
if (3 * tan(PHI) / (sqrt(9 + 12 * pow(tan(PHI), 2)))*(SPptr[index]).press + KC / (sqrt(9 + 12 * pow(tan(PHI), 2))) < tr3 && tr3 != 0) {
(SPptr[index]).stress_tensor[p][q] *= (3 * tan(PHI) / (sqrt(9 + 12 * pow(tan(PHI), 2)))*(SPptr[index]).press + KC / (sqrt(9 + 12 * pow(tan(PHI), 2)))) / tr3;
}
(SPptr[index]).stress_rate[p][q] = 3 * C1*((SPptr[index]).press)*((SPptr[index]).strain_rate[p][q] - 1. / 3.*tr*(p == q)) + C1*C2*(tr4 + tr*(SPptr[index]).press) / (pow((SPptr[index]).press, 2) + 1e8)*(SPptr[index]).stress_tensor[p][q] - C1*C3*sqrt(tr5)*(SPptr[index]).stress_tensor[p][q];
//std::cout << tr4 << ", " << tr*(SPptr[index]).press << "\n";
}
}
}*/
//}
__syncthreads();
}
__global__ void mykernel2(Particle *SPptr, int *cells, int *start, int *end, int nspts, float *spts, float *a3, float *b3) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int bidx = blockIdx.x;
int tidx = threadIdx.x;
if (index < nspts) {
if (!(SPptr[index]).flag) {
spts[(3 * index)] = (SPptr[index]).xcoord;
spts[(3 * index) + 1] = (SPptr[index]).ycoord;
spts[(3 * index) + 2] = (SPptr[index]).zcoord;
a3[index] = ((SPptr[index])).dens;
b3[index] = SPptr[index].cellnumber;
}
(SPptr[index]).update();
(SPptr[index]).cellnumber = int((SPptr[index].xcoord - XMIN) / CELLSIZE)*GRIDSIZE*GRIDSIZE + int((SPptr[index].ycoord - YMIN) / CELLSIZE)*GRIDSIZE + int((SPptr[index].zcoord - ZMIN) / CELLSIZE);
//SPptr[index].cellnumber = morton(int((SPptr[index].xcoord - XMIN) / CELLSIZE), int((SPptr[index].ycoord - YMIN) / CELLSIZE), int((SPptr[index].zcoord - ZMIN) / CELLSIZE));
cells[index] = SPptr[index].cellnumber;
SPptr[index].newdens = 0;
SPptr[index].newdelpressx = 0;
SPptr[index].newdelpressy = 0;
SPptr[index].newdelpressz = 0;
}
if (index < NUMCELLS) {
start[index] = -1;
end[index] = -1;
}
__syncthreads();
}
|
c360986763de157b97722ed5d90a3bc3378d98a7.cu
|
#include "FluidGPU.cuh"
#include <cmath>
#include <cuda_runtime.h>
#include <iostream>
#include <thrust/sort.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include <cuda.h>
float kernel(float r) {
if (r >= 0 && r <= cutoff) {
return 1. / 3.14159 / (powf(cutoff, 3))*(1 - 3. / 2. * powf((r / cutoff), 2) + 3. / 4. * powf((r / cutoff), 3));
}
else if (r > cutoff && r < (2 * cutoff)) {
return 1. / 3.14159 / (powf(cutoff, 3)) * 1 / 4. * powf(2 - (r / cutoff), 3);
}
else {
return 0;
}
}
float kernel_test(float r) {
if (r >= 0 && r <= cutoff) {
return 1. / 3.14159 / (powf(cutoff, 4))*(1 - 3. * powf((r / cutoff), 1) + 9. / 4. * powf((r / cutoff), 2));
}
else if (r > cutoff && r < (2 * cutoff)) {
return -1. / 3.14159 / (powf(cutoff, 4)) * 1 / 2. * powf(2 - (r / cutoff), 2);
}
else {
return 0;
}
}
float kernel_derivative(float r) {
if (r < cutoff) {
return -45.0 / 3.14159 / powf(cutoff, 6)*powf((cutoff - r), 2);
}
else {
return 0;
}
}
//Dot product
inline float dot_prod(float x1, float y1, float z1, float x2, float y2, float z2) {
return x1*x2 + y1*y2 + z1*z2;
}
//Cross products
inline float cross_prod_x(float x1, float y1, float z1, float x2, float y2, float z2) {
return y1*z2 - z1*y2;
}
inline float cross_prod_y(float x1, float y1, float z1, float x2, float y2, float z2) {
return -x1*z2 + z1*x2;
}
inline float cross_prod_z(float x1, float y1, float z1, float x2, float y2, float z2) {
return x1*y2 - y1*x2;
}
__device__ int morton(unsigned int x, unsigned int y, unsigned int z) {
//int x = (bidx / GRIDSIZE / GRIDSIZE);
//int y = (bidx / GRIDSIZE % GRIDSIZE);
//int z = (bidx % GRIDSIZE);
x = (x | (x << 16)) & 0x030000FF;
x = (x | (x << 8)) & 0x0300F00F;
x = (x | (x << 4)) & 0x030C30C3;
x = (x | (x << 2)) & 0x09249249;
y = (y | (y << 16)) & 0x030000FF;
y = (y | (y << 8)) & 0x0300F00F;
y = (y | (y << 4)) & 0x030C30C3;
y = (y | (y << 2)) & 0x09249249;
z = (z | (z << 16)) & 0x030000FF;
z = (z | (z << 8)) & 0x0300F00F;
z = (z | (z << 4)) & 0x030C30C3;
z = (z | (z << 2)) & 0x09249249;
return x | (y << 1) | (z << 2);
}
__device__ inline int demorton(unsigned int x, int b) {
//b should be 0 for x, 1 for y, 2 for z
switch (b) {
case 0: break;
case 1: x = (x >> 1);
break;
case 2: x = (x >> 2);
break;
}
x &= 0x09249249; // x = ---- 9--8 --7- -6-- 5--4 --3- -2-- 1--0
x = (x | (x >> 2)) & 0x030c30c3; // x = ---- --98 ---- 76-- --54 ---- 32-- --10
x = (x | (x >> 4)) & 0x0300f00f; // x = ---- --98 ---- ---- 7654 ---- ---- 3210
x = (x | (x >> 8)) & 0xff0000ff; // x = ---- --98 ---- ---- ---- ---- 7654 3210
x = (x | (x >> 16)) & 0x000003ff; // x = ---- ---- ---- ---- ---- --98 7654 3210
return x;
}
__global__ void findneighbours(int *cell, int *start, int *end, int nspts) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < nspts) {
if (idx == 0 || cell[idx] != cell[idx - 1]) { // test idx first so cell[-1] is never read
start[cell[idx]] = idx;
}
if (idx == nspts - 1 || cell[idx] != cell[idx + 1]) { // test idx first so cell[nspts] is never read
end[cell[idx]] = idx;
}
}
}
__global__ void mykernel(Particle *SPptr, int *cell, int *start, int *end, int nspts) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int bidx = blockIdx.x;
int tidx = threadIdx.x;
int nb[27] = { -GRIDSIZE*GRIDSIZE - GRIDSIZE - 1, -GRIDSIZE*GRIDSIZE - GRIDSIZE,-GRIDSIZE*GRIDSIZE - GRIDSIZE + 1, -GRIDSIZE*GRIDSIZE - 1, -GRIDSIZE*GRIDSIZE, -GRIDSIZE*GRIDSIZE + 1, -GRIDSIZE*GRIDSIZE + GRIDSIZE - 1, -GRIDSIZE*GRIDSIZE + GRIDSIZE, -GRIDSIZE*GRIDSIZE + GRIDSIZE + 1,
-GRIDSIZE - 1, -GRIDSIZE,-GRIDSIZE + 1, -1, 0, +1, GRIDSIZE - 1, GRIDSIZE, GRIDSIZE + 1,
GRIDSIZE*GRIDSIZE - GRIDSIZE - 1, GRIDSIZE*GRIDSIZE - GRIDSIZE,GRIDSIZE*GRIDSIZE - GRIDSIZE + 1, GRIDSIZE*GRIDSIZE - 1, GRIDSIZE*GRIDSIZE, GRIDSIZE*GRIDSIZE + 1, GRIDSIZE*GRIDSIZE + GRIDSIZE - 1, GRIDSIZE*GRIDSIZE + GRIDSIZE, GRIDSIZE*GRIDSIZE + GRIDSIZE + 1 };
//__shared__ int nb[27];
//if (tidx < 27) {
// int x = demorton(bidx, 0);
// int y = demorton(bidx, 1);
// int z = demorton(bidx, 2);
// nb[tidx] = morton(x + tidx/9-1, y + (tidx/3)%3-1, z + tidx%3-1);
//}
//__syncthreads();
__shared__ short int p[27];
__shared__ short int pidx[27];
int __shared__ sum[64];// = 0;
int __shared__ jj[64];// = 0;
volatile __shared__ int total;
volatile __shared__ int blockpop;
if (idx < 64) {
sum[idx] = 650;
jj[idx] = 650;
}
__syncthreads();
//__shared__ short int sum[27];
//__shared__ short int j[27];
//if (idx <nspts) { printf("%d, %d \n", idx, SPptr[idx].cellnumber); }
if (tidx < 27) { p[tidx] = 0; }
if (start[bidx] >= 0) {
//if (bidx == 0) { printf("%d\n", start[bidx]); }
///////////count and sort population of neighbour cells//////////////
if (tidx < 27 && bidx+ nb[tidx] >= 0 && bidx + nb[tidx] < NUMCELLS && start[bidx + nb[tidx]] >= 0 && end[bidx + nb[tidx]] >= 0 && start[bidx + nb[tidx]] < nspts && 1 + end[bidx + nb[tidx]] - start[bidx + nb[tidx]] > 0 ) {
p[tidx] = 1 + end[bidx + nb[tidx]] - start[bidx + nb[tidx]]; //count population of neighbour cells so we know how many threads to use
pidx[tidx] = tidx;
}
if (tidx == 13) { blockpop = p[tidx]; }
}
else {
if (tidx == 13) { blockpop = 0; }
}
__syncthreads();
//if (bidx == 21641 && tidx==0) { printf("%d %d %d \n", p[13], nb[13], start[nb[13]]); }
if (start[bidx] >= 0) {
if (tidx == 0) {
total = 0;
for (int i = 0; i < 27; i++) {
if (p[i] < 64 && p[i]>0 && bidx + nb[i] >= 0 && bidx + nb[i] < NUMCELLS && start[bidx + nb[i]] >= 0 && end[bidx + nb[i]] >= 0 && start[bidx + nb[i]] < nspts) { total += p[i]; }
}
}
}
else {
if (tidx == 0) {total = 0; }
}
__syncthreads();
if (start[bidx] >= 0) {
if (tidx == 0) {
int count = 0;
for (int i = 0; i < 27; i++) {
if (p[i] != 0) {
p[count++] = p[i];
pidx[count - 1] = pidx[i]; //sort
}
}
while (count < 27) {
p[count++] = 0; //need to reset popidx in a future kernel
pidx[count - 1] = 0;
}
}
}
__syncthreads();
if (start[bidx] >= 0) {
if (tidx < total) {
sum[tidx] = 0;
jj[tidx] = 0;
while (tidx + 1 > sum[tidx]) {
sum[tidx] += p[jj[tidx]];
jj[tidx]++;
}
}
}
__syncthreads();
//if (bidx== 34624 && tidx < total) { printf("tidx: %d, cell#:%d, jj:%d, sum:%d \n", tidx, bidx + nb[pidx[jj[tidx] - 1]], jj[tidx],p[jj[tidx]]); }
//__syncthreads();
// __shared__ float k[8];
// __shared__ float rabx[8];
// __shared__ float raby[8];
// __shared__ float rabz[8];
// __shared__ float vabx[8];
// __shared__ float vaby[8];
//__shared__ float vabz[8];
if (start[bidx] >= 0) {
if (tidx < total && bidx + nb[pidx[jj[tidx] - 1]] >= 0 && bidx + nb[pidx[jj[tidx] - 1]] < NUMCELLS) {
///////////////////////////////////////////////////////////////////
volatile int j = start[bidx + nb[pidx[jj[tidx] - 1]]] + sum[tidx] - (tidx + 1);
if (start[bidx + nb[pidx[jj[tidx] - 1]]] >= 0 && j < nspts && j >= 0) {
//int i = start[bidx] + tidx / total;
for (volatile int i = start[bidx]; i <= end[bidx]; i++) {
float ds = (SPptr[i]).distance((SPptr[j]));
if (ds <= (2 * cutoff) && ds > 0) {
volatile float k = kernel(ds);
volatile float rabx = (SPptr[i]).rab_x((SPptr[j]));
volatile float raby = (SPptr[i]).rab_y((SPptr[j]));
volatile float rabz = (SPptr[i]).rab_z((SPptr[j]));
volatile float vabx = (SPptr[i]).vab_x((SPptr[j]));
volatile float vaby = (SPptr[i]).vab_y((SPptr[j]));
volatile float vabz = (SPptr[i]).vab_z((SPptr[j]));
volatile float dkx = kernel_derivative(ds)*rabx / ds;
volatile float dky = kernel_derivative(ds)*raby / ds;
volatile float dkz = kernel_derivative(ds)*rabz / ds;
//float dkxtest = kernel_test(ds)*rabx / ds;
//float dkytest = kernel_test(ds)*raby / ds;
//float dkztest = kernel_test(ds)*rabz / ds;
volatile float d = dot_prod(vabx, vaby, vabz, rabx, raby, rabz);
volatile float d2 = powf(ds, 2);
volatile float s = (ALPHA_FLUID * SOUND * (cutoff * (d / (d2 + 0.01*powf(cutoff, 2))) + 50 * 1.0 / SOUND*powf(cutoff * (d / (d2 + 0.01*powf(cutoff, 2))), 2)) / (((SPptr[i]).dens + (SPptr[j]).dens) / 2.0)) *(d < 0)*(1 + (!(SPptr[i]).boundary)*((SPptr[j]).boundary) * ALPHA_BOUNDARY);
//float s2 = ALPHA_LAMINAR_FLUID * SOUND * cutoff / ((SPptr[i]).dens + (SPptr[j]).dens)*d*(d < 0) / (d2 + 0.01*pow(cutoff, 2))*(1 + (!(SPptr[i]).boundary)*((SPptr[j]).boundary) *ALPHA_LAMINAR_BOUNDARY); //laminar
volatile float dpx = ((SPptr[j]).press / powf((SPptr[j]).dens, 2) + (SPptr[i]).press / powf((SPptr[i]).dens, 2) + s)*dkx;
volatile float dpy = ((SPptr[j]).press / powf((SPptr[j]).dens, 2) + (SPptr[i]).press / powf((SPptr[i]).dens, 2) + s)*dky;
volatile float dpz = ((SPptr[j]).press / powf((SPptr[j]).dens, 2) + (SPptr[i]).press / powf((SPptr[i]).dens, 2) + s)*dkz;
//(SPptr[i]).vel_grad[0][0] += -vabx*dkxtest / (SPptr[i]).dens;
//(SPptr[i]).vel_grad[0][1] += -vaby*dkxtest / (SPptr[i]).dens;
//(SPptr[i]).vel_grad[0][2] += -vabz*dkxtest / (SPptr[i]).dens;
//(SPptr[i]).vel_grad[1][0] += -vabx*dkytest / (SPptr[i]).dens;
//(SPptr[i]).vel_grad[1][1] += -vaby*dkytest / (SPptr[i]).dens;
//(SPptr[i]).vel_grad[1][2] += -vabz*dkytest / (SPptr[i]).dens;
//(SPptr[i]).vel_grad[2][0] += -vabx*dkztest / (SPptr[i]).dens;
//(SPptr[i]).vel_grad[2][1] += -vaby*dkztest / (SPptr[i]).dens;
//(SPptr[i]).vel_grad[2][2] += -vabz*dkztest / (SPptr[i]).dens;
///(SPptr[i]).stress_accel[0] += ((SPptr[i]).stress_tensor[0][0] * dkxtest + (SPptr[i]).stress_tensor[0][1] * dkytest + (SPptr[i]).stress_tensor[0][2] * dkztest) / pow((SPptr[i]).dens, 2) + ((SPptr[i]).stress_tensor[0][0] * dkxtest + (SPptr[i]).stress_tensor[0][1] * dkytest + (SPptr[i]).stress_tensor[0][2] * dkztest) / pow((SPptr[i]).dens, 2);
///(SPptr[i]).stress_accel[1] += ((SPptr[i]).stress_tensor[1][0] * dkxtest + (SPptr[i]).stress_tensor[1][1] * dkytest + (SPptr[i]).stress_tensor[1][2] * dkztest) / pow((SPptr[i]).dens, 2) + ((SPptr[i]).stress_tensor[1][0] * dkxtest + (SPptr[i]).stress_tensor[1][1] * dkytest + (SPptr[i]).stress_tensor[1][2] * dkztest) / pow((SPptr[i]).dens, 2);
///(SPptr[i]).stress_accel[2] += ((SPptr[i]).stress_tensor[2][0] * dkxtest + (SPptr[i]).stress_tensor[2][1] * dkytest + (SPptr[i]).stress_tensor[2][2] * dkztest) / pow((SPptr[i]).dens, 2) + ((SPptr[i]).stress_tensor[2][0] * dkxtest + (SPptr[i]).stress_tensor[2][1] * dkytest + (SPptr[i]).stress_tensor[2][2] * dkztest) / pow((SPptr[i]).dens, 2);
atomicAdd(&(SPptr[i].newdens), k *(1 + float(!(SPptr[i]).boundary)*float((SPptr[j]).boundary)*BDENSFACTOR));
atomicAdd(&(SPptr[i].newdelpressx), dpx);
atomicAdd(&(SPptr[i].newdelpressy), dpy);
atomicAdd(&(SPptr[i].newdelpressz), dpz);
// (no barrier here: the atomicAdd()s above need no synchronization, and a __syncthreads() inside this divergent branch would be undefined behavior)
}
}
}
}
}
/*
float tempdens = 0;
float tempdelpressx = 0;
float tempdelpressy = 0;
float tempdelpressz = 0;
//float tempdiffusionx = 0;
//float tempdiffusiony = 0;
//float tempdiffusionz = 0;
if (idx<nspts){
for (int i = 0; i < nspts; i++) {
//if (idx != i && SPptr[idx].cellnumber == SPptr[i].cellnumber) { printf("%d, %d, %d \n", SPptr[idx].cellnumber, SPptr[i].cellnumber,neighbours[SPptr[idx].cellnumber*nspts + i]); }
if (neighbours[SPptr[idx].cellnumber*nspts + i]) {
//printf("%d, %d \n", SPptr[idx].cellnumber, SPptr[i].cellnumber);
float ds = (SPptr[idx]).distance((SPptr[i]));
if (ds <= (2 * cutoff) && ds > 0) {
float k = kernel(ds);
float rabx = (SPptr[idx]).rab_x((SPptr[i]));
float raby = (SPptr[idx]).rab_y((SPptr[i]));
float rabz = (SPptr[idx]).rab_z((SPptr[i]));
float vabx = (SPptr[idx]).vab_x((SPptr[i]));
float vaby = (SPptr[idx]).vab_y((SPptr[i]));
float vabz = (SPptr[idx]).vab_z((SPptr[i]));
float dkx = kernel_derivative(ds)*rabx / ds;
float dky = kernel_derivative(ds)*raby / ds;
float dkz = kernel_derivative(ds)*rabz / ds;
float dkxtest = kernel_test(ds)*rabx / ds;
float dkytest = kernel_test(ds)*raby / ds;
float dkztest = kernel_test(ds)*rabz / ds;
float d = dot_prod(vabx, vaby, vabz, rabx, raby, rabz);
float d2 = pow(ds, 2);
float s = (ALPHA_FLUID * SOUND * (cutoff * (d / (d2 + 0.01*pow(cutoff, 2))) + 50 * 1.0 / SOUND*pow(cutoff * (d / (d2 + 0.01*pow(cutoff, 2))), 2)) / (((SPptr[idx]).dens + (SPptr[i]).dens) / 2.0)) *(d < 0)*(1 + (!(SPptr[idx]).boundary)*((SPptr[i]).boundary) * ALPHA_BOUNDARY);
float s2 = ALPHA_LAMINAR_FLUID * SOUND * cutoff / ((SPptr[idx]).dens + (SPptr[i]).dens)*d*(d < 0) / (d2 + 0.01*pow(cutoff, 2))*(1 + (!(SPptr[idx]).boundary)*((SPptr[i]).boundary) *ALPHA_LAMINAR_BOUNDARY); //laminar
float dpx = ((SPptr[i]).press / pow((SPptr[i]).dens, 2) + (SPptr[idx]).press / pow((SPptr[idx]).dens, 2) + s + s2)*dkx;
float dpy = ((SPptr[i]).press / pow((SPptr[i]).dens, 2) + (SPptr[idx]).press / pow((SPptr[idx]).dens, 2) + s + s2)*dky;
float dpz = ((SPptr[i]).press / pow((SPptr[i]).dens, 2) + (SPptr[idx]).press / pow((SPptr[idx]).dens, 2) + s + s2)*dkz;
//(SPptr[index]).vel_grad[0][0] += -vabx*dkxtest / (SPptr[i]).dens;
//(SPptr[index]).vel_grad[0][1] += -vaby*dkxtest / (SPptr[i]).dens;
//(SPptr[index]).vel_grad[0][2] += -vabz*dkxtest / (SPptr[i]).dens;
//(SPptr[index]).vel_grad[1][0] += -vabx*dkytest / (SPptr[i]).dens;
//(SPptr[index]).vel_grad[1][1] += -vaby*dkytest / (SPptr[i]).dens;
//(SPptr[index]).vel_grad[1][2] += -vabz*dkytest / (SPptr[i]).dens;
//(SPptr[index]).vel_grad[2][0] += -vabx*dkztest / (SPptr[i]).dens;
//(SPptr[index]).vel_grad[2][1] += -vaby*dkztest / (SPptr[i]).dens;
//(SPptr[index]).vel_grad[2][2] += -vabz*dkztest / (SPptr[i]).dens;
///(SPptr[index]).stress_accel[0] += ((SPptr[index]).stress_tensor[0][0] * dkxtest + (SPptr[index]).stress_tensor[0][1] * dkytest + (SPptr[index]).stress_tensor[0][2] * dkztest) / pow((SPptr[index]).dens, 2) + ((SPptr[i]).stress_tensor[0][0] * dkxtest + (SPptr[i]).stress_tensor[0][1] * dkytest + (SPptr[i]).stress_tensor[0][2] * dkztest) / pow((SPptr[i]).dens, 2);
///(SPptr[index]).stress_accel[1] += ((SPptr[index]).stress_tensor[1][0] * dkxtest + (SPptr[index]).stress_tensor[1][1] * dkytest + (SPptr[index]).stress_tensor[1][2] * dkztest) / pow((SPptr[index]).dens, 2) + ((SPptr[i]).stress_tensor[1][0] * dkxtest + (SPptr[i]).stress_tensor[1][1] * dkytest + (SPptr[i]).stress_tensor[1][2] * dkztest) / pow((SPptr[i]).dens, 2);
///(SPptr[index]).stress_accel[2] += ((SPptr[index]).stress_tensor[2][0] * dkxtest + (SPptr[index]).stress_tensor[2][1] * dkytest + (SPptr[index]).stress_tensor[2][2] * dkztest) / pow((SPptr[index]).dens, 2) + ((SPptr[i]).stress_tensor[2][0] * dkxtest + (SPptr[i]).stress_tensor[2][1] * dkytest + (SPptr[i]).stress_tensor[2][2] * dkztest) / pow((SPptr[i]).dens, 2);
tempdens += k*(1 + float(!(SPptr[idx]).boundary)*float((SPptr[i]).boundary)*BDENSFACTOR);
tempdelpressx += dpx;
tempdelpressy += dpy;
tempdelpressz += dpz;
///tempdiffusionx += 1 / (SPptr[i]).dens*dkx;
///tempdiffusiony += 1 / (SPptr[i]).dens*dky;
///tempdiffusionz += 1 / (SPptr[i]).dens*dkz;
}
}
}
(SPptr[idx]).newdens = (tempdens);
(SPptr[idx]).newdelpressx = tempdelpressx;
(SPptr[idx]).newdelpressy = tempdelpressy;
(SPptr[idx]).newdelpressz = tempdelpressz;
//(SPptr[idx]).diffusionx = tempdiffusionx;
//(SPptr[idx]).diffusiony = tempdiffusiony;
//(SPptr[idx]).diffusionz = tempdiffusionz;
/*if ((SPptr[index]).solid) {
float tr = 0; //trace of strain rate
float tr2 = 0; //trace of stress tensor
float tr3 = 0; //double dot of stress tensor
float tr4 = 0; //trace of stress tensor times strain rate
float tr5 = 0; //double dot of strain rate
for (int p = 0; p < 3; p++) {
for (int q = 0; q < 3; q++) {
(SPptr[index]).strain_rate[p][q] = 0.5*((SPptr[index]).vel_grad[p][q] + (SPptr[index]).vel_grad[q][p]);
(SPptr[index]).stress_tensor_squared[p][q] = pow((SPptr[index]).stress_tensor[p][q], 2);
tr3 += 0.5*(SPptr[index]).stress_tensor_squared[p][q];
(SPptr[index]).strain_rate_squared[p][q] = pow((SPptr[index]).strain_rate[p][q], 2);
tr5 += (SPptr[index]).strain_rate_squared[p][q];
tr4 += (SPptr[index]).stress_tensor[p][q] * (SPptr[index]).strain_rate[q][p];
}
tr += (SPptr[index]).strain_rate[p][p];
tr2 += (SPptr[index]).stress_tensor[p][p];
}
// std::cout << (SPptr[index]).press << "\n";
for (int p = 0; p < 3; p++) {
for (int q = 0; q < 3; q++) {
if (3 * tan(PHI) / (sqrt(9 + 12 * pow(tan(PHI), 2)))*(SPptr[index]).press + KC / (sqrt(9 + 12 * pow(tan(PHI), 2))) < tr3 && tr3 != 0) {
(SPptr[index]).stress_tensor[p][q] *= (3 * tan(PHI) / (sqrt(9 + 12 * pow(tan(PHI), 2)))*(SPptr[index]).press + KC / (sqrt(9 + 12 * pow(tan(PHI), 2)))) / tr3;
}
(SPptr[index]).stress_rate[p][q] = 3 * C1*((SPptr[index]).press)*((SPptr[index]).strain_rate[p][q] - 1. / 3.*tr*(p == q)) + C1*C2*(tr4 + tr*(SPptr[index]).press) / (pow((SPptr[index]).press, 2) + 1e8)*(SPptr[index]).stress_tensor[p][q] - C1*C3*sqrt(tr5)*(SPptr[index]).stress_tensor[p][q];
//std::cout << tr4 << ", " << tr*(SPptr[index]).press << "\n";
}
}
}*/
//}
__syncthreads();
}
__global__ void mykernel2(Particle *SPptr, int *cells, int *start, int *end, int nspts, float *spts, float *a3, float *b3) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int bidx = blockIdx.x;
int tidx = threadIdx.x;
if (index < nspts) {
if (!(SPptr[index]).flag) {
spts[(3 * index)] = (SPptr[index]).xcoord;
spts[(3 * index) + 1] = (SPptr[index]).ycoord;
spts[(3 * index) + 2] = (SPptr[index]).zcoord;
a3[index] = ((SPptr[index])).dens;
b3[index] = SPptr[index].cellnumber;
}
(SPptr[index]).update();
(SPptr[index]).cellnumber = int((SPptr[index].xcoord - XMIN) / CELLSIZE)*GRIDSIZE*GRIDSIZE + int((SPptr[index].ycoord - YMIN) / CELLSIZE)*GRIDSIZE + int((SPptr[index].zcoord - ZMIN) / CELLSIZE);
//SPptr[index].cellnumber = morton(int((SPptr[index].xcoord - XMIN) / CELLSIZE), int((SPptr[index].ycoord - YMIN) / CELLSIZE), int((SPptr[index].zcoord - ZMIN) / CELLSIZE));
cells[index] = SPptr[index].cellnumber;
SPptr[index].newdens = 0;
SPptr[index].newdelpressx = 0;
SPptr[index].newdelpressy = 0;
SPptr[index].newdelpressz = 0;
}
if (index < NUMCELLS) {
start[index] = -1;
end[index] = -1;
}
__syncthreads();
}
|
bc56c90040bc1e3c78e91531f06eec9d5c8d2e83.hip
|
// !!! This is a file automatically generated by hipify!!!
//----------------------------------------
//- Hello CUDA
//- Multithreaded version
//----------------------------------------
//
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define DATLEN 16
int Data[DATLEN];
int *GPUMem;
//----------------------------------------
//- Part executed on the GPU
//- Prefix the function name with __global__
//- The arguments can be chosen freely by the caller
//----------------------------------------
//
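// Illustration of the work split computed below, using the launch that appears in
// main() (<<<2, 4>>> with DATLEN = 16): there are 2*4 = 8 threads, each handling
// size = 16/8 = 2 elements; the thread with blockIdx.x = 1 and threadIdx.x = 2 gets
// thnum = 4*1 + 2 = 6, so start = 12 and it adds 'dat' to Data[12] and Data[13].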
__global__ void GPUadd(int *src, int len, int dat)
{
int threads,thnum;
int start, size;
start = gridDim.x;
threads = gridDim.x * blockDim.x; // total number of threads
size = len / threads; // number of elements handled per thread
thnum = blockDim.x * blockIdx.x + threadIdx.x; // sequential index of this thread
start = size * thnum; // first element handled by this thread
for (int i = 0; i < size; i++) // for this thread's share of the data
src[start++] += dat; // add dat to each array element
return;
}
//----------------------------------------
//- Display the array data
//----------------------------------------
//
void DispData(const char *s, int *dat)
{
printf("%s ",s); //
for (int i=0; i<DATLEN; i++) //
printf("%02d ",*dat++); //
printf("\n"); //
}
//----------------------------------------
//- Main
//----------------------------------------
//
int main(int argc, char *argv[])
{
int i;
size_t DataBytes;
printf("Welcom to CUDA!\n"); //
for (i=0; i<DATLEN;i++) // 10
Data[i] = i+10; //
DispData("GPU IN :-", Data); //
DataBytes = sizeof(int) * DATLEN; //
hipMalloc((void **)&GPUMem, DataBytes); // GPU
hipMemcpy(GPUMem, Data, // Data[]
DataBytes,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( GPUadd), dim3(2), dim3(4), 0, 0, GPUMem, DATLEN, 3); // GPUadd()GPU
hipMemcpy(Data, GPUMem, DataBytes, // Data[]
hipMemcpyDeviceToHost);
DispData("GPU OUT:-", Data); //
printf("Congraturations!\n"); //
return 0;
}
|
bc56c90040bc1e3c78e91531f06eec9d5c8d2e83.cu
|
//----------------------------------------
//- Hello CUDA
//- Multithreaded version
//----------------------------------------
//
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define DATLEN 16
int Data[DATLEN];
int *GPUMem;
//----------------------------------------
//- Part executed on the GPU
//- Prefix the function name with __global__
//- The arguments can be chosen freely by the caller
//----------------------------------------
//
__global__ void GPUadd(int *src, int len, int dat)
{
int threads,thnum;
int start, size;
start = gridDim.x;
threads = gridDim.x * blockDim.x; // total number of threads
size = len / threads; // number of elements handled per thread
thnum = blockDim.x * blockIdx.x + threadIdx.x; // sequential index of this thread
start = size * thnum; // first element handled by this thread
for (int i = 0; i < size; i++) // for this thread's share of the data
src[start++] += dat; // add dat to each array element
return;
}
//----------------------------------------
//- Display the array data
//----------------------------------------
//
void DispData(const char *s, int *dat)
{
printf("%s ",s); // データのキャプション
for (int i=0; i<DATLEN; i++) // 配列データサイズ分全部表示
printf("%02d ",*dat++); // 表示して
printf("\n"); // 最後に改行しておく
}
//----------------------------------------
//- Main
//----------------------------------------
//
int main(int argc, char *argv[])
{
int i;
size_t DataBytes;
printf("Welcom to CUDA!\n"); // ようこそ!
for (i=0; i<DATLEN;i++) // 初期値は10からのインクリメントデータにした
Data[i] = i+10; // (別になんでも良いのだけど)
DispData("GPU IN :-", Data); // とりあえず中身を表示しておく
DataBytes = sizeof(int) * DATLEN; // 配列データの総バイト数を計算して
cudaMalloc((void **)&GPUMem, DataBytes); // GPUとの共有メモリ領域から転送サイズ分を確保
cudaMemcpy(GPUMem, Data, // Data[]を共有メモリにコピー
DataBytes,cudaMemcpyHostToDevice);
GPUadd<<<2, 4>>>(GPUMem, DATLEN, 3); // GPUadd()関数をGPUで実行
cudaMemcpy(Data, GPUMem, DataBytes, // 完了してから共有メモリからData[]にコピー
cudaMemcpyDeviceToHost);
DispData("GPU OUT:-", Data); // 中身を表示
printf("Congraturations!\n"); // おめでとうございます!
return 0;
}
|
b4e8c8c54585b88f9c03002958b9d5ca0989131c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-----------------------------------------------------------------------------
Name: GPU_PHASE2.cu
Desc: This file contains GPU kernels for building a kd-tree
The kd-nodes are stored in a left balanced layout
Notes:
Kd-tree attributes
static -- we need to know all points "a priori" before building the kd-tree
balanced -- Tree has maximum height of O( log<2> n )
Left-Balanced tree array layout
-- The kd-nodes in the kd-tree are stored in a left-balanced tree layout
-- Given n points, We allocate n+1 nodes
-- The kd-node at index zero is ignored (wasted space)
-- The Root kd-node is always found at index 1
-- Given any node at position 'i'
-- The parent node is found at 'i/2'
-- The left child node is found at '2*i'
-- The right child node is found at '2*i+1'
d-Dimensionality -- 2D, 3D, 4D, ...
cyclical -- we follow a cyclical pattern in switching between axes
at each level of the tree,
for 2D <x,y,x,y,x,y,...>
for 3D <x,y,z,x,y,z,...>
for 4D <x,y,z,w,x,y,z,w,...>
for 6D <x,y,z,w,s,t,x,y,z,w,s,t,...>
etc.
Point Storage -- 1 search point is stored at each internal or leaf node
Minimal -- I have eliminated as many fields as possible
from the final kd-node data structures.
The only remaining field is the stored search point
During the build process, we need some temporary extra fields for tracking.
by Shawn Brown ([email protected])
-----------------------------------------------------------------------------*/
/*---------------------------------------------------------
Includes
---------------------------------------------------------*/
#include <stdio.h>
//#include <float.h>
#include "GPUTREE_API.h"
// Lookup "Median of 3" pivot index from 3 tests
__constant__ unsigned int g_m3Table[8] =
{
// M > R | L > R | L > M Example:
//-------|-------|-------
1u, // 0 0 0 <1 2 3>, <1 2 2>, or <2 2 2> => Median @ 2nd elem
0u, // 0 0 1 <2 1 3> or <2 1 2> => Median @ 1st elem
0u, // 0 1 0 Invalid situation
2u, // 0 1 1 <3 1 2> => Median @ 3rd elem
2u, // 1 0 0 <1 3 2> => Median @ 3rd elem
0u, // 1 0 1 Invalid situation
0u, // 1 1 0 <2 3 1> or <2 2 1> => Median @ 1st elem
1u // 1 1 1 <3 2 1> => Median @ 2nd elem
};
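// Worked example of the table lookup: for L = 5, M = 2, R = 7 the tests give
// (M > R) = 0, (L > R) = 0, (L > M) = 1, so testIdx = 001b = 1 and
// g_m3Table[1] = 0, i.e. the Left element (5) is chosen -- the median of {5, 2, 7}.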
/*---------------------------------------------------------
Name: GPU_BUILD_PHASE2
Desc: Build sub-tree (sub-range) of kd-tree
starting from specified per thread 'build item'
---------------------------------------------------------*/
__global__ void
P2_2D_BUILD_LBT
(
GPUNode_2D_LBT * lbtNodes, // OUT: lbt node list
unsigned int * pointIDs, // OUT: point indices are stored in this array
GPUNode_2D_MED * medNodes, // IN: median node list
GPUNode_2D_MED * medScratch,// IN: scratch space for temporary copying
GPU_BUILD_ITEM * buildQ, // IN: build queue (per thread)
unsigned int nPoints // IN: maximum # of points
)
{
// Local variables (shared items)
__shared__ GPU_BUILD_ITEM currBuild[P2_BUILD_THREADS_PER_BLOCK][P2_BUILD_STACK_DEPTH];
__shared__ GPUNode_2D_MED currMED[P2_BUILD_THREADS_PER_BLOCK];
__shared__ GPUNode_2D_LBT currLBT[P2_BUILD_THREADS_PER_BLOCK];
__shared__ float m3Vals[3];
// Local Variables (registers)
float pivotVal, currVal;
unsigned int currAxis, nextAxis;
unsigned int h2, currMedian, validRoot;
unsigned int currLeft, currRight;
unsigned int currStart, currEnd, currItem;
unsigned int currTarget, currFlags, outIdx, top;
unsigned int origStart, origEnd, origN, bDone;
unsigned int countBefore, countAfter, countEqual;
unsigned int startBefore, startAfter, startEqual;
unsigned int baseBefore, baseAfter, baseEqual;
/*-----------------------
Compute Thread Column
-----------------------*/
// Block thread index (local)
const int bidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Grid thread index (global)
const int cRow = (blockIdx.y * blockDim.y) + threadIdx.y; // thread row in grid of blocks
const int cCol = (blockIdx.x * blockDim.x) + threadIdx.x; // thread column in grid of blocks
const int gidx = (cRow * (gridDim.x * blockDim.x)) + cCol;
//----------------------------------
// Push first build item onto stack
//----------------------------------
{
unsigned int rootStart, rootEnd, rootFlags, rootAxis;
unsigned int rootN, h;
// Get root of sub-tree to process
top = 0;
currBuild[bidx][top] = buildQ[gidx];
rootStart = currBuild[bidx][top].start & NODE_INDEX_MASK;
rootEnd = currBuild[bidx][top].end & NODE_INDEX_MASK;
rootFlags = currBuild[bidx][top].flags;
rootAxis = ((rootFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT);
// Compute initial 2^h value at root of sub-tree
rootN = rootEnd - rootStart + 1;
h = (unsigned int)( floorf( log2f( (float)rootN ) ) );
h2 = 1<<h; // 2^h
// Reset flags at root (h2 + axis)
currBuild[bidx][top].flags = (h2 & NODE_INDEX_MASK)
| ((rootAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
// Is this a valid sub-tree for us to do work on ?
validRoot = (rootEnd < rootStart) ? 0u : 1u;
if (validRoot)
{
top++; // Increment top of stack
}
}
while (top > 0)
{
//--------------------------
// Pop Build Item off stack
//--------------------------
top--;
origStart = (currBuild[bidx][top].start & NODE_INDEX_MASK);
origEnd = (currBuild[bidx][top].end & NODE_INDEX_MASK);
currTarget = currBuild[bidx][top].targetID;
currFlags = currBuild[bidx][top].flags;
currAxis = ((currFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT);
nextAxis = ((currAxis == 1u) ? 0u : 1u);
origN = origEnd-origStart + 1;
//----------------------------------
// Compute Left-balanced Median
//----------------------------------
{
unsigned int br, minVal, LBM;
h2 = (currFlags & NODE_INDEX_MASK);
br = origN - (h2-1); // n - (2^h-1)
minVal = min(2*br,h2);
LBM = (h2 + minVal) >> 1; // LBM = (2^h + min(2*br, 2^h)) / 2
currMedian = origStart + (LBM - 1); // actual median
}
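// Worked example (illustrative values): at a sub-tree root holding origN = 10 nodes,
// h2 = 2^floor(log2(10)) = 8, so br = 10 - 7 = 3, min(2*br, h2) = 6 and
// LBM = (8 + 6) / 2 = 7, i.e. the 7th element of the range becomes this node's point.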
//---------------------------------------
// Partition [start,end] range on Median
//---------------------------------------
currStart = origStart;
currEnd = origEnd;
bDone = 0u;
while (! bDone)
{
//-----------------------------
// Compute Pivot
//-----------------------------
if (origN > 20)
{
//
// Use median of 3 variant (by table look-up)
//
unsigned int m_idx, testIdx, pivotIdx;
m_idx = (currStart+currEnd) >> 1; // (l+r)/2
// 3 slow reads from memory
m3Vals[0] = medNodes[currStart].pos[currAxis]; // Left
m3Vals[1] = medNodes[m_idx].pos[currAxis]; // Middle
m3Vals[2] = medNodes[currEnd].pos[currAxis]; // Right
// Compute pivot value via "Median of 3" table lookup
testIdx = ((m3Vals[1] > m3Vals[2]) << 2) // M > R test
| ((m3Vals[0] > m3Vals[2]) << 1) // L > R test
| (m3Vals[0] > m3Vals[1]); // L > M test
pivotIdx = g_m3Table[testIdx];
pivotVal = m3Vals[pivotIdx];
}
else
{
// Grab pivot from 1st element
// Slow read from main memory
pivotVal = medNodes[currStart].pos[currAxis];
}
//-------------------
// Count Nodes
//-------------------
countBefore = 0;
countEqual = 0;
countAfter = 0;
for (currItem = currStart; currItem <= currEnd; currItem++)
{
// Get current value
// Slow read from global memory
currMED[bidx] = medNodes[currItem];
currVal = currMED[bidx].pos[currAxis];
// Count # of values before and after pivot
if (currVal < pivotVal)
{
countBefore++;
}
else if (currVal > pivotVal)
{
countAfter++;
}
else
{
countEqual++;
}
// Slow write to scratch buffer
medScratch[currItem] = currMED[bidx];
}
//--------------------
// Compute Starts
//--------------------
baseBefore = currStart;
baseEqual = currStart + countBefore;
baseAfter = currStart + countBefore + countEqual;
startBefore = baseBefore;
startEqual = baseEqual;
startAfter = baseAfter;
//-------------------
// Partition Nodes
//-------------------
// partition nodes from scratch buffer
// back into actual kd-node array
for (currItem = currStart; currItem <= currEnd; currItem++)
{
// Read node from original location
// Slow read from global memory
currMED[bidx] = medScratch[currItem];
// Partition node into appropriate location
currVal = currMED[bidx].pos[currAxis];
if (currVal < pivotVal)
{
outIdx = startBefore;
startBefore++;
}
else if (currVal > pivotVal)
{
outIdx = startAfter;
startAfter++;
}
else
{
outIdx = startEqual;
startEqual++;
}
// Write node to new partitioned location
// Slow write to external memory
medNodes[outIdx] = currMED[bidx];
}
//-----------------------
// Done partitioning ?!?
//-----------------------
if (currMedian < baseEqual)
{
// Not done, iterate on {L} partition = [currStart, equalBase - 1]
currEnd = baseEqual - 1;
}
else if (currMedian >= baseAfter) // Median in after partition {R}
{
// Not done, iterate on {R} partition = range [afterBase, currEnd]
currStart = baseAfter;
}
else // Median is in median partition {M}
{
// Done, the left-balanced median is where we want it
bDone = 1u;
}
} // end while (!bDone)
//---------------------------------------
// Store Left-Balanced Median at target
//---------------------------------------
// Slow read from main memory
currMED[bidx] = medNodes[currMedian];
currLBT[bidx].pos[0] = currMED[bidx].pos[0];
currLBT[bidx].pos[1] = currMED[bidx].pos[1];
// Slow write to main memory
lbtNodes[currTarget] = currLBT[bidx];
pointIDs[currTarget] = currMED[bidx].m_searchIdx;
//---------------------------------------
// Compute Left and Right build items
//---------------------------------------
currLeft = currTarget << 1;
currRight = currLeft + 1;
if (currRight <= nPoints)
{
unsigned int rStart = currMedian+1;
unsigned int rEnd = origEnd;
// push right child onto stack
currBuild[bidx][top].start = (rStart & NODE_INDEX_MASK);
currBuild[bidx][top].end = (rEnd & NODE_INDEX_MASK);
currBuild[bidx][top].targetID = (currRight & NODE_INDEX_MASK);
//currBuild[bidx][top].flags = ((currHalf >> 1) & NODE_INDEX_MASK)
// | ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
currBuild[bidx][top].flags = ((h2 >> 1) & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
top++;
}
if (currLeft <= nPoints)
{
unsigned int lStart = origStart;
unsigned int lEnd = currMedian-1;
// push left child onto stack
currBuild[bidx][top].start = (lStart & NODE_INDEX_MASK);
currBuild[bidx][top].end = (lEnd & NODE_INDEX_MASK);
currBuild[bidx][top].targetID = (currLeft & NODE_INDEX_MASK);
//currBuild[bidx][top].flags = ((currHalf >> 1) & NODE_INDEX_MASK)
// | ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
currBuild[bidx][top].flags = ((h2 >> 1) & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
top++;
}
}
}
__global__ void
P2_2D_BUILD_STATS
(
GPUNode_2D_LBT * lbtNodes, // OUT: lbt node list
unsigned int * pointIDs, // OUT: point indices are stored in this array
GPU_BUILD_STATS * statsQ, // OUT: stats queue (per thread)
GPUNode_2D_MED * medNodes, // IN: median node list
GPUNode_2D_MED * medScratch,// IN: scratch space for temporary copying
GPU_BUILD_ITEM * buildQ, // IN: build queue (per thread)
unsigned int nPoints // IN: maximum # of points
)
{
// Local variables (shared items)
__shared__ GPU_BUILD_ITEM currBuild[P2_BUILD_THREADS_PER_BLOCK][P2_BUILD_STACK_DEPTH];
__shared__ GPUNode_2D_MED currMED[P2_BUILD_THREADS_PER_BLOCK];
__shared__ GPUNode_2D_LBT currLBT[P2_BUILD_THREADS_PER_BLOCK];
#ifdef _BUILD_STATS
__shared__ GPU_BUILD_STATS currStats[P2_BUILD_THREADS_PER_BLOCK];
#endif
__shared__ float m3Vals[3];
// Local Variables (registers)
float pivotVal, currVal;
unsigned int currAxis, nextAxis;
unsigned int h2, currMedian, validRoot;
unsigned int currLeft, currRight;
unsigned int currStart, currEnd, currItem;
unsigned int currTarget, currFlags, outIdx, top;
unsigned int origStart, origEnd, origN, bDone;
unsigned int countBefore, countAfter, countEqual;
unsigned int startBefore, startAfter, startEqual;
unsigned int baseBefore, baseAfter, baseEqual;
/*-----------------------
Compute Thread Column
-----------------------*/
// Block thread index (local)
const int bidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Grid thread index (global)
const int cRow = (blockIdx.y * blockDim.y) + threadIdx.y; // thread row in grid of blocks
const int cCol = (blockIdx.x * blockDim.x) + threadIdx.x; // thread column in grid of blocks
const int gidx = (cRow * (gridDim.x * blockDim.x)) + cCol;
#ifdef _BUILD_STATS
// Initialize Stats
currStats[bidx].cRootReads = 0;
currStats[bidx].cPivotReads = 0;
currStats[bidx].cCountReads = 0;
currStats[bidx].cCountWrites = 0;
currStats[bidx].cPartReads = 0;
currStats[bidx].cPartWrites = 0;
currStats[bidx].cStoreReads = 0;
currStats[bidx].cStoreWrites = 0;
currStats[bidx].cNodeLoops = 0;
currStats[bidx].cPartLoops = 0;
#endif
//----------------------------------
// Push first build item onto stack
//----------------------------------
{
unsigned int rootStart, rootEnd, rootFlags, rootAxis;
unsigned int rootN, h;
// Get root of sub-tree to process
top = 0;
// Slow read from main memory
currBuild[bidx][top] = buildQ[gidx];
#ifdef _BUILD_STATS
currStats[bidx].cRootReads++;
#endif
rootStart = currBuild[bidx][top].start & NODE_INDEX_MASK;
rootEnd = currBuild[bidx][top].end & NODE_INDEX_MASK;
rootFlags = currBuild[bidx][top].flags;
rootAxis = ((rootFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT);
// Compute initial 2^h value at root of sub-tree
rootN = rootEnd - rootStart + 1;
h = (unsigned int)( floorf( log2f( (float)rootN ) ) );
h2 = 1<<h; // 2^h
// Reset flags at root (h2 + axis)
currBuild[bidx][top].flags = (h2 & NODE_INDEX_MASK)
| ((rootAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
// Is this a valid sub-tree for us to do work on ?
validRoot = (rootEnd < rootStart) ? 0u : 1u;
if (validRoot)
{
top++; // Increment top of stack
}
}
while (top > 0)
{
//--------------------------
// Pop Build Item off stack
//--------------------------
#ifdef _BUILD_STATS
currStats[bidx].cNodeLoops++;
#endif
top--;
origStart = (currBuild[bidx][top].start & NODE_INDEX_MASK);
origEnd = (currBuild[bidx][top].end & NODE_INDEX_MASK);
currTarget = currBuild[bidx][top].targetID;
currFlags = currBuild[bidx][top].flags;
currAxis = ((currFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT);
nextAxis = ((currAxis == 1u) ? 0u : 1u);
origN = origEnd-origStart + 1;
//----------------------------------
// Compute Left-balanced Median
//----------------------------------
{
unsigned int br, minVal, LBM;
h2 = (currFlags & NODE_INDEX_MASK);
br = origN - (h2-1); // n - (2^h-1)
minVal = min(2*br,h2);
LBM = (h2 + minVal) >> 1; // LBM = (2^h + min(2*br, 2^h)) / 2
currMedian = origStart + (LBM - 1); // actual median
}
//---------------------------------------
// Partition [start,end] range on Median
//---------------------------------------
currStart = origStart;
currEnd = origEnd;
bDone = 0u;
while (! bDone)
{
#ifdef _BUILD_STATS
currStats[bidx].cPartLoops++;
#endif
//-----------------------------
// Compute Pivot
//-----------------------------
if (origN > 20)
{
//
// Use median of 3 variant (by table look-up)
//
unsigned int m_idx, testIdx, pivotIdx;
m_idx = (currStart+currEnd) >> 1; // (l+r)/2
// 3 slow reads from memory
m3Vals[0] = medNodes[currStart].pos[currAxis]; // Left
m3Vals[1] = medNodes[m_idx].pos[currAxis]; // Middle
m3Vals[2] = medNodes[currEnd].pos[currAxis]; // Right
#ifdef _BUILD_STATS
currStats[bidx].cPivotReads += 3;
#endif
// Compute pivot value via "Median of 3" table lookup
testIdx = ((m3Vals[1] > m3Vals[2]) << 2) // M > R test
| ((m3Vals[0] > m3Vals[2]) << 1) // L > R test
| (m3Vals[0] > m3Vals[1]); // L > M test
pivotIdx = g_m3Table[testIdx];
pivotVal = m3Vals[pivotIdx];
}
else
{
// Grab pivot from 1st element
// Slow read from main memory
pivotVal = medNodes[currStart].pos[currAxis];
#ifdef _BUILD_STATS
currStats[bidx].cPivotReads++;
#endif
}
//-------------------
// Count Nodes
//-------------------
countBefore = 0;
countEqual = 0;
countAfter = 0;
for (currItem = currStart; currItem <= currEnd; currItem++)
{
// Get current value
// Slow read from global memory
currMED[bidx] = medNodes[currItem];
currVal = currMED[bidx].pos[currAxis];
#ifdef _BUILD_STATS
currStats[bidx].cCountReads++;
#endif
// Count # of values before and after pivot
if (currVal < pivotVal)
{
countBefore++;
}
else if (currVal > pivotVal)
{
countAfter++;
}
else
{
countEqual++;
}
#ifdef _BUILD_STATS
currStats[bidx].cCountWrites++;
#endif
// Slow write to scratch buffer
medScratch[currItem] = currMED[bidx];
}
//--------------------
// Compute Starts
//--------------------
baseBefore = currStart;
baseEqual = currStart + countBefore;
baseAfter = currStart + countBefore + countEqual;
startBefore = baseBefore;
startEqual = baseEqual;
startAfter = baseAfter;
//-------------------
// Partition Nodes
//-------------------
// partition nodes from scratch buffer
// back into actual kd-node array
for (currItem = currStart; currItem <= currEnd; currItem++)
{
// Read node from original location
// Slow read from global memory
currMED[bidx] = medScratch[currItem];
#ifdef _BUILD_STATS
currStats[bidx].cPartReads++;
#endif
// Partition node into appropriate location
currVal = currMED[bidx].pos[currAxis];
if (currVal < pivotVal)
{
outIdx = startBefore;
startBefore++;
}
else if (currVal > pivotVal)
{
outIdx = startAfter;
startAfter++;
}
else
{
outIdx = startEqual;
startEqual++;
}
#ifdef _BUILD_STATS
currStats[bidx].cPartWrites++;
#endif
// Write node to new partitioned location
// Slow write to external memory
medNodes[outIdx] = currMED[bidx];
}
//-----------------------
// Done partitioning ?!?
//-----------------------
if (currMedian < baseEqual)
{
// Not done, iterate on {L} partition = [currStart, equalBase - 1]
currEnd = baseEqual - 1;
}
else if (currMedian >= baseAfter) // Median in after partition {R}
{
// Not done, iterate on {R} partition = range [afterBase, currEnd]
currStart = baseAfter;
}
else // Median is in median partition {M}
{
// Done, the left-balanced median is where we want it
bDone = 1u;
}
} // end while (!bDone)
//---------------------------------------
// Store Left-Balanced Median at target
//---------------------------------------
// Slow read from main memory
currMED[bidx] = medNodes[currMedian];
#ifdef _BUILD_STATS
currStats[bidx].cStoreReads++;
#endif
currLBT[bidx].pos[0] = currMED[bidx].pos[0];
currLBT[bidx].pos[1] = currMED[bidx].pos[1];
// Slow write to main memory
lbtNodes[currTarget] = currLBT[bidx];
pointIDs[currTarget] = currMED[bidx].m_searchIdx;
#ifdef _BUILD_STATS
currStats[bidx].cStoreWrites +=2;
#endif
//---------------------------------------
// Compute Left and Right build items
//---------------------------------------
currLeft = currTarget << 1;
currRight = currLeft + 1;
if (currRight <= nPoints)
{
unsigned int rStart = currMedian+1;
unsigned int rEnd = origEnd;
// push right child onto stack
currBuild[bidx][top].start = (rStart & NODE_INDEX_MASK);
currBuild[bidx][top].end = (rEnd & NODE_INDEX_MASK);
currBuild[bidx][top].targetID = (currRight & NODE_INDEX_MASK);
currBuild[bidx][top].flags = ((h2 >> 1) & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
top++;
}
if (currLeft <= nPoints)
{
unsigned int lStart = origStart;
unsigned int lEnd = currMedian-1;
// push left child onto stack
currBuild[bidx][top].start = (lStart & NODE_INDEX_MASK);
currBuild[bidx][top].end = (lEnd & NODE_INDEX_MASK);
currBuild[bidx][top].targetID = (currLeft & NODE_INDEX_MASK);
currBuild[bidx][top].flags = ((h2 >> 1) & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
top++;
}
}
#ifdef _BUILD_STATS
// Store Stats to output array
statsQ[gidx] = currStats[bidx];
#endif
}
|
b4e8c8c54585b88f9c03002958b9d5ca0989131c.cu
|
/*-----------------------------------------------------------------------------
Name: GPU_PHASE2.cu
Desc: This file contains GPU kernels for building a kd-tree
The kd-nodes are stored in a left balanced layout
Notes:
Kd-tree attributes
static -- we need to know all points "a priori" before building the kd-tree
balanced -- Tree has maximum height of O( log<2> n )
Left-Balanced tree array layout
-- The kd-nodes in the kd-tree are stored in a left-balanced tree layout
-- Given n points, We allocate n+1 nodes
-- The kd-node at index zero is ignored (wasted space)
-- The Root kd-node is always found at index 1
-- Given any node at position 'i'
-- The parent node is found at 'i/2'
-- The left child node is found at '2*i'
-- The right child node is found at '2*i+1'
d-Dimensionality -- 2D, 3D, 4D, ...
cyclical -- we follow a cyclical pattern in switching between axes
at each level of the tree,
for 2D <x,y,x,y,x,y,...>
for 3D <x,y,z,x,y,z,...>
for 4D <x,y,z,w,x,y,z,w,...>
for 6D <x,y,z,w,s,t,x,y,z,w,s,t,...>
etc.
Point Storage -- 1 search point is stored at each internal or leaf node
Minimal -- I have eliminated as many fields as possible
from the final kd-node data structures.
The only remaining field is the stored search point.
During the build process, we need some temporary extra fields for tracking.
by Shawn Brown ([email protected])
-----------------------------------------------------------------------------*/
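/*---------------------------------------------------------
  Illustrative sketch (not used by the kernels below):
  the index arithmetic behind the left-balanced layout
  described above, written out as hypothetical helpers.
---------------------------------------------------------*/
__host__ __device__ inline unsigned int LBT_Parent( unsigned int i ) { return (i >> 1); }       // parent at i/2
__host__ __device__ inline unsigned int LBT_Left( unsigned int i )   { return (i << 1); }       // left child at 2*i
__host__ __device__ inline unsigned int LBT_Right( unsigned int i )  { return (i << 1) + 1u; }  // right child at 2*i+1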
/*---------------------------------------------------------
Includes
---------------------------------------------------------*/
#include <stdio.h>
//#include <float.h>
#include "GPUTREE_API.h"
// Lookup "Median of 3" pivot index from 3 tests
__constant__ unsigned int g_m3Table[8] =
{
// M > R | L > R | L > M Example:
//-------|-------|-------
1u, // 0 0 0 <1 2 3>, <1 2 2>, or <2 2 2> => Median @ 2nd elem
0u, // 0 0 1 <2 1 3> or <2 1 2> => Median @ 1st elem
0u, // 0 1 0 Invalid situation
2u, // 0 1 1 <3 1 2> => Median @ 3rd elem
2u, // 1 0 0 <1 3 2> => Median @ 3rd elem
0u, // 1 0 1 Invalid situation
0u, // 1 1 0 <2 3 1> or <2 2 1> => Median @ 1st elem
1u // 1 1 1 <3 2 1> => Median @ 2nd elem
};
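// A minimal host-side sketch of how the table above is indexed: the three
// comparison results form a 3-bit index selecting which of the three samples
// (0 = left, 1 = middle, 2 = right) is the median. Illustrative only --
// 'MedianOf3Index' is a hypothetical name, and the kernels below inline this
// logic against the __constant__ table directly.
static inline unsigned int MedianOf3Index( float L, float M, float R )
{
	// Host-side copy of g_m3Table (the __constant__ array is device-only)
	const unsigned int m3[8] = { 1u, 0u, 0u, 2u, 2u, 0u, 0u, 1u };
	unsigned int testIdx = ((M > R) ? 4u : 0u)   // M > R test
	                     | ((L > R) ? 2u : 0u)   // L > R test
	                     | ((L > M) ? 1u : 0u);  // L > M test
	return m3[testIdx];
}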
/*---------------------------------------------------------
Name: GPU_BUILD_PHASE2
Desc: Build sub-tree (sub-range) of kd-tree
starting from specified per thread 'build item'
---------------------------------------------------------*/
__global__ void
P2_2D_BUILD_LBT
(
GPUNode_2D_LBT * lbtNodes, // OUT: lbt node list
unsigned int * pointIDs, // OUT: point indices are stored in this array
GPUNode_2D_MED * medNodes, // IN: median node list
GPUNode_2D_MED * medScratch,// IN: scratch space for temporary copying
GPU_BUILD_ITEM * buildQ, // IN: build queue (per thread)
unsigned int nPoints // IN: maximum # of points
)
{
// Local variables (shared items)
__shared__ GPU_BUILD_ITEM currBuild[P2_BUILD_THREADS_PER_BLOCK][P2_BUILD_STACK_DEPTH];
__shared__ GPUNode_2D_MED currMED[P2_BUILD_THREADS_PER_BLOCK];
__shared__ GPUNode_2D_LBT currLBT[P2_BUILD_THREADS_PER_BLOCK];
__shared__ float m3Vals[3];
// Local Variables (registers)
float pivotVal, currVal;
unsigned int currAxis, nextAxis;
unsigned int h2, currMedian, validRoot;
unsigned int currLeft, currRight;
unsigned int currStart, currEnd, currItem;
unsigned int currTarget, currFlags, outIdx, top;
unsigned int origStart, origEnd, origN, bDone;
unsigned int countBefore, countAfter, countEqual;
unsigned int startBefore, startAfter, startEqual;
unsigned int baseBefore, baseAfter, baseEqual;
/*-----------------------
Compute Thread Column
-----------------------*/
// Block thread index (local)
const int bidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Grid thread index (global)
const int cRow = (blockIdx.y * blockDim.y) + threadIdx.y; // thread row in grid of blocks
const int cCol = (blockIdx.x * blockDim.x) + threadIdx.x; // thread column in grid of blocks
const int gidx = (cRow * (gridDim.x * blockDim.x)) + cCol;
//----------------------------------
// Push first build item onto stack
//----------------------------------
{
unsigned int rootStart, rootEnd, rootFlags, rootAxis;
unsigned int rootN, h;
// Get root of sub-tree to process
top = 0;
currBuild[bidx][top] = buildQ[gidx];
rootStart = currBuild[bidx][top].start & NODE_INDEX_MASK;
rootEnd = currBuild[bidx][top].end & NODE_INDEX_MASK;
rootFlags = currBuild[bidx][top].flags;
rootAxis = ((rootFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT);
// Compute initial 2^h value at root of sub-tree
rootN = rootEnd - rootStart + 1;
h = (unsigned int)( floorf( log2f( (float)rootN ) ) );
h2 = 1<<h; // 2^h
// Reset flags at root (h2 + axis)
currBuild[bidx][top].flags = (h2 & NODE_INDEX_MASK)
| ((rootAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
// Is this a valid sub-tree for us to do work on ?
validRoot = (rootEnd < rootStart) ? 0u : 1u;
if (validRoot)
{
top++; // Increment top of stack
}
}
while (top > 0)
{
//--------------------------
// Pop Build Item off stack
//--------------------------
top--;
origStart = (currBuild[bidx][top].start & NODE_INDEX_MASK);
origEnd = (currBuild[bidx][top].end & NODE_INDEX_MASK);
currTarget = currBuild[bidx][top].targetID;
currFlags = currBuild[bidx][top].flags;
currAxis = ((currFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT);
nextAxis = ((currAxis == 1u) ? 0u : 1u);
origN = origEnd-origStart + 1;
//----------------------------------
// Compute Left-balanced Median
//----------------------------------
{
unsigned int br, minVal, LBM;
h2 = (currFlags & NODE_INDEX_MASK);
br = origN - (h2-1); // n - (2^h-1)
minVal = min(2*br,h2);
			LBM      = (h2 + minVal) >> 1;		// LBM = (2^h + min(2*br, 2^h)) / 2
currMedian = origStart + (LBM - 1); // actual median
}
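		// Worked example (illustrative): for origN = 5, h = floor(log2(5)) = 2,
		// so 2^h = 4 and br = 5 - (4-1) = 2; min(2*br, 2^h) = 4 and
		// LBM = (4 + 4)/2 = 4, i.e. the median is the 4th element of the range
		// (3 nodes fall in the left subtree, 1 node in the right).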
//---------------------------------------
// Partition [start,end] range on Median
//---------------------------------------
currStart = origStart;
currEnd = origEnd;
bDone = 0u;
while (! bDone)
{
//-----------------------------
// Compute Pivot
//-----------------------------
if (origN > 20)
{
//
// Use median of 3 variant (by table look-up)
//
unsigned int m_idx, testIdx, pivotIdx;
m_idx = (currStart+currEnd) >> 1; // (l+r)/2
// 3 slow reads from memory
m3Vals[0] = medNodes[currStart].pos[currAxis]; // Left
m3Vals[1] = medNodes[m_idx].pos[currAxis]; // Middle
m3Vals[2] = medNodes[currEnd].pos[currAxis]; // Right
// Compute pivot value via "Median of 3" table lookup
testIdx = ((m3Vals[1] > m3Vals[2]) << 2) // M > R test
| ((m3Vals[0] > m3Vals[2]) << 1) // L > R test
| (m3Vals[0] > m3Vals[1]); // L > M test
pivotIdx = g_m3Table[testIdx];
pivotVal = m3Vals[pivotIdx];
}
else
{
// Grab pivot from 1st element
// Slow read from main memory
pivotVal = medNodes[currStart].pos[currAxis];
}
//-------------------
// Count Nodes
//-------------------
countBefore = 0;
countEqual = 0;
countAfter = 0;
for (currItem = currStart; currItem <= currEnd; currItem++)
{
// Get current value
// Slow read from global memory
currMED[bidx] = medNodes[currItem];
currVal = currMED[bidx].pos[currAxis];
// Count # of values before and after pivot
if (currVal < pivotVal)
{
countBefore++;
}
else if (currVal > pivotVal)
{
countAfter++;
}
else
{
countEqual++;
}
// Slow write to scratch buffer
medScratch[currItem] = currMED[bidx];
}
//--------------------
// Compute Starts
//--------------------
baseBefore = currStart;
baseEqual = currStart + countBefore;
baseAfter = currStart + countBefore + countEqual;
startBefore = baseBefore;
startEqual = baseEqual;
startAfter = baseAfter;
//-------------------
// Partition Nodes
//-------------------
// partition nodes from scratch buffer
// back into actual kd-node array
for (currItem = currStart; currItem <= currEnd; currItem++)
{
// Read node from original location
// Slow read from global memory
currMED[bidx] = medScratch[currItem];
// Partition node into appropriate location
currVal = currMED[bidx].pos[currAxis];
if (currVal < pivotVal)
{
outIdx = startBefore;
startBefore++;
}
else if (currVal > pivotVal)
{
outIdx = startAfter;
startAfter++;
}
else
{
outIdx = startEqual;
startEqual++;
}
// Write node to new partitioned location
// Slow write to external memory
medNodes[outIdx] = currMED[bidx];
}
//-----------------------
// Done partitioning ?!?
//-----------------------
if (currMedian < baseEqual)
{
// Not done, iterate on {L} partition = [currStart, baseEqual - 1]
currEnd = baseEqual - 1;
}
else if (currMedian >= baseAfter) // Median in after partition {R}
{
// Not done, iterate on {R} partition = range [baseAfter, currEnd]
currStart = baseAfter;
}
else // Median is in median partition {M}
{
// Done, the left-balanced median is where we want it
bDone = 1u;
}
} // end while (!bDone)
//---------------------------------------
// Store Left-Balanced Median at target
//---------------------------------------
// Slow read from main memory
currMED[bidx] = medNodes[currMedian];
currLBT[bidx].pos[0] = currMED[bidx].pos[0];
currLBT[bidx].pos[1] = currMED[bidx].pos[1];
// Slow write to main memory
lbtNodes[currTarget] = currLBT[bidx];
pointIDs[currTarget] = currMED[bidx].m_searchIdx;
//---------------------------------------
// Compute Left and Right build items
//---------------------------------------
currLeft = currTarget << 1;
currRight = currLeft + 1;
if (currRight <= nPoints)
{
unsigned int rStart = currMedian+1;
unsigned int rEnd = origEnd;
// push right child onto stack
currBuild[bidx][top].start = (rStart & NODE_INDEX_MASK);
currBuild[bidx][top].end = (rEnd & NODE_INDEX_MASK);
currBuild[bidx][top].targetID = (currRight & NODE_INDEX_MASK);
//currBuild[bidx][top].flags = ((currHalf >> 1) & NODE_INDEX_MASK)
// | ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
currBuild[bidx][top].flags = ((h2 >> 1) & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
top++;
}
if (currLeft <= nPoints)
{
unsigned int lStart = origStart;
unsigned int lEnd = currMedian-1;
// push left child onto stack
currBuild[bidx][top].start = (lStart & NODE_INDEX_MASK);
currBuild[bidx][top].end = (lEnd & NODE_INDEX_MASK);
currBuild[bidx][top].targetID = (currLeft & NODE_INDEX_MASK);
//currBuild[bidx][top].flags = ((currHalf >> 1) & NODE_INDEX_MASK)
// | ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
currBuild[bidx][top].flags = ((h2 >> 1) & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
top++;
}
}
}
__global__ void
P2_2D_BUILD_STATS
(
GPUNode_2D_LBT * lbtNodes, // OUT: lbt node list
unsigned int * pointIDs, // OUT: point indices are stored in this array
GPU_BUILD_STATS * statsQ, // OUT: stats queue (per thread)
GPUNode_2D_MED * medNodes, // IN: median node list
GPUNode_2D_MED * medScratch,// IN: scratch space for temporary copying
GPU_BUILD_ITEM * buildQ, // IN: build queue (per thread)
unsigned int nPoints // IN: maximum # of points
)
{
// Local variables (shared items)
__shared__ GPU_BUILD_ITEM currBuild[P2_BUILD_THREADS_PER_BLOCK][P2_BUILD_STACK_DEPTH];
__shared__ GPUNode_2D_MED currMED[P2_BUILD_THREADS_PER_BLOCK];
__shared__ GPUNode_2D_LBT currLBT[P2_BUILD_THREADS_PER_BLOCK];
#ifdef _BUILD_STATS
__shared__ GPU_BUILD_STATS currStats[P2_BUILD_THREADS_PER_BLOCK];
#endif
__shared__ float m3Vals[3];
// Local Variables (registers)
float pivotVal, currVal;
unsigned int currAxis, nextAxis;
unsigned int h2, currMedian, validRoot;
unsigned int currLeft, currRight;
unsigned int currStart, currEnd, currItem;
unsigned int currTarget, currFlags, outIdx, top;
unsigned int origStart, origEnd, origN, bDone;
unsigned int countBefore, countAfter, countEqual;
unsigned int startBefore, startAfter, startEqual;
unsigned int baseBefore, baseAfter, baseEqual;
/*-----------------------
Compute Thread Column
-----------------------*/
// Block thread index (local)
const int bidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Grid thread index (global)
const int cRow = (blockIdx.y * blockDim.y) + threadIdx.y; // thread row in grid of blocks
const int cCol = (blockIdx.x * blockDim.x) + threadIdx.x; // thread column in grid of blocks
const int gidx = (cRow * (gridDim.x * blockDim.x)) + cCol;
#ifdef _BUILD_STATS
// Initialize Stats
currStats[bidx].cRootReads = 0;
currStats[bidx].cPivotReads = 0;
currStats[bidx].cCountReads = 0;
currStats[bidx].cCountWrites = 0;
currStats[bidx].cPartReads = 0;
currStats[bidx].cPartWrites = 0;
currStats[bidx].cStoreReads = 0;
currStats[bidx].cStoreWrites = 0;
currStats[bidx].cNodeLoops = 0;
currStats[bidx].cPartLoops = 0;
#endif
//----------------------------------
// Push first build item onto stack
//----------------------------------
{
unsigned int rootStart, rootEnd, rootFlags, rootAxis;
unsigned int rootN, h;
// Get root of sub-tree to process
top = 0;
// Slow read from main memory
currBuild[bidx][top] = buildQ[gidx];
#ifdef _BUILD_STATS
currStats[bidx].cRootReads++;
#endif
rootStart = currBuild[bidx][top].start & NODE_INDEX_MASK;
rootEnd = currBuild[bidx][top].end & NODE_INDEX_MASK;
rootFlags = currBuild[bidx][top].flags;
rootAxis = ((rootFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT);
// Compute initial 2^h value at root of sub-tree
rootN = rootEnd - rootStart + 1;
h = (unsigned int)( floorf( log2f( (float)rootN ) ) );
h2 = 1<<h; // 2^h
// Reset flags at root (h2 + axis)
currBuild[bidx][top].flags = (h2 & NODE_INDEX_MASK)
| ((rootAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
// Is this a valid sub-tree for us to do work on ?
validRoot = (rootEnd < rootStart) ? 0u : 1u;
if (validRoot)
{
top++; // Increment top of stack
}
}
while (top > 0)
{
//--------------------------
// Pop Build Item off stack
//--------------------------
#ifdef _BUILD_STATS
currStats[bidx].cNodeLoops++;
#endif
top--;
origStart = (currBuild[bidx][top].start & NODE_INDEX_MASK);
origEnd = (currBuild[bidx][top].end & NODE_INDEX_MASK);
currTarget = currBuild[bidx][top].targetID;
currFlags = currBuild[bidx][top].flags;
currAxis = ((currFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT);
nextAxis = ((currAxis == 1u) ? 0u : 1u);
origN = origEnd-origStart + 1;
//----------------------------------
// Compute Left-balanced Median
//----------------------------------
{
unsigned int br, minVal, LBM;
h2 = (currFlags & NODE_INDEX_MASK);
br = origN - (h2-1); // n - (2^h-1)
minVal = min(2*br,h2);
			LBM      = (h2 + minVal) >> 1;		// LBM = (2^h + min(2*br, 2^h)) / 2
currMedian = origStart + (LBM - 1); // actual median
}
//---------------------------------------
// Partition [start,end] range on Median
//---------------------------------------
currStart = origStart;
currEnd = origEnd;
bDone = 0u;
while (! bDone)
{
#ifdef _BUILD_STATS
currStats[bidx].cPartLoops++;
#endif
//-----------------------------
// Compute Pivot
//-----------------------------
if (origN > 20)
{
//
// Use median of 3 variant (by table look-up)
//
unsigned int m_idx, testIdx, pivotIdx;
m_idx = (currStart+currEnd) >> 1; // (l+r)/2
// 3 slow reads from memory
m3Vals[0] = medNodes[currStart].pos[currAxis]; // Left
m3Vals[1] = medNodes[m_idx].pos[currAxis]; // Middle
m3Vals[2] = medNodes[currEnd].pos[currAxis]; // Right
#ifdef _BUILD_STATS
currStats[bidx].cPivotReads += 3;
#endif
// Compute pivot value via "Median of 3" table lookup
testIdx = ((m3Vals[1] > m3Vals[2]) << 2) // M > R test
| ((m3Vals[0] > m3Vals[2]) << 1) // L > R test
| (m3Vals[0] > m3Vals[1]); // L > M test
pivotIdx = g_m3Table[testIdx];
pivotVal = m3Vals[pivotIdx];
}
else
{
// Grab pivot from 1st element
// Slow read from main memory
pivotVal = medNodes[currStart].pos[currAxis];
#ifdef _BUILD_STATS
currStats[bidx].cPivotReads++;
#endif
}
//-------------------
// Count Nodes
//-------------------
countBefore = 0;
countEqual = 0;
countAfter = 0;
for (currItem = currStart; currItem <= currEnd; currItem++)
{
// Get current value
// Slow read from global memory
currMED[bidx] = medNodes[currItem];
currVal = currMED[bidx].pos[currAxis];
#ifdef _BUILD_STATS
currStats[bidx].cCountReads++;
#endif
// Count # of values before and after pivot
if (currVal < pivotVal)
{
countBefore++;
}
else if (currVal > pivotVal)
{
countAfter++;
}
else
{
countEqual++;
}
#ifdef _BUILD_STATS
currStats[bidx].cCountWrites++;
#endif
// Slow write to scratch buffer
medScratch[currItem] = currMED[bidx];
}
//--------------------
// Compute Starts
//--------------------
baseBefore = currStart;
baseEqual = currStart + countBefore;
baseAfter = currStart + countBefore + countEqual;
startBefore = baseBefore;
startEqual = baseEqual;
startAfter = baseAfter;
//-------------------
// Partition Nodes
//-------------------
// partition nodes from scratch buffer
// back into actual kd-node array
for (currItem = currStart; currItem <= currEnd; currItem++)
{
// Read node from original location
// Slow read from global memory
currMED[bidx] = medScratch[currItem];
#ifdef _BUILD_STATS
currStats[bidx].cPartReads++;
#endif
// Partition node into appropriate location
currVal = currMED[bidx].pos[currAxis];
if (currVal < pivotVal)
{
outIdx = startBefore;
startBefore++;
}
else if (currVal > pivotVal)
{
outIdx = startAfter;
startAfter++;
}
else
{
outIdx = startEqual;
startEqual++;
}
#ifdef _BUILD_STATS
currStats[bidx].cPartWrites++;
#endif
// Write node to new partitioned location
// Slow write to external memory
medNodes[outIdx] = currMED[bidx];
}
//-----------------------
// Done partitioning ?!?
//-----------------------
if (currMedian < baseEqual)
{
// Not done, iterate on {L} partition = [currStart, baseEqual - 1]
currEnd = baseEqual - 1;
}
else if (currMedian >= baseAfter) // Median in after partition {R}
{
// Not done, iterate on {R} partition = range [baseAfter, currEnd]
currStart = baseAfter;
}
else // Median is in median partition {M}
{
// Done, the left-balanced median is where we want it
bDone = 1u;
}
} // end while (!bDone)
//---------------------------------------
// Store Left-Balanced Median at target
//---------------------------------------
// Slow read from main memory
currMED[bidx] = medNodes[currMedian];
#ifdef _BUILD_STATS
currStats[bidx].cStoreReads++;
#endif
currLBT[bidx].pos[0] = currMED[bidx].pos[0];
currLBT[bidx].pos[1] = currMED[bidx].pos[1];
// Slow write to main memory
lbtNodes[currTarget] = currLBT[bidx];
pointIDs[currTarget] = currMED[bidx].m_searchIdx;
#ifdef _BUILD_STATS
currStats[bidx].cStoreWrites +=2;
#endif
//---------------------------------------
// Compute Left and Right build items
//---------------------------------------
currLeft = currTarget << 1;
currRight = currLeft + 1;
if (currRight <= nPoints)
{
unsigned int rStart = currMedian+1;
unsigned int rEnd = origEnd;
// push right child onto stack
currBuild[bidx][top].start = (rStart & NODE_INDEX_MASK);
currBuild[bidx][top].end = (rEnd & NODE_INDEX_MASK);
currBuild[bidx][top].targetID = (currRight & NODE_INDEX_MASK);
currBuild[bidx][top].flags = ((h2 >> 1) & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
top++;
}
if (currLeft <= nPoints)
{
unsigned int lStart = origStart;
unsigned int lEnd = currMedian-1;
// push left child onto stack
currBuild[bidx][top].start = (lStart & NODE_INDEX_MASK);
currBuild[bidx][top].end = (lEnd & NODE_INDEX_MASK);
currBuild[bidx][top].targetID = (currLeft & NODE_INDEX_MASK);
currBuild[bidx][top].flags = ((h2 >> 1) & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
top++;
}
}
#ifdef _BUILD_STATS
// Store Stats to output array
statsQ[gidx] = currStats[bidx];
#endif
}
|
542aa455eaeb0279b58de5ccdcb7e96c95293988.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__
void initWith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
a[i] = num;
}
}
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
int deviceId;
int numberOfSMs;
hipGetDevice(&deviceId);
hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId);
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
hipMallocManaged(&a, size);
hipMallocManaged(&b, size);
hipMallocManaged(&c, size);
//hipMemPrefetchAsync(a, size, deviceId);
//hipMemPrefetchAsync(b, size, deviceId);
//hipMemPrefetchAsync(c, size, deviceId);
size_t threadsPerBlock;
size_t numberOfBlocks;
threadsPerBlock = 256;
numberOfBlocks = 32 * numberOfSMs;
hipError_t addVectorsErr;
hipError_t asyncErr;
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, 3, a, N);
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, 4, b, N);
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, 0, c, N);
hipLaunchKernelGGL(( addVectorsInto), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, c, a, b, N);
addVectorsErr = hipGetLastError();
if(addVectorsErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addVectorsErr));
asyncErr = hipDeviceSynchronize();
if(asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr));
hipMemPrefetchAsync(c, size, hipCpuDeviceId);
checkElementsAre(7, c, N);
hipFree(a);
hipFree(b);
hipFree(c);
}
|
542aa455eaeb0279b58de5ccdcb7e96c95293988.cu
|
#include <stdio.h>
__global__
void initWith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
a[i] = num;
}
}
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
int deviceId;
int numberOfSMs;
cudaGetDevice(&deviceId);
cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
cudaMallocManaged(&a, size);
cudaMallocManaged(&b, size);
cudaMallocManaged(&c, size);
//cudaMemPrefetchAsync(a, size, deviceId);
//cudaMemPrefetchAsync(b, size, deviceId);
//cudaMemPrefetchAsync(c, size, deviceId);
size_t threadsPerBlock;
size_t numberOfBlocks;
threadsPerBlock = 256;
numberOfBlocks = 32 * numberOfSMs;
cudaError_t addVectorsErr;
cudaError_t asyncErr;
initWith<<<numberOfBlocks, threadsPerBlock>>>(3, a, N);
initWith<<<numberOfBlocks, threadsPerBlock>>>(4, b, N);
initWith<<<numberOfBlocks, threadsPerBlock>>>(0, c, N);
addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N);
addVectorsErr = cudaGetLastError();
if(addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr));
asyncErr = cudaDeviceSynchronize();
if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));
cudaMemPrefetchAsync(c, size, cudaCpuDeviceId);
checkElementsAre(7, c, N);
cudaFree(a);
cudaFree(b);
cudaFree(c);
}
|
2373d3861398fe03d904fa80f0a4e12e2f37b3c1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "zupdate_stencil.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *zx = NULL;
hipMalloc(&zx, XSIZE*YSIZE);
float *zy = NULL;
hipMalloc(&zy, XSIZE*YSIZE);
float *zoutx = NULL;
hipMalloc(&zoutx, XSIZE*YSIZE);
float *zouty = NULL;
hipMalloc(&zouty, XSIZE*YSIZE);
float *g = NULL;
hipMalloc(&g, XSIZE*YSIZE);
float tau = 1;
float invlambda = 1;
int nx = 1;
int ny = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((zupdate_stencil), dim3(gridBlock), dim3(threadBlock), 0, 0, zx, zy, zoutx, zouty, g, tau, invlambda, nx, ny);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((zupdate_stencil), dim3(gridBlock), dim3(threadBlock), 0, 0, zx, zy, zoutx, zouty, g, tau, invlambda, nx, ny);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((zupdate_stencil), dim3(gridBlock), dim3(threadBlock), 0, 0, zx, zy, zoutx, zouty, g, tau, invlambda, nx, ny);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
2373d3861398fe03d904fa80f0a4e12e2f37b3c1.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "zupdate_stencil.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *zx = NULL;
cudaMalloc(&zx, XSIZE*YSIZE);
float *zy = NULL;
cudaMalloc(&zy, XSIZE*YSIZE);
float *zoutx = NULL;
cudaMalloc(&zoutx, XSIZE*YSIZE);
float *zouty = NULL;
cudaMalloc(&zouty, XSIZE*YSIZE);
float *g = NULL;
cudaMalloc(&g, XSIZE*YSIZE);
float tau = 1;
float invlambda = 1;
int nx = 1;
int ny = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
zupdate_stencil<<<gridBlock,threadBlock>>>(zx,zy,zoutx,zouty,g,tau,invlambda,nx,ny);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
zupdate_stencil<<<gridBlock,threadBlock>>>(zx,zy,zoutx,zouty,g,tau,invlambda,nx,ny);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
zupdate_stencil<<<gridBlock,threadBlock>>>(zx,zy,zoutx,zouty,g,tau,invlambda,nx,ny);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
9ca0727b045e8af3e0375d7b901776aaf29d90b7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Device code. */
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include "gauss_eliminate.h"
__global__ void gauss_eliminate_kernel(float *U, int k, int num_elements)
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid >= k+1){
U[num_elements * k + tid] = (float)(U[num_elements * k + tid] / U[num_elements * k + k]);
}
if (tid == k)
U[num_elements*k+k] = 1;
__syncthreads();
if (tid >= k+1){
for(int j = k+1; j < num_elements; j++)
{
U[num_elements * tid + j] -= U[num_elements * tid + k] * U[num_elements * k + j];
}
U[num_elements * tid + k] = 0;
}
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
|
9ca0727b045e8af3e0375d7b901776aaf29d90b7.cu
|
/* Device code. */
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include "gauss_eliminate.h"
__global__ void gauss_eliminate_kernel(float *U, int k, int num_elements)
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid >= k+1){
U[num_elements * k + tid] = (float)(U[num_elements * k + tid] / U[num_elements * k + k]);
}
if (tid == k)
U[num_elements*k+k] = 1;
__syncthreads();
if (tid >= k+1){
for(int j = k+1; j < num_elements; j++)
{
U[num_elements * tid + j] -= U[num_elements * tid + k] * U[num_elements * k + j];
}
U[num_elements * tid + k] = 0;
}
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
|
8648a89c651ed3188df6ee3945eec98f3f0c4511.hip
|
// !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2015-2019 by Contributors
* \file multiclass_metric.cc
* \brief evaluation metrics for multiclass classification.
* \author Kailong Chen, Tianqi Chen
*/
#include <rabit/rabit.h>
#include <xgboost/metric.h>
#include <atomic>
#include <cmath>
#include "metric_common.h"
#include "../common/math.h"
#include "../common/common.h"
#include "../common/threading_utils.h"
#if defined(XGBOOST_USE_CUDA)
#include <thrust/execution_policy.h> // thrust::hip::par
#include <thrust/functional.h> // thrust::plus<>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
#include "../common/device_helpers.cuh"
#endif // XGBOOST_USE_CUDA
namespace xgboost {
namespace metric {
// tag this file, used to force static link later.
DMLC_REGISTRY_FILE_TAG(multiclass_metric);
template <typename EvalRowPolicy>
class MultiClassMetricsReduction {
void CheckLabelError(int32_t label_error, size_t n_class) const {
CHECK(label_error >= 0 && label_error < static_cast<int32_t>(n_class))
<< "MultiClassEvaluation: label must be in [0, num_class),"
<< " num_class=" << n_class << " but found " << label_error << " in label";
}
public:
MultiClassMetricsReduction() = default;
PackedReduceResult
CpuReduceMetrics(const HostDeviceVector<bst_float> &weights,
const HostDeviceVector<bst_float> &labels,
const HostDeviceVector<bst_float> &preds,
const size_t n_class, int32_t n_threads) const {
size_t ndata = labels.Size();
const auto& h_labels = labels.HostVector();
const auto& h_weights = weights.HostVector();
const auto& h_preds = preds.HostVector();
std::atomic<int> label_error {0};
bool const is_null_weight = weights.Size() == 0;
std::vector<double> scores_tloc(n_threads, 0);
std::vector<double> weights_tloc(n_threads, 0);
common::ParallelFor(ndata, n_threads, [&](size_t idx) {
bst_float weight = is_null_weight ? 1.0f : h_weights[idx];
auto label = static_cast<int>(h_labels[idx]);
if (label >= 0 && label < static_cast<int>(n_class)) {
auto t_idx = omp_get_thread_num();
scores_tloc[t_idx] +=
EvalRowPolicy::EvalRow(label, h_preds.data() + idx * n_class,
n_class) *
weight;
weights_tloc[t_idx] += weight;
} else {
label_error = label;
}
});
double residue_sum =
std::accumulate(scores_tloc.cbegin(), scores_tloc.cend(), 0.0);
double weights_sum =
std::accumulate(weights_tloc.cbegin(), weights_tloc.cend(), 0.0);
CheckLabelError(label_error, n_class);
PackedReduceResult res { residue_sum, weights_sum };
return res;
}
#if defined(XGBOOST_USE_CUDA)
PackedReduceResult DeviceReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds,
const size_t n_class) {
size_t n_data = labels.Size();
thrust::counting_iterator<size_t> begin(0);
thrust::counting_iterator<size_t> end = begin + n_data;
auto s_labels = labels.DeviceSpan();
auto s_preds = preds.DeviceSpan();
auto s_weights = weights.DeviceSpan();
bool const is_null_weight = weights.Size() == 0;
auto s_label_error = label_error_.GetSpan<int32_t>(1);
s_label_error[0] = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
PackedReduceResult result = thrust::transform_reduce(
thrust::hip::par(alloc),
begin, end,
[=] XGBOOST_DEVICE(size_t idx) {
bst_float weight = is_null_weight ? 1.0f : s_weights[idx];
bst_float residue = 0;
auto label = static_cast<int>(s_labels[idx]);
if (label >= 0 && label < static_cast<int32_t>(n_class)) {
residue = EvalRowPolicy::EvalRow(
label, &s_preds[idx * n_class], n_class) * weight;
} else {
s_label_error[0] = label;
}
return PackedReduceResult{ residue, weight };
},
PackedReduceResult(),
thrust::plus<PackedReduceResult>());
CheckLabelError(s_label_error[0], n_class);
return result;
}
#endif // XGBOOST_USE_CUDA
PackedReduceResult Reduce(
const GenericParameter &tparam,
int device,
size_t n_class,
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) {
PackedReduceResult result;
if (device < 0) {
result =
CpuReduceMetrics(weights, labels, preds, n_class, tparam.Threads());
}
#if defined(XGBOOST_USE_CUDA)
else { // NOLINT
device_ = tparam.gpu_id;
preds.SetDevice(device_);
labels.SetDevice(device_);
weights.SetDevice(device_);
dh::safe_cuda(hipSetDevice(device_));
result = DeviceReduceMetrics(weights, labels, preds, n_class);
}
#endif // defined(XGBOOST_USE_CUDA)
return result;
}
private:
#if defined(XGBOOST_USE_CUDA)
dh::PinnedMemory label_error_;
int device_{-1};
#endif // defined(XGBOOST_USE_CUDA)
};
/*!
* \brief base class of multi-class evaluation
* \tparam Derived the name of subclass
*/
template<typename Derived>
struct EvalMClassBase : public Metric {
double Eval(const HostDeviceVector<float> &preds, const MetaInfo &info,
bool distributed) override {
if (info.labels.Size() == 0) {
CHECK_EQ(preds.Size(), 0);
} else {
CHECK(preds.Size() % info.labels.Size() == 0) << "label and prediction size not match";
}
double dat[2] { 0.0, 0.0 };
if (info.labels.Size() != 0) {
const size_t nclass = preds.Size() / info.labels.Size();
CHECK_GE(nclass, 1U)
<< "mlogloss and merror are only used for multi-class classification,"
<< " use logloss for binary classification";
int device = tparam_->gpu_id;
auto result =
reducer_.Reduce(*tparam_, device, nclass, info.weights_, *info.labels.Data(), preds);
dat[0] = result.Residue();
dat[1] = result.Weights();
}
if (distributed) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
return Derived::GetFinal(dat[0], dat[1]);
}
/*!
* \brief to be implemented by subclass,
* get evaluation result from one row
* \param label label of current instance
* \param pred prediction value of current instance
* \param nclass number of class in the prediction
*/
XGBOOST_DEVICE static bst_float EvalRow(int label,
const bst_float *pred,
size_t nclass);
/*!
* \brief to be overridden by subclass, final transformation
* \param esum the sum statistics returned by EvalRow
* \param wsum sum of weight
*/
inline static double GetFinal(double esum, double wsum) {
return esum / wsum;
}
private:
MultiClassMetricsReduction<Derived> reducer_;
// used to store error message
const char *error_msg_;
};
/*! \brief match error */
struct EvalMatchError : public EvalMClassBase<EvalMatchError> {
const char* Name() const override {
return "merror";
}
XGBOOST_DEVICE static bst_float EvalRow(int label,
const bst_float *pred,
size_t nclass) {
return common::FindMaxIndex(pred, pred + nclass) != pred + static_cast<int>(label);
}
};
/*! \brief multiclass log loss */
struct EvalMultiLogLoss : public EvalMClassBase<EvalMultiLogLoss> {
const char* Name() const override {
return "mlogloss";
}
XGBOOST_DEVICE static bst_float EvalRow(int label,
const bst_float *pred,
size_t nclass) {
const bst_float eps = 1e-16f;
auto k = static_cast<size_t>(label);
if (pred[k] > eps) {
return -::log(pred[k]);
} else {
return -::log(eps);
}
}
};
XGBOOST_REGISTER_METRIC(MatchError, "merror")
.describe("Multiclass classification error.")
.set_body([](const char* param) { return new EvalMatchError(); });
XGBOOST_REGISTER_METRIC(MultiLogLoss, "mlogloss")
.describe("Multiclass negative loglikelihood.")
.set_body([](const char* param) { return new EvalMultiLogLoss(); });
} // namespace metric
} // namespace xgboost
|
8648a89c651ed3188df6ee3945eec98f3f0c4511.cu
|
/*!
* Copyright 2015-2019 by Contributors
* \file multiclass_metric.cc
* \brief evaluation metrics for multiclass classification.
* \author Kailong Chen, Tianqi Chen
*/
#include <rabit/rabit.h>
#include <xgboost/metric.h>
#include <atomic>
#include <cmath>
#include "metric_common.h"
#include "../common/math.h"
#include "../common/common.h"
#include "../common/threading_utils.h"
#if defined(XGBOOST_USE_CUDA)
#include <thrust/execution_policy.h> // thrust::cuda::par
#include <thrust/functional.h> // thrust::plus<>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
#include "../common/device_helpers.cuh"
#endif // XGBOOST_USE_CUDA
namespace xgboost {
namespace metric {
// tag this file, used to force static link later.
DMLC_REGISTRY_FILE_TAG(multiclass_metric);
template <typename EvalRowPolicy>
class MultiClassMetricsReduction {
void CheckLabelError(int32_t label_error, size_t n_class) const {
CHECK(label_error >= 0 && label_error < static_cast<int32_t>(n_class))
<< "MultiClassEvaluation: label must be in [0, num_class),"
<< " num_class=" << n_class << " but found " << label_error << " in label";
}
public:
MultiClassMetricsReduction() = default;
PackedReduceResult
CpuReduceMetrics(const HostDeviceVector<bst_float> &weights,
const HostDeviceVector<bst_float> &labels,
const HostDeviceVector<bst_float> &preds,
const size_t n_class, int32_t n_threads) const {
size_t ndata = labels.Size();
const auto& h_labels = labels.HostVector();
const auto& h_weights = weights.HostVector();
const auto& h_preds = preds.HostVector();
std::atomic<int> label_error {0};
bool const is_null_weight = weights.Size() == 0;
std::vector<double> scores_tloc(n_threads, 0);
std::vector<double> weights_tloc(n_threads, 0);
common::ParallelFor(ndata, n_threads, [&](size_t idx) {
bst_float weight = is_null_weight ? 1.0f : h_weights[idx];
auto label = static_cast<int>(h_labels[idx]);
if (label >= 0 && label < static_cast<int>(n_class)) {
auto t_idx = omp_get_thread_num();
scores_tloc[t_idx] +=
EvalRowPolicy::EvalRow(label, h_preds.data() + idx * n_class,
n_class) *
weight;
weights_tloc[t_idx] += weight;
} else {
label_error = label;
}
});
double residue_sum =
std::accumulate(scores_tloc.cbegin(), scores_tloc.cend(), 0.0);
double weights_sum =
std::accumulate(weights_tloc.cbegin(), weights_tloc.cend(), 0.0);
CheckLabelError(label_error, n_class);
PackedReduceResult res { residue_sum, weights_sum };
return res;
}
#if defined(XGBOOST_USE_CUDA)
PackedReduceResult DeviceReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds,
const size_t n_class) {
size_t n_data = labels.Size();
thrust::counting_iterator<size_t> begin(0);
thrust::counting_iterator<size_t> end = begin + n_data;
auto s_labels = labels.DeviceSpan();
auto s_preds = preds.DeviceSpan();
auto s_weights = weights.DeviceSpan();
bool const is_null_weight = weights.Size() == 0;
auto s_label_error = label_error_.GetSpan<int32_t>(1);
s_label_error[0] = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
PackedReduceResult result = thrust::transform_reduce(
thrust::cuda::par(alloc),
begin, end,
[=] XGBOOST_DEVICE(size_t idx) {
bst_float weight = is_null_weight ? 1.0f : s_weights[idx];
bst_float residue = 0;
auto label = static_cast<int>(s_labels[idx]);
if (label >= 0 && label < static_cast<int32_t>(n_class)) {
residue = EvalRowPolicy::EvalRow(
label, &s_preds[idx * n_class], n_class) * weight;
} else {
s_label_error[0] = label;
}
return PackedReduceResult{ residue, weight };
},
PackedReduceResult(),
thrust::plus<PackedReduceResult>());
CheckLabelError(s_label_error[0], n_class);
return result;
}
#endif // XGBOOST_USE_CUDA
PackedReduceResult Reduce(
const GenericParameter &tparam,
int device,
size_t n_class,
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) {
PackedReduceResult result;
if (device < 0) {
result =
CpuReduceMetrics(weights, labels, preds, n_class, tparam.Threads());
}
#if defined(XGBOOST_USE_CUDA)
else { // NOLINT
device_ = tparam.gpu_id;
preds.SetDevice(device_);
labels.SetDevice(device_);
weights.SetDevice(device_);
dh::safe_cuda(cudaSetDevice(device_));
result = DeviceReduceMetrics(weights, labels, preds, n_class);
}
#endif // defined(XGBOOST_USE_CUDA)
return result;
}
private:
#if defined(XGBOOST_USE_CUDA)
dh::PinnedMemory label_error_;
int device_{-1};
#endif // defined(XGBOOST_USE_CUDA)
};
/*!
* \brief base class of multi-class evaluation
* \tparam Derived the name of subclass
*/
template<typename Derived>
struct EvalMClassBase : public Metric {
double Eval(const HostDeviceVector<float> &preds, const MetaInfo &info,
bool distributed) override {
if (info.labels.Size() == 0) {
CHECK_EQ(preds.Size(), 0);
} else {
CHECK(preds.Size() % info.labels.Size() == 0) << "label and prediction size not match";
}
double dat[2] { 0.0, 0.0 };
if (info.labels.Size() != 0) {
const size_t nclass = preds.Size() / info.labels.Size();
CHECK_GE(nclass, 1U)
<< "mlogloss and merror are only used for multi-class classification,"
<< " use logloss for binary classification";
int device = tparam_->gpu_id;
auto result =
reducer_.Reduce(*tparam_, device, nclass, info.weights_, *info.labels.Data(), preds);
dat[0] = result.Residue();
dat[1] = result.Weights();
}
if (distributed) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
return Derived::GetFinal(dat[0], dat[1]);
}
/*!
* \brief to be implemented by subclass,
* get evaluation result from one row
* \param label label of current instance
* \param pred prediction value of current instance
* \param nclass number of class in the prediction
*/
XGBOOST_DEVICE static bst_float EvalRow(int label,
const bst_float *pred,
size_t nclass);
/*!
* \brief to be overridden by subclass, final transformation
* \param esum the sum statistics returned by EvalRow
* \param wsum sum of weight
*/
inline static double GetFinal(double esum, double wsum) {
return esum / wsum;
}
private:
MultiClassMetricsReduction<Derived> reducer_;
// used to store error message
const char *error_msg_;
};
/*! \brief match error */
struct EvalMatchError : public EvalMClassBase<EvalMatchError> {
const char* Name() const override {
return "merror";
}
XGBOOST_DEVICE static bst_float EvalRow(int label,
const bst_float *pred,
size_t nclass) {
return common::FindMaxIndex(pred, pred + nclass) != pred + static_cast<int>(label);
}
};
/*! \brief multiclass log loss */
struct EvalMultiLogLoss : public EvalMClassBase<EvalMultiLogLoss> {
const char* Name() const override {
return "mlogloss";
}
XGBOOST_DEVICE static bst_float EvalRow(int label,
const bst_float *pred,
size_t nclass) {
const bst_float eps = 1e-16f;
auto k = static_cast<size_t>(label);
if (pred[k] > eps) {
return -std::log(pred[k]);
} else {
return -std::log(eps);
}
}
};
XGBOOST_REGISTER_METRIC(MatchError, "merror")
.describe("Multiclass classification error.")
.set_body([](const char* param) { return new EvalMatchError(); });
XGBOOST_REGISTER_METRIC(MultiLogLoss, "mlogloss")
.describe("Multiclass negative loglikelihood.")
.set_body([](const char* param) { return new EvalMultiLogLoss(); });
} // namespace metric
} // namespace xgboost
|
eeb35ad8061863adc1e4173807569136b41fbc45.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
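// A minimal CPU-side sketch of the weighted average described above for one
// output pixel at (row r, column c), with neighbours clamped to the image
// bounds. Illustrative only -- 'blurPixelCPU' is a hypothetical helper and is
// not called anywhere; the gaussian_blur kernel below is the actual version.
static inline float blurPixelCPU(const unsigned char* in, int numRows, int numCols,
                                 const float* filter, int filterWidth, int r, int c)
{
  float result = 0.0f;
  for (int fr = 0; fr < filterWidth; ++fr) {
    for (int fc = 0; fc < filterWidth; ++fc) {
      // Clamp the neighbour coordinates to [0, numRows-1] x [0, numCols-1]
      int ir = r + fr - filterWidth / 2;
      int ic = c + fc - filterWidth / 2;
      if (ir < 0) ir = 0; else if (ir > numRows - 1) ir = numRows - 1;
      if (ic < 0) ic = 0; else if (ic > numCols - 1) ic = numCols - 1;
      result += filter[fr * filterWidth + fc] * (float)in[ir * numCols + ic];
    }
  }
  return result;
}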
#include "reference_calc.cpp"
#include "utils.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
  // NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
if (c >= numCols || r >= numRows)
return;
// Use shared memory to store filter
extern __shared__ float shared_filter[];
int threads_per_block = blockDim.x * blockDim.y;
// Number of filter entries each thread needs to copy
int filters_per_thread = (int)ceil((double)filterWidth * filterWidth / threads_per_block);
// Thread index inside the block
int index = threadIdx.y * blockDim.x + threadIdx.x;
index = index * filters_per_thread;
for (int i = 0; i < filters_per_thread; i++) {
if (index < filterWidth * filterWidth) {
shared_filter[index] = filter[index];
index++;
} else {
break;
}
}
__syncthreads();
float result = 0;
// For every value in the filter around the pixel (c, r)
for (int filter_r = 0; filter_r < filterWidth; filter_r++) {
for (int filter_c = 0; filter_c < filterWidth; filter_c++) {
// Find the global image position for this filter position
// Ensure image_r in [0, numRows - 1]; image_c in [0, numCols - 1];
int image_r = min(max(r + filter_r - filterWidth / 2, 0), numRows - 1);
int image_c = min(max(c + filter_c - filterWidth / 2, 0), numCols - 1);
// Get the pixel value (image_r, image_c)
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
// Get the filter value in (filter_r, filter_c)
float filter_value = shared_filter[filter_r * filterWidth + filter_c];
result += image_value * filter_value;
}
}
outputChannel[r * numCols + c] = result;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
if (c >= numCols || r >= numRows)
return;
int thread_1D_pos = r * numCols + c;
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
  checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
  checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(32, 32);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and and block size.
const dim3 gridSize((int)ceil((float)numCols / blockSize.x), (int)ceil((float)numRows / blockSize.y));
// Shared memory size (in byte) for each thread block
int shared_mem_size = filterWidth * filterWidth * sizeof(float);
//TODO: Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), shared_mem_size, 0, d_red,
d_redBlurred,
numRows,
numCols,
d_filter,
filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), shared_mem_size, 0, d_green,
d_greenBlurred,
numRows,
numCols,
d_filter,
filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), shared_mem_size, 0, d_blue,
d_blueBlurred,
numRows,
numCols,
d_filter,
filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_filter));
}
|
eeb35ad8061863adc1e4173807569136b41fbc45.cu
|
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "reference_calc.cpp"
#include "utils.h"
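// ----------------------------------------------------------------------------
// Illustrative host-side sketch (not part of the original assignment code): the
// weighted average from the worked 3x3 example in the header comment, computed
// for a single pixel. It assumes a row-major filterWidth x filterWidth filter and
// a neighbourhood that lies fully inside the image, so no clamping is shown here.
static float blurOnePixelReference(const unsigned char* channel, int numCols,
                                   const float* filter, int filterWidth,
                                   int r, int c)
{
  float result = 0.f;
  for (int filter_r = 0; filter_r < filterWidth; ++filter_r) {
    for (int filter_c = 0; filter_c < filterWidth; ++filter_c) {
      int image_r = r + filter_r - filterWidth / 2;   // neighbour row
      int image_c = c + filter_c - filterWidth / 2;   // neighbour column
      result += filter[filter_r * filterWidth + filter_c] *
                static_cast<float>(channel[image_r * numCols + image_c]);
    }
  }
  // For the 3x3 example above this yields 0.2 * (5 + 3 + 6 + 2 + 0) = 3.2
  return result;
}
// ----------------------------------------------------------------------------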
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
  int c = blockIdx.x * blockDim.x + threadIdx.x;
  int r = blockIdx.y * blockDim.y + threadIdx.y;
  // Use shared memory to store the filter.
  // Every thread in the block must reach the __syncthreads() barrier below, so the
  // out-of-bounds check on (c, r) is deferred until after the barrier.
  extern __shared__ float shared_filter[];
  int threads_per_block = blockDim.x * blockDim.y;
  // Number of filter entries each thread needs to copy
  int filters_per_thread = (int)ceil((double)filterWidth * filterWidth / threads_per_block);
  // Thread index inside the block
  int index = threadIdx.y * blockDim.x + threadIdx.x;
  index = index * filters_per_thread;
  for (int i = 0; i < filters_per_thread; i++) {
    if (index < filterWidth * filterWidth) {
      shared_filter[index] = filter[index];
      index++;
    } else {
      break;
    }
  }
  __syncthreads();
  if (c >= numCols || r >= numRows)
    return;
float result = 0;
// For every value in the filter around the pixel (c, r)
for (int filter_r = 0; filter_r < filterWidth; filter_r++) {
for (int filter_c = 0; filter_c < filterWidth; filter_c++) {
// Find the global image position for this filter position
// Ensure image_r in [0, numRows - 1]; image_c in [0, numCols - 1];
int image_r = min(max(r + filter_r - filterWidth / 2, 0), numRows - 1);
int image_c = min(max(c + filter_c - filterWidth / 2, 0), numCols - 1);
// Get the pixel value (image_r, image_c)
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
// Get the filter value in (filter_r, filter_c)
float filter_value = shared_filter[filter_r * filterWidth + filter_c];
result += image_value * filter_value;
}
}
outputChannel[r * numCols + c] = result;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
int c = blockIdx.x * blockDim.x + threadIdx.x;
int r = blockIdx.y * blockDim.y + threadIdx.y;
if (c >= numCols || r >= numRows)
return;
int thread_1D_pos = r * numCols + c;
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
  checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
  checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(32, 32);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
  //from the image size and block size.
const dim3 gridSize((int)ceil((float)numCols / blockSize.x), (int)ceil((float)numRows / blockSize.y));
  // Shared memory size (in bytes) for each thread block
int shared_mem_size = filterWidth * filterWidth * sizeof(float);
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA,
numRows,
numCols,
d_red,
d_green,
d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize, shared_mem_size>>>(d_red,
d_redBlurred,
numRows,
numCols,
d_filter,
filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gaussian_blur<<<gridSize, blockSize, shared_mem_size>>>(d_green,
d_greenBlurred,
numRows,
numCols,
d_filter,
filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gaussian_blur<<<gridSize, blockSize, shared_mem_size>>>(d_blue,
d_blueBlurred,
numRows,
numCols,
d_filter,
filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_filter));
}
|
1021d0d9a7326bfb7d5ce15c6a67455ad8f7819d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hip/hip_runtime_api.h"
#include <inttypes.h>
#include "nikola-unsimplified.h"
extern "C" __global__ void kern5(int32_t* arr_16, int32_t arr_1dim7,
double* vec_alloca_28,
int32_t vec_alloca_2dim9, int32_t x_010)
{
for (int32_t i_3 = blockIdx.x * blockDim.x + threadIdx.x; i_3 < x_010;
i_3 += blockDim.x * gridDim.x) {
int32_t x_812;
uint32_t x_813;
uint8_t x_814;
x_812 = 0;
x_813 = 0U;
x_814 = 1;
if (30 > 0) {
for (int i11 = 0; i11 < 30; ++i11) {
uint32_t v_126_15;
uint8_t v_32_16;
uint32_t ifte_result17;
uint8_t ifte_result18;
v_126_15 = (uint32_t) (int32_t) arr_16[i_3 % (x_010 >
(arr_1dim7 >
x_010 ? x_010 : arr_1dim7) ? arr_1dim7 >
x_010 ? x_010 : arr_1dim7 : x_010)];
v_32_16 = ((int32_t) (int32_t) (v_126_15 ^ v_126_15 >> 1U) &
1 << x_812) != 0;
if (v_32_16) {
uint32_t v_37_19;
uint32_t ifte_result20;
uint8_t ifte_result21;
v_37_19 = 1U << 29U - (uint32_t) x_812;
if (x_814) {
ifte_result20 = v_37_19 ^ x_813;
ifte_result21 = v_32_16;
} else {
ifte_result20 = v_37_19;
ifte_result21 = v_32_16;
}
ifte_result17 = ifte_result20;
ifte_result18 = ifte_result21;
} else {
ifte_result17 = x_813;
ifte_result18 = x_814;
}
x_812 = 1 + x_812;
x_813 = ifte_result17;
x_814 = ifte_result18;
}
}
vec_alloca_28[i_3] = (double) (int32_t) x_813 / (double) (1 << 30);
}
}
void gc(void** allocs, int* marks, int nallocs)
{
for (int i = 0; i < nallocs; ++i) {
if (marks[i] == 0) {
hipFree((char*) allocs[i]);
allocs[i] = NULL;
}
marks[i] = 0;
}
}
void mark(void** allocs, int* marks, int nallocs, void* alloc)
{
for (int i = 0; i < nallocs; ++i) {
if (allocs[i] == alloc) {
marks[i] = 1;
return;
}
}
}
hipError_t sobol_nikola_unsimplified(int32_t x_01, int32_t* arr_12, int32_t arr_1dim3,
double** ptr_resultparam22, int32_t* scalar_resultparam23)
{
void* allocs[1];
int marks[1];
int nallocs = 0;
double* alloc4 = NULL;
if (hipMalloc(&alloc4, x_01 * sizeof(double)) != hipSuccess)
goto done;
allocs[nallocs] = (void*) alloc4;
marks[nallocs++] = 0;
{
dim3 gdims;
dim3 tdims;
gdims.x = 128;
gdims.y = 1;
gdims.z = 1;
tdims.x = 480;
tdims.y = 1;
tdims.z = 1;
hipLaunchKernelGGL(( kern5), dim3(gdims), dim3(tdims), 0, 0, arr_12, arr_1dim3, alloc4, x_01, x_01);
}
mark(allocs, marks, nallocs, alloc4);
*ptr_resultparam22 = alloc4;
*scalar_resultparam23 = x_01;
done:
gc(allocs, marks, nallocs);
return hipGetLastError();
}
|
1021d0d9a7326bfb7d5ce15c6a67455ad8f7819d.cu
|
#include "cuda.h"
#include "cuda_runtime_api.h"
#include <inttypes.h>
#include "nikola-unsimplified.h"
extern "C" __global__ void kern5(int32_t* arr_16, int32_t arr_1dim7,
double* vec_alloca_28,
int32_t vec_alloca_2dim9, int32_t x_010)
{
for (int32_t i_3 = blockIdx.x * blockDim.x + threadIdx.x; i_3 < x_010;
i_3 += blockDim.x * gridDim.x) {
int32_t x_812;
uint32_t x_813;
uint8_t x_814;
x_812 = 0;
x_813 = 0U;
x_814 = 1;
if (30 > 0) {
for (int i11 = 0; i11 < 30; ++i11) {
uint32_t v_126_15;
uint8_t v_32_16;
uint32_t ifte_result17;
uint8_t ifte_result18;
v_126_15 = (uint32_t) (int32_t) arr_16[i_3 % (x_010 >
(arr_1dim7 >
x_010 ? x_010 : arr_1dim7) ? arr_1dim7 >
x_010 ? x_010 : arr_1dim7 : x_010)];
v_32_16 = ((int32_t) (int32_t) (v_126_15 ^ v_126_15 >> 1U) &
1 << x_812) != 0;
if (v_32_16) {
uint32_t v_37_19;
uint32_t ifte_result20;
uint8_t ifte_result21;
v_37_19 = 1U << 29U - (uint32_t) x_812;
if (x_814) {
ifte_result20 = v_37_19 ^ x_813;
ifte_result21 = v_32_16;
} else {
ifte_result20 = v_37_19;
ifte_result21 = v_32_16;
}
ifte_result17 = ifte_result20;
ifte_result18 = ifte_result21;
} else {
ifte_result17 = x_813;
ifte_result18 = x_814;
}
x_812 = 1 + x_812;
x_813 = ifte_result17;
x_814 = ifte_result18;
}
}
vec_alloca_28[i_3] = (double) (int32_t) x_813 / (double) (1 << 30);
}
}
void gc(void** allocs, int* marks, int nallocs)
{
for (int i = 0; i < nallocs; ++i) {
if (marks[i] == 0) {
cudaFree((char*) allocs[i]);
allocs[i] = NULL;
}
marks[i] = 0;
}
}
void mark(void** allocs, int* marks, int nallocs, void* alloc)
{
for (int i = 0; i < nallocs; ++i) {
if (allocs[i] == alloc) {
marks[i] = 1;
return;
}
}
}
cudaError_t sobol_nikola_unsimplified(int32_t x_01, int32_t* arr_12, int32_t arr_1dim3,
double** ptr_resultparam22, int32_t* scalar_resultparam23)
{
void* allocs[1];
int marks[1];
int nallocs = 0;
double* alloc4 = NULL;
if (cudaMalloc(&alloc4, x_01 * sizeof(double)) != cudaSuccess)
goto done;
allocs[nallocs] = (void*) alloc4;
marks[nallocs++] = 0;
{
dim3 gdims;
dim3 tdims;
gdims.x = 128;
gdims.y = 1;
gdims.z = 1;
tdims.x = 480;
tdims.y = 1;
tdims.z = 1;
kern5<<<gdims, tdims>>>(arr_12, arr_1dim3, alloc4, x_01, x_01);
}
mark(allocs, marks, nallocs, alloc4);
*ptr_resultparam22 = alloc4;
*scalar_resultparam23 = x_01;
done:
gc(allocs, marks, nallocs);
return cudaGetLastError();
}
|
847c8c40e38ccfca139ede20ea48319e3ad3e38e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <thrust/reduce.h>
#include "matlib.cu"
#include "exactTotalSum.cu"
#include "maxColSumP.cu"
#define BLOCK_SIZE 32
#define EPS 2.2204e-16
__global__
void HKernel(Matrix d_A, Matrix d_B, Matrix d_C, Matrix d_Out) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row >= d_A.height || col >= d_A.width) return;
int idx = row*d_A.width+col;
d_Out.elements[idx] = d_A.elements[idx] - (d_B.elements[idx] / (d_C.elements[idx]+EPS));
}
void H(Matrix A, Matrix B, Matrix C, Matrix Out) {
// load A to device memory
Matrix d_A;
d_A.width = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(double);
hipError_t err = hipMalloc(&d_A.elements, size);
//printf("CUDA malloc A: %s\n", hipGetErrorString(err));
hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
//printf("Copy input matrix A to device: %s\n", hipGetErrorString(err));
// load B to device memory
Matrix d_B;
d_B.width = B.width;
d_B.height = B.height;
err = hipMalloc(&d_B.elements, size);
//printf("CUDA malloc B: %s\n", hipGetErrorString(err));
hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);
//printf("Copy input matrix B to device: %s\n", hipGetErrorString(err));
// load C to device memory
Matrix d_C;
d_C.width = C.width;
d_C.height = C.height;
err = hipMalloc(&d_C.elements, size);
//printf("CUDA malloc C: %s\n", hipGetErrorString(err));
hipMemcpy(d_C.elements, C.elements, size, hipMemcpyHostToDevice);
//printf("Copy input matrix C to device: %s\n", hipGetErrorString(err));
// allocate Out in device memory
Matrix d_Out;
d_Out.width = Out.width; d_Out.height = Out.height;
size = Out.width * Out.height * sizeof(double);
hipMalloc(&d_Out.elements, size);
// invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid( (A.width + dimBlock.x - 1)/dimBlock.x, (A.height + dimBlock.y - 1)/dimBlock.y );
hipLaunchKernelGGL(( HKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, d_Out);
err = hipDeviceSynchronize();
//printf("Run H kernel: %s\n", hipGetErrorString(err));
// read Out from device memory
err = hipMemcpy(Out.elements, d_Out.elements, size, hipMemcpyDeviceToHost);
//printf("Copy output matrix off of device: %s\n",hipGetErrorString(err));
// free device memory
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
hipFree(d_Out.elements);
}
// matrix lambda kernel called by lambda()
__global__
void lambdaKernel(Matrix d_A, Matrix d_B, Matrix d_C, Matrix d_D, Matrix d_Out) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row >= d_A.height || col >= d_A.width) return;
int idx = row*d_A.width+col;
d_Out.elements[idx] = d_A.elements[idx] - (d_B.elements[idx] / (d_C.elements[idx]+EPS)) + (d_B.elements[idx] / (d_D.elements[idx]+EPS));
}
void lambda(Matrix A, Matrix B, Matrix C, Matrix D, Matrix Out) {
// load A to device memory
Matrix d_A;
d_A.width = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(double);
hipError_t err = hipMalloc(&d_A.elements, size);
//printf("CUDA malloc A: %s\n", hipGetErrorString(err));
hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
//printf("Copy input matrix A to device: %s\n", hipGetErrorString(err));
// load B to device memory
Matrix d_B;
d_B.width = B.width;
d_B.height = B.height;
err = hipMalloc(&d_B.elements, size);
//printf("CUDA malloc B: %s\n", hipGetErrorString(err));
hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);
//printf("Copy input matrix B to device: %s\n", hipGetErrorString(err));
// load C to device memory
Matrix d_C;
d_C.width = C.width;
d_C.height = C.height;
err = hipMalloc(&d_C.elements, size);
//printf("CUDA malloc C: %s\n", hipGetErrorString(err));
hipMemcpy(d_C.elements, C.elements, size, hipMemcpyHostToDevice);
//printf("Copy input matrix C to device: %s\n", hipGetErrorString(err));
  // load D to device memory
Matrix d_D;
d_D.width = D.width;
d_D.height = D.height;
err = hipMalloc(&d_D.elements, size);
//printf("CUDA malloc D: %s\n", hipGetErrorString(err));
hipMemcpy(d_D.elements, D.elements, size, hipMemcpyHostToDevice);
//printf("Copy input matrix D to device: %s\n", hipGetErrorString(err));
// allocate Out in device memory
Matrix d_Out;
d_Out.width = Out.width; d_Out.height = Out.height;
size = Out.width * Out.height * sizeof(double);
err = hipMalloc(&d_Out.elements, size);
//printf("CUDA malloc Out: %s\n", hipGetErrorString(err));
// invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid( (A.width + dimBlock.x - 1)/dimBlock.x, (A.height + dimBlock.y - 1)/dimBlock.y );
hipLaunchKernelGGL(( lambdaKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, d_D, d_Out);
err = hipDeviceSynchronize();
//printf("Run lambda kernel: %s\n", hipGetErrorString(err));
// read Out from device memory
err = hipMemcpy(Out.elements, d_Out.elements, size, hipMemcpyDeviceToHost);
//printf("Copy output matrix off of device: %s\n",hipGetErrorString(err));
// free device memory
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
hipFree(d_D.elements);
hipFree(d_Out.elements);
}
__global__
void FKernel(Matrix d_A, Matrix d_B, Matrix d_C, Matrix d_Out) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int idx = row*d_A.width+col;
if(row >= d_A.height || col >= d_A.width) return;
d_Out.elements[idx] = (d_A.elements[idx] + d_B.elements[idx] + d_C.elements[idx]) / 3;
}
void Fun(Matrix A, Matrix B, Matrix C, Matrix Out) {
// load A to device memory
Matrix d_A;
d_A.width = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(double);
hipError_t err = hipMalloc(&d_A.elements, size);
//printf("CUDA malloc A: %s\n", hipGetErrorString(err));
hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
//printf("Copy input matrix A to device: %s\n", hipGetErrorString(err));
// load B to device memory
Matrix d_B;
d_B.width = B.width;
d_B.height = B.height;
err = hipMalloc(&d_B.elements, size);
//printf("CUDA malloc B: %s\n", hipGetErrorString(err));
hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);
//printf("Copy input matrix B to device: %s\n", hipGetErrorString(err));
// load C to device memory
Matrix d_C;
d_C.width = C.width;
d_C.height = C.height;
err = hipMalloc(&d_C.elements, size);
//printf("CUDA malloc C: %s\n", hipGetErrorString(err));
hipMemcpy(d_C.elements, C.elements, size, hipMemcpyHostToDevice);
//printf("Copy input matrix C to device: %s\n", hipGetErrorString(err));
// allocate Out in device memory
Matrix d_Out;
d_Out.width = Out.width; d_Out.height = Out.height;
size = Out.width * Out.height * sizeof(double);
err = hipMalloc(&d_Out.elements, size);
//printf("CUDA malloc Out: %s\n", hipGetErrorString(err));
// invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid( (A.width + dimBlock.x - 1)/dimBlock.x, (A.height + dimBlock.y - 1)/dimBlock.y );
hipLaunchKernelGGL(( FKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, d_Out);
err = hipDeviceSynchronize();
//printf("Run kernel: %s\n", hipGetErrorString(err));
// read Out from device memory
err = hipMemcpy(Out.elements, d_Out.elements, size, hipMemcpyDeviceToHost);
//printf("Copy output matrix off of device: %s\n",hipGetErrorString(err));
// free device memory
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
hipFree(d_Out.elements);
}
void nearestDSmax_RE(Matrix Y, Matrix maxRowSum, Matrix maxColSum, double totalSum, double maxLoops, double precision, Matrix F) {
int size = Y.height * Y.width * sizeof(double);
  // lambda1 = zeros(size(Y));
Matrix lambda1, lambda2, lambda3;
lambda1.height = lambda2.height = lambda3.height = Y.height;
lambda1.width = lambda2.width = lambda3.width = Y.width;
lambda1.elements = (double*)malloc(size);
lambda2.elements = (double*)malloc(size);
lambda3.elements = (double*)malloc(size);
zeros(lambda1);
// lambda2 = lambda1; lambda3 = lambda1;
memcpy(lambda2.elements, lambda1.elements, size);
memcpy(lambda3.elements, lambda1.elements, size);
// F1 = totalsum * (Y ./ sum(Y(:)));
Matrix F1, F2, F3;
F1.height = F2.height = F3.height = Y.height;
F1.width = F2.width = F3.width = Y.width;
F1.elements = (double*)malloc(size);
F2.elements = (double*)malloc(size);
F3.elements = (double*)malloc(size);
//printf("before sum(Y(:))\n");
// sum(Y(:))
thrust::host_vector<double> h_Y(Y.elements, Y.elements + Y.width * Y.height);
thrust::device_vector<double> d_Y = h_Y;
double Ysum = thrust::reduce(d_Y.begin(), d_Y.end(), (double) 0, thrust::plus<double>());
//printf("after sum(Y(:))\n");
// Y ./ sum(Y(:))
Matrix YdivYsum;
YdivYsum.width = Y.width;
YdivYsum.height = Y.height;
YdivYsum.elements = (double*)malloc(size);
matTimesScaler(Y, 1/Ysum, YdivYsum);
matTimesScaler(YdivYsum, totalSum, F1);
// F2 = F1; F3 = F1;
memcpy(F2.elements, F1.elements, size);
memcpy(F3.elements, F1.elements, size);
Matrix H1, H2, H3;
H1.width = H2.width = H3.width = Y.width;
H1.height = H2.height = H3.height = Y.height;
H1.elements = (double*)malloc(size);
H2.elements = (double*)malloc(size);
H3.elements = (double*)malloc(size);
Matrix F1eps, F2eps, F3eps;
F1eps.width = F2eps.width = F3eps.width = Y.width;
F1eps.height = F2eps.height = F3eps.height = Y.height;
F1eps.elements = (double*)malloc(size);
F2eps.elements = (double*)malloc(size);
F3eps.elements = (double*)malloc(size);
Matrix YdivF1eps, YdivF2eps, YdivF3eps;
YdivF1eps.width = YdivF2eps.width = YdivF3eps.width = Y.width;
YdivF1eps.height = YdivF2eps.height = YdivF3eps.height = Y.height;
YdivF1eps.elements = (double*)malloc(size);
YdivF2eps.elements = (double*)malloc(size);
YdivF3eps.elements = (double*)malloc(size);
Matrix negH2, negH3;
negH2.width = negH3.width = Y.width;
negH2.height = negH3.height = Y.height;
negH2.elements = (double*)malloc(size);
negH3.elements = (double*)malloc(size);
// transposed matrices
Matrix H1t, negH1t, Yt, F1t, negH3t;
H1t.width = negH1t.width = Yt.width = F1t.width = Y.height;
H1t.height = negH1t.height = Yt.height = F1t.height = Y.width;
negH3t.height = H3.width;
negH3t.width = H3.height;
negH3t.elements = (double*)malloc(size);
H1t.elements = (double*)malloc(size);
negH1t.elements = (double*)malloc(size);
Yt.elements = (double*)malloc(size);
F1t.elements = (double*)malloc(size);
Matrix Fdiff1, Fdiff2;
Fdiff1.width = Fdiff2.width = Y.width;
Fdiff1.height = Fdiff2.height = Y.height;
Fdiff1.elements = (double*)malloc(size);
Fdiff2.elements = (double*)malloc(size);
// F3reshape is a col vector
Matrix F3reshape;
F3reshape.width = 1;
F3reshape.height = Y.width*Y.height;
F3reshape.elements = (double*)malloc(size);
Matrix maxRowSumT;
maxRowSumT.width = Y.height;
maxRowSumT.height = 1;
maxRowSumT.elements = (double*)malloc(maxRowSumT.width*sizeof(double));
//for t = 1 : maxLoops
for(int t=0; t < maxLoops; t++) {
// Max row sum
// H1 = lambda1 - (Y ./ (F3+eps));
H(lambda1, Y, F3, H1);
//F1 = maxColSumP(Y', -H1', maxRowSum', precision)';
//-H1'
transpose(H1, H1t);
matTimesScaler(H1t, -1, negH1t);
//Y'
transpose(Y, Yt);
//maxRowSum'
transpose(maxRowSum, maxRowSumT);
// transpose(F1, F1t);
//maxColSumP(Y', -H1', maxRowSum', precision)'
maxColSumP(Yt, negH1t, maxRowSumT, 0.01, F1t);
//F1
transpose(F1t, F1);
// lambda1 = lambda1 - (Y ./ (F3+eps)) + (Y ./ (F1+eps));
lambda(lambda1, Y, F3, F1, lambda1);
// Max col sum
// H2 = lambda2 - (Y ./ (F1+eps));
H(lambda2, Y, F1, H2);
// F2 = maxColSumP (Y, -H2, maxColSum, precision);
matTimesScaler(H2, -1, negH2);
maxColSumP(Y, negH2, maxColSum, precision, F2);
// lambda2 = lambda2 - (Y ./ (F1+eps)) + (Y ./ (F2+eps));
lambda(lambda2, Y, F1, F2, lambda2);
// Total sum
// H3 = lambda3 - (Y ./ (F2 + eps));
H(lambda3, Y, F2, H3);
matTimesScaler(H3, -1, negH3);
// F3 = reshape( exactTotalSum (Y(:), -H3(:), totalSum, precision), size(Y) );
transpose(Y, Yt);
transpose(negH3, negH3t);
exactTotalSum(Yt, negH3t, totalSum, precision, F3reshape);
reshape(F3reshape, F3);
//lambda3 = lambda3 - (Y ./ (F2+eps)) + (Y ./ (F3+eps));
lambda(lambda3, Y, F2, F3, lambda3);
matSub(F1, F2, Fdiff1);
matSub(F1, F3, Fdiff2);
// max and min of Fdiff1
thrust::host_vector<double> h_Fdiff1(Fdiff1.elements, Fdiff1.elements + Fdiff1.width*Fdiff1.height);
thrust::device_vector<double> d_Fdiff1 = h_Fdiff1;
thrust::detail::normal_iterator<thrust::device_ptr<double> > Fdiff1max = thrust::max_element(d_Fdiff1.begin(), d_Fdiff1.end());
thrust::detail::normal_iterator<thrust::device_ptr<double> > Fdiff1min = thrust::min_element(d_Fdiff1.begin(), d_Fdiff1.end());
// max and min of Fdiff2
thrust::host_vector<double> h_Fdiff2(Fdiff2.elements, Fdiff2.elements + Fdiff2.width*Fdiff2.height);
thrust::device_vector<double> d_Fdiff2 = h_Fdiff2;
thrust::detail::normal_iterator<thrust::device_ptr<double> > Fdiff2max = thrust::max_element(d_Fdiff2.begin(), d_Fdiff2.end());
thrust::detail::normal_iterator<thrust::device_ptr<double> > Fdiff2min = thrust::min_element(d_Fdiff2.begin(), d_Fdiff2.end());
double fdMax1 = max(*Fdiff1max, fabs(*Fdiff1min));
double fdMax2 = max(*Fdiff2max, fabs(*Fdiff2min));
if(fabs(fdMax1) < precision && fabs(fdMax2) < precision)
break;
} // end of t for loop
// F = (F1 + F2 + F3) / 3;
Fun(F1, F2, F3, F);
free(lambda1.elements);
free(lambda2.elements);
free(lambda3.elements);
free(F1.elements);
free(F2.elements);
free(F3.elements);
free(H1.elements);
free(H2.elements);
free(H3.elements);
free(F1eps.elements);
free(F2eps.elements);
free(F3eps.elements);
free(YdivF1eps.elements);
free(YdivF2eps.elements);
free(YdivF3eps.elements);
free(negH1t.elements);
free(negH2.elements);
free(negH3.elements);
free(H1t.elements);
free(Yt.elements);
free(maxRowSumT.elements);
free(F1t.elements);
free(F3reshape.elements);
free(Fdiff1.elements);
free(Fdiff2.elements);
}
|
847c8c40e38ccfca139ede20ea48319e3ad3e38e.cu
|
#include <stdio.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <thrust/reduce.h>
#include "matlib.cu"
#include "exactTotalSum.cu"
#include "maxColSumP.cu"
#define BLOCK_SIZE 32
#define EPS 2.2204e-16
__global__
void HKernel(Matrix d_A, Matrix d_B, Matrix d_C, Matrix d_Out) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row >= d_A.height || col >= d_A.width) return;
int idx = row*d_A.width+col;
d_Out.elements[idx] = d_A.elements[idx] - (d_B.elements[idx] / (d_C.elements[idx]+EPS));
}
void H(Matrix A, Matrix B, Matrix C, Matrix Out) {
// load A to device memory
Matrix d_A;
d_A.width = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(double);
cudaError_t err = cudaMalloc(&d_A.elements, size);
//printf("CUDA malloc A: %s\n", cudaGetErrorString(err));
cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
//printf("Copy input matrix A to device: %s\n", cudaGetErrorString(err));
// load B to device memory
Matrix d_B;
d_B.width = B.width;
d_B.height = B.height;
err = cudaMalloc(&d_B.elements, size);
//printf("CUDA malloc B: %s\n", cudaGetErrorString(err));
cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
//printf("Copy input matrix B to device: %s\n", cudaGetErrorString(err));
// load C to device memory
Matrix d_C;
d_C.width = C.width;
d_C.height = C.height;
err = cudaMalloc(&d_C.elements, size);
//printf("CUDA malloc C: %s\n", cudaGetErrorString(err));
cudaMemcpy(d_C.elements, C.elements, size, cudaMemcpyHostToDevice);
//printf("Copy input matrix C to device: %s\n", cudaGetErrorString(err));
// allocate Out in device memory
Matrix d_Out;
d_Out.width = Out.width; d_Out.height = Out.height;
size = Out.width * Out.height * sizeof(double);
cudaMalloc(&d_Out.elements, size);
// invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid( (A.width + dimBlock.x - 1)/dimBlock.x, (A.height + dimBlock.y - 1)/dimBlock.y );
HKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, d_Out);
err = cudaThreadSynchronize();
//printf("Run H kernel: %s\n", cudaGetErrorString(err));
// read Out from device memory
err = cudaMemcpy(Out.elements, d_Out.elements, size, cudaMemcpyDeviceToHost);
//printf("Copy output matrix off of device: %s\n",cudaGetErrorString(err));
// free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
cudaFree(d_Out.elements);
}
// matrix lambda kernel called by lambda()
__global__
void lambdaKernel(Matrix d_A, Matrix d_B, Matrix d_C, Matrix d_D, Matrix d_Out) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row >= d_A.height || col >= d_A.width) return;
int idx = row*d_A.width+col;
d_Out.elements[idx] = d_A.elements[idx] - (d_B.elements[idx] / (d_C.elements[idx]+EPS)) + (d_B.elements[idx] / (d_D.elements[idx]+EPS));
}
void lambda(Matrix A, Matrix B, Matrix C, Matrix D, Matrix Out) {
// load A to device memory
Matrix d_A;
d_A.width = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(double);
cudaError_t err = cudaMalloc(&d_A.elements, size);
//printf("CUDA malloc A: %s\n", cudaGetErrorString(err));
cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
//printf("Copy input matrix A to device: %s\n", cudaGetErrorString(err));
// load B to device memory
Matrix d_B;
d_B.width = B.width;
d_B.height = B.height;
err = cudaMalloc(&d_B.elements, size);
//printf("CUDA malloc B: %s\n", cudaGetErrorString(err));
cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
//printf("Copy input matrix B to device: %s\n", cudaGetErrorString(err));
// load C to device memory
Matrix d_C;
d_C.width = C.width;
d_C.height = C.height;
err = cudaMalloc(&d_C.elements, size);
//printf("CUDA malloc C: %s\n", cudaGetErrorString(err));
cudaMemcpy(d_C.elements, C.elements, size, cudaMemcpyHostToDevice);
//printf("Copy input matrix C to device: %s\n", cudaGetErrorString(err));
  // load D to device memory
Matrix d_D;
d_D.width = D.width;
d_D.height = D.height;
err = cudaMalloc(&d_D.elements, size);
//printf("CUDA malloc D: %s\n", cudaGetErrorString(err));
cudaMemcpy(d_D.elements, D.elements, size, cudaMemcpyHostToDevice);
//printf("Copy input matrix D to device: %s\n", cudaGetErrorString(err));
// allocate Out in device memory
Matrix d_Out;
d_Out.width = Out.width; d_Out.height = Out.height;
size = Out.width * Out.height * sizeof(double);
err = cudaMalloc(&d_Out.elements, size);
//printf("CUDA malloc Out: %s\n", cudaGetErrorString(err));
// invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid( (A.width + dimBlock.x - 1)/dimBlock.x, (A.height + dimBlock.y - 1)/dimBlock.y );
lambdaKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, d_D, d_Out);
err = cudaThreadSynchronize();
//printf("Run lambda kernel: %s\n", cudaGetErrorString(err));
// read Out from device memory
err = cudaMemcpy(Out.elements, d_Out.elements, size, cudaMemcpyDeviceToHost);
//printf("Copy output matrix off of device: %s\n",cudaGetErrorString(err));
// free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
cudaFree(d_D.elements);
cudaFree(d_Out.elements);
}
__global__
void FKernel(Matrix d_A, Matrix d_B, Matrix d_C, Matrix d_Out) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int idx = row*d_A.width+col;
if(row >= d_A.height || col >= d_A.width) return;
d_Out.elements[idx] = (d_A.elements[idx] + d_B.elements[idx] + d_C.elements[idx]) / 3;
}
void Fun(Matrix A, Matrix B, Matrix C, Matrix Out) {
// load A to device memory
Matrix d_A;
d_A.width = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(double);
cudaError_t err = cudaMalloc(&d_A.elements, size);
//printf("CUDA malloc A: %s\n", cudaGetErrorString(err));
cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
//printf("Copy input matrix A to device: %s\n", cudaGetErrorString(err));
// load B to device memory
Matrix d_B;
d_B.width = B.width;
d_B.height = B.height;
err = cudaMalloc(&d_B.elements, size);
//printf("CUDA malloc B: %s\n", cudaGetErrorString(err));
cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
//printf("Copy input matrix B to device: %s\n", cudaGetErrorString(err));
// load C to device memory
Matrix d_C;
d_C.width = C.width;
d_C.height = C.height;
err = cudaMalloc(&d_C.elements, size);
//printf("CUDA malloc C: %s\n", cudaGetErrorString(err));
cudaMemcpy(d_C.elements, C.elements, size, cudaMemcpyHostToDevice);
//printf("Copy input matrix C to device: %s\n", cudaGetErrorString(err));
// allocate Out in device memory
Matrix d_Out;
d_Out.width = Out.width; d_Out.height = Out.height;
size = Out.width * Out.height * sizeof(double);
err = cudaMalloc(&d_Out.elements, size);
//printf("CUDA malloc Out: %s\n", cudaGetErrorString(err));
// invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid( (A.width + dimBlock.x - 1)/dimBlock.x, (A.height + dimBlock.y - 1)/dimBlock.y );
FKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, d_Out);
err = cudaThreadSynchronize();
//printf("Run kernel: %s\n", cudaGetErrorString(err));
// read Out from device memory
err = cudaMemcpy(Out.elements, d_Out.elements, size, cudaMemcpyDeviceToHost);
//printf("Copy output matrix off of device: %s\n",cudaGetErrorString(err));
// free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
cudaFree(d_Out.elements);
}
void nearestDSmax_RE(Matrix Y, Matrix maxRowSum, Matrix maxColSum, double totalSum, double maxLoops, double precision, Matrix F) {
int size = Y.height * Y.width * sizeof(double);
  // lambda1 = zeros(size(Y));
Matrix lambda1, lambda2, lambda3;
lambda1.height = lambda2.height = lambda3.height = Y.height;
lambda1.width = lambda2.width = lambda3.width = Y.width;
lambda1.elements = (double*)malloc(size);
lambda2.elements = (double*)malloc(size);
lambda3.elements = (double*)malloc(size);
zeros(lambda1);
// lambda2 = lambda1; lambda3 = lambda1;
memcpy(lambda2.elements, lambda1.elements, size);
memcpy(lambda3.elements, lambda1.elements, size);
// F1 = totalsum * (Y ./ sum(Y(:)));
Matrix F1, F2, F3;
F1.height = F2.height = F3.height = Y.height;
F1.width = F2.width = F3.width = Y.width;
F1.elements = (double*)malloc(size);
F2.elements = (double*)malloc(size);
F3.elements = (double*)malloc(size);
//printf("before sum(Y(:))\n");
// sum(Y(:))
thrust::host_vector<double> h_Y(Y.elements, Y.elements + Y.width * Y.height);
thrust::device_vector<double> d_Y = h_Y;
double Ysum = thrust::reduce(d_Y.begin(), d_Y.end(), (double) 0, thrust::plus<double>());
//printf("after sum(Y(:))\n");
// Y ./ sum(Y(:))
Matrix YdivYsum;
YdivYsum.width = Y.width;
YdivYsum.height = Y.height;
YdivYsum.elements = (double*)malloc(size);
matTimesScaler(Y, 1/Ysum, YdivYsum);
matTimesScaler(YdivYsum, totalSum, F1);
// F2 = F1; F3 = F1;
memcpy(F2.elements, F1.elements, size);
memcpy(F3.elements, F1.elements, size);
Matrix H1, H2, H3;
H1.width = H2.width = H3.width = Y.width;
H1.height = H2.height = H3.height = Y.height;
H1.elements = (double*)malloc(size);
H2.elements = (double*)malloc(size);
H3.elements = (double*)malloc(size);
Matrix F1eps, F2eps, F3eps;
F1eps.width = F2eps.width = F3eps.width = Y.width;
F1eps.height = F2eps.height = F3eps.height = Y.height;
F1eps.elements = (double*)malloc(size);
F2eps.elements = (double*)malloc(size);
F3eps.elements = (double*)malloc(size);
Matrix YdivF1eps, YdivF2eps, YdivF3eps;
YdivF1eps.width = YdivF2eps.width = YdivF3eps.width = Y.width;
YdivF1eps.height = YdivF2eps.height = YdivF3eps.height = Y.height;
YdivF1eps.elements = (double*)malloc(size);
YdivF2eps.elements = (double*)malloc(size);
YdivF3eps.elements = (double*)malloc(size);
Matrix negH2, negH3;
negH2.width = negH3.width = Y.width;
negH2.height = negH3.height = Y.height;
negH2.elements = (double*)malloc(size);
negH3.elements = (double*)malloc(size);
// transposed matrices
Matrix H1t, negH1t, Yt, F1t, negH3t;
H1t.width = negH1t.width = Yt.width = F1t.width = Y.height;
H1t.height = negH1t.height = Yt.height = F1t.height = Y.width;
negH3t.height = H3.width;
negH3t.width = H3.height;
negH3t.elements = (double*)malloc(size);
H1t.elements = (double*)malloc(size);
negH1t.elements = (double*)malloc(size);
Yt.elements = (double*)malloc(size);
F1t.elements = (double*)malloc(size);
Matrix Fdiff1, Fdiff2;
Fdiff1.width = Fdiff2.width = Y.width;
Fdiff1.height = Fdiff2.height = Y.height;
Fdiff1.elements = (double*)malloc(size);
Fdiff2.elements = (double*)malloc(size);
// F3reshape is a col vector
Matrix F3reshape;
F3reshape.width = 1;
F3reshape.height = Y.width*Y.height;
F3reshape.elements = (double*)malloc(size);
Matrix maxRowSumT;
maxRowSumT.width = Y.height;
maxRowSumT.height = 1;
maxRowSumT.elements = (double*)malloc(maxRowSumT.width*sizeof(double));
//for t = 1 : maxLoops
for(int t=0; t < maxLoops; t++) {
// Max row sum
// H1 = lambda1 - (Y ./ (F3+eps));
H(lambda1, Y, F3, H1);
//F1 = maxColSumP(Y', -H1', maxRowSum', precision)';
//-H1'
transpose(H1, H1t);
matTimesScaler(H1t, -1, negH1t);
//Y'
transpose(Y, Yt);
//maxRowSum'
transpose(maxRowSum, maxRowSumT);
// transpose(F1, F1t);
//maxColSumP(Y', -H1', maxRowSum', precision)'
maxColSumP(Yt, negH1t, maxRowSumT, 0.01, F1t);
//F1
transpose(F1t, F1);
// lambda1 = lambda1 - (Y ./ (F3+eps)) + (Y ./ (F1+eps));
lambda(lambda1, Y, F3, F1, lambda1);
// Max col sum
// H2 = lambda2 - (Y ./ (F1+eps));
H(lambda2, Y, F1, H2);
// F2 = maxColSumP (Y, -H2, maxColSum, precision);
matTimesScaler(H2, -1, negH2);
maxColSumP(Y, negH2, maxColSum, precision, F2);
// lambda2 = lambda2 - (Y ./ (F1+eps)) + (Y ./ (F2+eps));
lambda(lambda2, Y, F1, F2, lambda2);
// Total sum
// H3 = lambda3 - (Y ./ (F2 + eps));
H(lambda3, Y, F2, H3);
matTimesScaler(H3, -1, negH3);
// F3 = reshape( exactTotalSum (Y(:), -H3(:), totalSum, precision), size(Y) );
transpose(Y, Yt);
transpose(negH3, negH3t);
exactTotalSum(Yt, negH3t, totalSum, precision, F3reshape);
reshape(F3reshape, F3);
//lambda3 = lambda3 - (Y ./ (F2+eps)) + (Y ./ (F3+eps));
lambda(lambda3, Y, F2, F3, lambda3);
matSub(F1, F2, Fdiff1);
matSub(F1, F3, Fdiff2);
// max and min of Fdiff1
thrust::host_vector<double> h_Fdiff1(Fdiff1.elements, Fdiff1.elements + Fdiff1.width*Fdiff1.height);
thrust::device_vector<double> d_Fdiff1 = h_Fdiff1;
thrust::detail::normal_iterator<thrust::device_ptr<double> > Fdiff1max = thrust::max_element(d_Fdiff1.begin(), d_Fdiff1.end());
thrust::detail::normal_iterator<thrust::device_ptr<double> > Fdiff1min = thrust::min_element(d_Fdiff1.begin(), d_Fdiff1.end());
// max and min of Fdiff2
thrust::host_vector<double> h_Fdiff2(Fdiff2.elements, Fdiff2.elements + Fdiff2.width*Fdiff2.height);
thrust::device_vector<double> d_Fdiff2 = h_Fdiff2;
thrust::detail::normal_iterator<thrust::device_ptr<double> > Fdiff2max = thrust::max_element(d_Fdiff2.begin(), d_Fdiff2.end());
thrust::detail::normal_iterator<thrust::device_ptr<double> > Fdiff2min = thrust::min_element(d_Fdiff2.begin(), d_Fdiff2.end());
double fdMax1 = max(*Fdiff1max, fabs(*Fdiff1min));
double fdMax2 = max(*Fdiff2max, fabs(*Fdiff2min));
if(fabs(fdMax1) < precision && fabs(fdMax2) < precision)
break;
} // end of t for loop
// F = (F1 + F2 + F3) / 3;
Fun(F1, F2, F3, F);
free(lambda1.elements);
free(lambda2.elements);
free(lambda3.elements);
free(F1.elements);
free(F2.elements);
free(F3.elements);
free(H1.elements);
free(H2.elements);
free(H3.elements);
free(F1eps.elements);
free(F2eps.elements);
free(F3eps.elements);
free(YdivF1eps.elements);
free(YdivF2eps.elements);
free(YdivF3eps.elements);
free(negH1t.elements);
free(negH2.elements);
free(negH3.elements);
free(H1t.elements);
free(Yt.elements);
free(maxRowSumT.elements);
free(F1t.elements);
free(F3reshape.elements);
free(Fdiff1.elements);
free(Fdiff2.elements);
}
|
7cda3406c2469cc2adedf2a7e6609ddf1cbbdce1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <utility>
#include <cstring>
#include <stdlib.h>
#include <hip/hip_runtime.h>
using namespace std;
int main(){
ifstream in;
in.open("enc_mat");
int r;
in >> r;
vector<pair<bool,vector<int> > > encmat(r);
for(int i = 0; i<r; i++){
in >> encmat[i].first;
int size;
in >> size;
vector<int> v(size);
for(int j=0;j<size;j++){
in >> v[j];
}
encmat[i].second = v;
v.clear();
}
int masksize = (r%8>0 ? r/8+1 : r/8);
unsigned char* mask = (unsigned char*)malloc(masksize*sizeof(unsigned char));
memset(mask,0,masksize);
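	// Decode the run-length encoding: within each row, runs alternate between 1s and 0s
	// starting from that row's 'flag'; for runs of 1s the corresponding bit positions
	// are set in 'mask', most-significant bit first within each byte.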
for(int i=0;i<r;i++){
bool flag = encmat[i].first;
vector<int> v = encmat[i].second;
int count = 0;
for(int j=0; j<v.size(); j++){
if(flag){
for(int k=count;k<count+v[j];k++){
mask[k/8] |= (0x80 >> k%8);
}
count+=v[j];
flag=0;
}
else{
count += v[j];
flag=1;
}
}
}
for(int i=0;i<masksize;i++)
printf("%d\n",mask[i]);
return 0;
}
|
7cda3406c2469cc2adedf2a7e6609ddf1cbbdce1.cu
|
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <utility>
#include <cstring>
#include <stdlib.h>
#include <cuda.h>
using namespace std;
int main(){
ifstream in;
in.open("enc_mat");
int r;
in >> r;
vector<pair<bool,vector<int> > > encmat(r);
for(int i = 0; i<r; i++){
in >> encmat[i].first;
int size;
in >> size;
vector<int> v(size);
for(int j=0;j<size;j++){
in >> v[j];
}
encmat[i].second = v;
v.clear();
}
int masksize = (r%8>0 ? r/8+1 : r/8);
unsigned char* mask = (unsigned char*)malloc(masksize*sizeof(unsigned char));
memset(mask,0,masksize);
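	// Decode the run-length encoding: within each row, runs alternate between 1s and 0s
	// starting from that row's 'flag'; for runs of 1s the corresponding bit positions
	// are set in 'mask', most-significant bit first within each byte.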
for(int i=0;i<r;i++){
bool flag = encmat[i].first;
vector<int> v = encmat[i].second;
int count = 0;
for(int j=0; j<v.size(); j++){
if(flag){
for(int k=count;k<count+v[j];k++){
mask[k/8] |= (0x80 >> k%8);
}
count+=v[j];
flag=0;
}
else{
count += v[j];
flag=1;
}
}
}
for(int i=0;i<masksize;i++)
printf("%d\n",mask[i]);
return 0;
}
|
1beaa8d187936a874e21dd6d9f35de23f693ea79.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <limits>
#include <vector>
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void DropoutFixedForward(const int n, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
template <typename Dtype>
void DropoutFixedLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (Caffe::phase() == Caffe::TRAIN) {
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DropoutFixedForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(count, bottom_data, top_data);
caffe_gpu_scal(count, Dtype(1.0 - threshold_), top_data);
}
}
template <typename Dtype>
__global__ void DropoutFixedBackward(const int n, const Dtype* in_diff,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
}
}
template <typename Dtype>
void DropoutFixedLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
vector<Blob<Dtype>*>* bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
if (Caffe::phase() == Caffe::TRAIN) {
const unsigned int* mask =
static_cast<const unsigned int*>(rand_vec_.gpu_data());
const int count = (*bottom)[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DropoutFixedBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
}
}
INSTANTIATE_CLASS(DropoutFixedLayer);
} // namespace caffe
|
1beaa8d187936a874e21dd6d9f35de23f693ea79.cu
|
#include <algorithm>
#include <limits>
#include <vector>
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void DropoutFixedForward(const int n, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
template <typename Dtype>
void DropoutFixedLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (Caffe::phase() == Caffe::TRAIN) {
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutFixedForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(count, bottom_data, top_data);
caffe_gpu_scal(count, Dtype(1.0 - threshold_), top_data);
}
}
template <typename Dtype>
__global__ void DropoutFixedBackward(const int n, const Dtype* in_diff,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
}
}
template <typename Dtype>
void DropoutFixedLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
vector<Blob<Dtype>*>* bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
if (Caffe::phase() == Caffe::TRAIN) {
const unsigned int* mask =
static_cast<const unsigned int*>(rand_vec_.gpu_data());
const int count = (*bottom)[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutFixedBackward<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
}
}
INSTANTIATE_CLASS(DropoutFixedLayer);
} // namespace caffe
|
1c1a3843f72236c300004d0d6efa2139dbb30b44.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* test_mc.c
*
* Created on: 02-Feb-2009
* Author: alee
*/
#include <stdio.h>
#include "rng.h"
#include <cutil.h>
#include "reduce.h"
#include "mc_gauss.h"
#include "mc_mix_gauss.h"
#include "mc_mix_gauss_mu.h"
#include "gauss.h"
#include "test_functions.h"
#include "mc_gauss_mv.h"
#include "mix_gauss.h"
#include "matrix.h"
#include "order.h"
void test_mcgaussmv_nolog(int N, int D, float* h_args_p, float* h_args_q, float* props) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* warray = (float*) malloc(N * sizeof(float));
float sum[2];
double sumd[2];
float sumw = 0;
double sumwd = 0;
for (int j = 0; j < D; j++) {
sum[j] = 0;
sumd[j] = 0;
}
cutResetTimer(hTimer);
cutStartTimer(hTimer);
is_ref_nn_mv(N, D, props, warray, h_args_p, h_args_q, 0);
for (int i = 0; i < N; i++) {
for (int j = 0; j < D; j++) {
sumd[j] += warray[i] * vector_get(props, D, i)[j];
}
sumwd += warray[i];
}
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f\n", time);
printf("HOST RESULT = (%f, %f)\n", sumd[0] / sumwd, sumd[1] / sumwd);
free(warray);
float* d_array;
hipMalloc((void **) &d_array, N * D * sizeof(float));
float* d_warray;
hipMalloc((void **) &d_warray, N * sizeof(float));
hipMemcpy(d_array, props, N * D * sizeof(float), hipMemcpyHostToDevice);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
is_nn_mv(N, D, d_array, d_warray, h_args_p, h_args_q, 0, 32, 128);
hipDeviceSynchronize();
multiply(N, D, d_array, d_array, d_warray, 32, 128);
reduce(N, d_warray, sumw, 32, 128);
reduce(N, D, d_array, sum, 32, 128);
hipDeviceSynchronize();
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f\n", time);
printf("RESULT = (%f,%f)\n", sum[0] / sumw, sum[1] / sumw);
hipFree(d_array);
hipFree(d_warray);
}
void test_mcgaussmv_log(int N, int D, float* h_args_p, float* h_args_q, float* props) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* warray = (float*) malloc(N * sizeof(float));
float sum[2];
double sumd[2];
float sumw = 0;
double sumwd = 0;
for (int j = 0; j < D; j++) {
sum[j] = 0;
sumd[j] = 0;
}
cutResetTimer(hTimer);
cutStartTimer(hTimer);
is_ref_nn_mv(N, D, props, warray, h_args_p, h_args_q, 1);
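    // This _log variant gets the reference weights back as log-weights; subtracting the
    // maximum log-weight before exponentiating keeps exp() from underflowing, and the
    // constant shift cancels when the estimate is normalized by sumwd below.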
float maxlw = warray[0];
for (int i = 1; i < N; i++) {
maxlw = max(maxlw, warray[i]);
}
for (int i = 0; i < N; i++) {
warray[i] -= maxlw;
warray[i] = exp(warray[i]);
for (int j = 0; j < D; j++) {
sumd[j] += warray[i] * vector_get(props, D, i)[j];
}
sumwd += warray[i];
}
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f\n", time);
printf("HOST RESULT = (%f, %f)\n", sumd[0] / sumwd, sumd[1] / sumwd);
free(warray);
float* d_array;
hipMalloc((void **) &d_array, N * D * sizeof(float));
float* d_warray;
hipMalloc((void **) &d_warray, N * sizeof(float));
hipMemcpy(d_array, props, N * D * sizeof(float), hipMemcpyHostToDevice);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
is_nn_mv(N, D, d_array, d_warray, h_args_p, h_args_q, 1, 32, 128);
hipDeviceSynchronize();
maximum(N, d_warray, maxlw, 32, 128);
add(N, d_warray, d_warray, -maxlw, 32, 128);
exp(N, d_warray, d_warray, 32, 128);
multiply(N, D, d_array, d_array, d_warray, 32, 128);
reduce(N, d_warray, sumw, 32, 128);
reduce(N, D, d_array, sum, 32, 128);
hipDeviceSynchronize();
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f\n", time);
printf("RESULT = (%f,%f)\n", sum[0] / sumw, sum[1] / sumw);
hipFree(d_array);
hipFree(d_warray);
}
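// ----------------------------------------------------------------------------
// Hypothetical helper (not part of the original file): a minimal host-side sketch
// of the self-normalized importance-sampling mean that the two test paths above
// assemble on the GPU with is_nn_mv, multiply and reduce, shown for one coordinate:
//   estimate = sum_i w_i * x_i / sum_i w_i
static double isWeightedMeanReference(const float* x, const float* w, int n) {
    double num = 0.0, den = 0.0;
    for (int i = 0; i < n; ++i) {
        num += (double) w[i] * x[i]; // weighted sum of samples
        den += (double) w[i];        // total weight
    }
    return den != 0.0 ? num / den : 0.0;
}
// ----------------------------------------------------------------------------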
// importance sampling with multivariate Gaussian proposal and target distributions
void test_mcgauss_mv(int N) {
const int D = 2;
printf("\nIS: Gaussian-Gaussian 2D\n");
float h_args_p[1 + D * D + D];
float cov_p[D * D];
matrix_set(cov_p, D, D, 0, 0, 1.0f);
matrix_set(cov_p, D, D, 0, 1, 0.5f);
matrix_set(cov_p, D, D, 1, 0, 0.5f);
matrix_set(cov_p, D, D, 1, 1, 2.0f);
compute_c1_c2(cov_p, D, h_args_p[0], h_args_p + 1);
h_args_p[5] = 1;
h_args_p[6] = 1;
float h_args_q[1 + D * D + D];
float cov_q[D * D];
matrix_set(cov_q, D, D, 0, 0, 1.0f);
matrix_set(cov_q, D, D, 0, 1, 0.0f);
matrix_set(cov_q, D, D, 1, 0, 0.0f);
matrix_set(cov_q, D, D, 1, 1, 1.0f);
compute_c1_c2(cov_q, D, h_args_q[0], h_args_q + 1);
h_args_q[5] = 0;
h_args_q[6] = 0;
float* array = (float*) malloc(N * D * sizeof(float));
populate_randn(array, N * D);
hipDeviceSynchronize();
test_mcgaussmv_nolog(N, D, h_args_p, h_args_q, array);
test_mcgaussmv_log(N, D, h_args_p, h_args_q, array);
free(array);
}
// importance sampling with univariate Gaussian proposal and target distributions
void test_mcgauss(int N) {
unsigned int hTimer;
double ctime, gtime;
cutCreateTimer(&hTimer);
printf("\nIS: Gaussian-Gaussian 1D\n");
float h_args_p[3];
float h_args_q[3];
// p is N(2,0.25), q is N(0,1)
compute_c1_c2(0.5f, h_args_p[0], h_args_p[1]);
compute_c1_c2(1.0f, h_args_q[0], h_args_q[1]);
h_args_p[2] = 2;
h_args_q[2] = 0;
float* array = (float*) malloc(N * sizeof(float));
float* warray = (float*) malloc(N * sizeof(float));
populate_randn(array, N);
float h_sum = 0;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
is_ref_nn(N, array, warray, h_args_p, h_args_q);
for (int i = 0; i < N; i++) {
h_sum += array[i] * warray[i];
}
cutStopTimer(hTimer);
ctime = cutGetTimerValue(hTimer);
printf("Time = %f\n", ctime);
printf("HOST RESULT = %f\n", h_sum / N);
free(array);
free(warray);
float* d_array;
hipMalloc((void **) &d_array, N * sizeof(float));
float* d_array2;
hipMalloc((void **) &d_array2, N * sizeof(float));
float* d_warray;
hipMalloc((void **) &d_warray, N * sizeof(float));
populate_randn_d(d_array, N);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
is_nn(N, d_array, d_warray, h_args_p, h_args_q, 32, 128);
hipDeviceSynchronize();
multiply(N, d_array, d_array2, d_warray, 32, 128);
hipDeviceSynchronize();
reduce(N, d_array2, h_sum, 32, 128);
hipDeviceSynchronize();
cutStopTimer(hTimer);
gtime = cutGetTimerValue(hTimer);
printf("Time = %f\n", gtime);
printf("RESULT = %f\n", h_sum / N);
printf("speedup = %f\n", ctime / gtime);
hipFree(d_array);
hipFree(d_array2);
hipFree(d_warray);
}
// importance sampling with target distribution being a mixture of univariate Gaussians and
// proposal distribution being Gaussian
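// The target here is a k-component Gaussian mixture,
//
//   p(x) = sum_{j=1..k} pi_j * N(x; mu_j, sigma_j^2),
//
// with component means 0 and 3 set explicitly below; the arguments passed to
// compute_ci1_ci2 presumably encode each component's scale and weight.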
void test_mixgauss(int N) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
printf("\nIS: Mixture of Gaussians 1D\n");
const int k = 2;
float h_args_p[1 + 3 * k];
float h_args_q[3];
// p is a 2-component Gaussian mixture with means 0 and 3; q is N(0,1)
h_args_p[0] = k;
h_args_p[1] = 0;
h_args_p[2] = 3;
compute_ci1_ci2(0.5f, 0.5f, h_args_p[3], h_args_p[5]);
compute_ci1_ci2(0.5f, 0.5f, h_args_p[4], h_args_p[6]);
compute_c1_c2(1.0f, h_args_q[0], h_args_q[1]);
h_args_q[2] = 0;
float* array = (float*) malloc(N * sizeof(float));
float* warray = (float*) malloc(N * sizeof(float));
populate_randn(array, N);
hipDeviceSynchronize();
float h_sum = 0;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
is_ref_nmni(N, array, warray, h_args_p, h_args_q);
for (int i = 0; i < N; i++) {
h_sum += array[i] * warray[i];
}
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f\n", time);
printf("HOST RESULT = %f\n", h_sum / N);
free(array);
free(warray);
float* d_array;
hipMalloc((void **) &d_array, N * sizeof(float));
float* d_array2;
hipMalloc((void **) &d_array2, N * sizeof(float));
float* d_warray;
hipMalloc((void **) &d_warray, N * sizeof(float));
populate_randn_d(d_array, N);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
is_nmni(N, d_array, d_warray, h_args_p, h_args_q, 32, 128);
hipDeviceSynchronize();
multiply(N, d_array, d_array2, d_warray, 32, 128);
hipDeviceSynchronize();
reduce(N, d_array2, h_sum, 32, 128);
hipDeviceSynchronize();
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f\n", time);
printf("RESULT = %f\n", h_sum / N);
hipFree(d_array);
hipFree(d_array2);
hipFree(d_warray);
}
// importance sampling with target distribution being the posterior distribution of the means of a
// Gaussian mixture model given 100 observations with known and shared variance, equal weights with
// uniform prior on (-10,10)^4. actual means are -3, 0, 3, and 6.
// proposal distribution is uniform (-10,10)^4.
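// With equal component weights 1/D, a shared known sigma and a flat prior on
// (-10,10)^D, the (unnormalised) log target evaluated by is_ref_mgmu_mv and
// is_mgmu_mv should be, up to an additive constant,
//
//   log p(mu | y) = sum_{l=1..L} log( (1/D) * sum_{d=1..D} N(y_l; mu_d, sigma^2) )
//
// for mu inside the prior box, and -infinity outside it.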
void test_mix(int N) {
const int D = 4;
int nb = 512;
int nt = 128;
const int L = 100;
float sigma = 0.55f;
float mus[4];
mus[0] = -3;
mus[1] = 0;
mus[2] = 3;
mus[3] = 6;
float data_array[L];
generate_mix_data(D, sigma, mus, data_array, L);
float c1, c2;
compute_ci1_ci2(sigma, 1.0f / D, c1, c2);
float h_args_p[L + 5];
h_args_p[0] = L;
for (int i = 0; i < L; i++) {
h_args_p[i + 1] = data_array[i];
}
h_args_p[L + 1] = c1;
h_args_p[L + 2] = c2;
h_args_p[L + 3] = -10;
h_args_p[L + 4] = 10;
float h_args_q[2];
h_args_q[0] = -10;
h_args_q[1] = 10;
unsigned int hTimer;
double time1, time2;
cutCreateTimer(&hTimer);
printf("\nIS: Mixture of Gaussians: Mean Inference\n");
float* d_array;
hipMalloc((void **) &d_array, N * D * sizeof(float));
populate_rand_d(d_array, N * D);
multiply(N * D, d_array, d_array, 20, nb, nt);
hipDeviceSynchronize();
add(N * D, d_array, d_array, -10, nb, nt);
hipDeviceSynchronize();
float* array = (float*) malloc(N * D * sizeof(float));
float* warray = (float*) malloc(N * sizeof(float));
float sum[D];
double sumd[D];
float sumw = 0;
double sumwd = 0;
for (int j = 0; j < D; j++) {
sum[j] = 0;
sumd[j] = 0;
}
hipMemcpy(array, d_array, N * D * sizeof(float), hipMemcpyDeviceToHost);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
is_ref_mgmu_mv(N, D, array, warray, h_args_p, h_args_q, 1);
// cutStopTimer(hTimer);
// time1 = cutGetTimerValue(hTimer);
// printf("Time = %f\n", time1);
float maxlw = warray[0];
for (int i = 1; i < N; i++) {
maxlw = max(maxlw, warray[i]);
}
for (int i = 0; i < N; i++) {
warray[i] -= maxlw;
warray[i] = exp(warray[i]);
for (int j = 0; j < D; j++) {
sumd[j] += warray[i] * vector_get(array, D, i)[j];
}
sumwd += warray[i];
}
cutStopTimer(hTimer);
time1 = cutGetTimerValue(hTimer);
printf("Time = %f\n", time1);
printf("HOST RESULT = (%f, %f, %f, %f)\n", sumd[0] / sumwd, sumd[1] / sumwd, sumd[2] / sumwd,
sumd[3] / sumwd);
free(warray);
float* d_warray;
hipMalloc((void **) &d_warray, N * sizeof(float));
cutResetTimer(hTimer);
cutStartTimer(hTimer);
is_mgmu_mv(N, D, d_array, d_warray, h_args_p, h_args_q, 1, nb, nt);
hipDeviceSynchronize();
// cutStopTimer(hTimer);
// time2 = cutGetTimerValue(hTimer);
// printf("Time = %f\n", time2);
maximum(N, d_warray, maxlw, nb, nt);
add(N, d_warray, d_warray, -maxlw, nb, nt);
exp(N, d_warray, d_warray, nb, nt);
multiply(N, D, d_array, d_array, d_warray, nb, nt);
reduce(N, d_warray, sumw, nb, nt);
reduce(N, D, d_array, sum, nb, nt);
hipDeviceSynchronize();
cutStopTimer(hTimer);
time2 = cutGetTimerValue(hTimer);
printf("Time = %f\n", time2);
printf("RESULT = (%f, %f, %f, %f)\n", sum[0] / sumw, sum[1] / sumw, sum[2] / sumw, sum[3]
/ sumw);
hipFree(d_array);
hipFree(d_warray);
printf("speedup = %f\n", time1 / time2);
}
int main(int argc, char **argv) {
seed_rng();
int N = 1048576;
// int N = 131072;
// int N = 65536;
// int N = 16777216;
test_mcgauss(N);
test_mcgauss_mv(N);
test_mixgauss(N);
test_mix(N);
kill_rng();
}
|
1c1a3843f72236c300004d0d6efa2139dbb30b44.cu
|
/*
* test_mc.c
*
* Created on: 02-Feb-2009
* Author: alee
*/
#include <stdio.h>
#include "rng.h"
#include <cutil.h>
#include "reduce.h"
#include "mc_gauss.h"
#include "mc_mix_gauss.h"
#include "mc_mix_gauss_mu.h"
#include "gauss.h"
#include "test_functions.h"
#include "mc_gauss_mv.h"
#include "mix_gauss.h"
#include "matrix.h"
#include "order.h"
void test_mcgaussmv_nolog(int N, int D, float* h_args_p, float* h_args_q, float* props) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* warray = (float*) malloc(N * sizeof(float));
float sum[2];
double sumd[2];
float sumw = 0;
double sumwd = 0;
for (int j = 0; j < D; j++) {
sum[j] = 0;
sumd[j] = 0;
}
cutResetTimer(hTimer);
cutStartTimer(hTimer);
is_ref_nn_mv(N, D, props, warray, h_args_p, h_args_q, 0);
for (int i = 0; i < N; i++) {
for (int j = 0; j < D; j++) {
sumd[j] += warray[i] * vector_get(props, D, i)[j];
}
sumwd += warray[i];
}
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f\n", time);
printf("HOST RESULT = (%f, %f)\n", sumd[0] / sumwd, sumd[1] / sumwd);
free(warray);
float* d_array;
cudaMalloc((void **) &d_array, N * D * sizeof(float));
float* d_warray;
cudaMalloc((void **) &d_warray, N * sizeof(float));
cudaMemcpy(d_array, props, N * D * sizeof(float), cudaMemcpyHostToDevice);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
is_nn_mv(N, D, d_array, d_warray, h_args_p, h_args_q, 0, 32, 128);
cudaThreadSynchronize();
multiply(N, D, d_array, d_array, d_warray, 32, 128);
reduce(N, d_warray, sumw, 32, 128);
reduce(N, D, d_array, sum, 32, 128);
cudaThreadSynchronize();
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f\n", time);
printf("RESULT = (%f,%f)\n", sum[0] / sumw, sum[1] / sumw);
cudaFree(d_array);
cudaFree(d_warray);
}
void test_mcgaussmv_log(int N, int D, float* h_args_p, float* h_args_q, float* props) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* warray = (float*) malloc(N * sizeof(float));
float sum[2];
double sumd[2];
float sumw = 0;
double sumwd = 0;
for (int j = 0; j < D; j++) {
sum[j] = 0;
sumd[j] = 0;
}
cutResetTimer(hTimer);
cutStartTimer(hTimer);
is_ref_nn_mv(N, D, props, warray, h_args_p, h_args_q, 1);
float maxlw = warray[0];
for (int i = 1; i < N; i++) {
maxlw = max(maxlw, warray[i]);
}
for (int i = 0; i < N; i++) {
warray[i] -= maxlw;
warray[i] = exp(warray[i]);
for (int j = 0; j < D; j++) {
sumd[j] += warray[i] * vector_get(props, D, i)[j];
}
sumwd += warray[i];
}
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f\n", time);
printf("HOST RESULT = (%f, %f)\n", sumd[0] / sumwd, sumd[1] / sumwd);
free(warray);
float* d_array;
cudaMalloc((void **) &d_array, N * D * sizeof(float));
float* d_warray;
cudaMalloc((void **) &d_warray, N * sizeof(float));
cudaMemcpy(d_array, props, N * D * sizeof(float), cudaMemcpyHostToDevice);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
is_nn_mv(N, D, d_array, d_warray, h_args_p, h_args_q, 1, 32, 128);
cudaThreadSynchronize();
maximum(N, d_warray, maxlw, 32, 128);
add(N, d_warray, d_warray, -maxlw, 32, 128);
exp(N, d_warray, d_warray, 32, 128);
multiply(N, D, d_array, d_array, d_warray, 32, 128);
reduce(N, d_warray, sumw, 32, 128);
reduce(N, D, d_array, sum, 32, 128);
cudaThreadSynchronize();
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f\n", time);
printf("RESULT = (%f,%f)\n", sum[0] / sumw, sum[1] / sumw);
cudaFree(d_array);
cudaFree(d_warray);
}
// importance sampling with multivariate Gaussian proposal and target distributions
void test_mcgauss_mv(int N) {
const int D = 2;
printf("\nIS: Gaussian-Gaussian 2D\n");
float h_args_p[1 + D * D + D];
float cov_p[D * D];
matrix_set(cov_p, D, D, 0, 0, 1.0f);
matrix_set(cov_p, D, D, 0, 1, 0.5f);
matrix_set(cov_p, D, D, 1, 0, 0.5f);
matrix_set(cov_p, D, D, 1, 1, 2.0f);
compute_c1_c2(cov_p, D, h_args_p[0], h_args_p + 1);
h_args_p[5] = 1;
h_args_p[6] = 1;
float h_args_q[1 + D * D + D];
float cov_q[D * D];
matrix_set(cov_q, D, D, 0, 0, 1.0f);
matrix_set(cov_q, D, D, 0, 1, 0.0f);
matrix_set(cov_q, D, D, 1, 0, 0.0f);
matrix_set(cov_q, D, D, 1, 1, 1.0f);
compute_c1_c2(cov_q, D, h_args_q[0], h_args_q + 1);
h_args_q[5] = 0;
h_args_q[6] = 0;
float* array = (float*) malloc(N * D * sizeof(float));
populate_randn(array, N * D);
cudaThreadSynchronize();
test_mcgaussmv_nolog(N, D, h_args_p, h_args_q, array);
test_mcgaussmv_log(N, D, h_args_p, h_args_q, array);
free(array);
}
// importance sampling with univariate Gaussian proposal and target distributions
void test_mcgauss(int N) {
unsigned int hTimer;
double ctime, gtime;
cutCreateTimer(&hTimer);
printf("\nIS: Gaussian-Gaussian 1D\n");
float h_args_p[3];
float h_args_q[3];
// p is N(2,0.25), q is N(0,1)
compute_c1_c2(0.5f, h_args_p[0], h_args_p[1]);
compute_c1_c2(1.0f, h_args_q[0], h_args_q[1]);
h_args_p[2] = 2;
h_args_q[2] = 0;
float* array = (float*) malloc(N * sizeof(float));
float* warray = (float*) malloc(N * sizeof(float));
populate_randn(array, N);
float h_sum = 0;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
is_ref_nn(N, array, warray, h_args_p, h_args_q);
for (int i = 0; i < N; i++) {
h_sum += array[i] * warray[i];
}
cutStopTimer(hTimer);
ctime = cutGetTimerValue(hTimer);
printf("Time = %f\n", ctime);
printf("HOST RESULT = %f\n", h_sum / N);
free(array);
free(warray);
float* d_array;
cudaMalloc((void **) &d_array, N * sizeof(float));
float* d_array2;
cudaMalloc((void **) &d_array2, N * sizeof(float));
float* d_warray;
cudaMalloc((void **) &d_warray, N * sizeof(float));
populate_randn_d(d_array, N);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
is_nn(N, d_array, d_warray, h_args_p, h_args_q, 32, 128);
cudaThreadSynchronize();
multiply(N, d_array, d_array2, d_warray, 32, 128);
cudaThreadSynchronize();
reduce(N, d_array2, h_sum, 32, 128);
cudaThreadSynchronize();
cutStopTimer(hTimer);
gtime = cutGetTimerValue(hTimer);
printf("Time = %f\n", gtime);
printf("RESULT = %f\n", h_sum / N);
printf("speedup = %f\n", ctime / gtime);
cudaFree(d_array);
cudaFree(d_array2);
cudaFree(d_warray);
}
// importance sampling with target distribution being a mixture of univariate Gaussians and
// proposal distribution being Gaussian
void test_mixgauss(int N) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
printf("\nIS: Mixture of Gaussians 1D\n");
const int k = 2;
float h_args_p[1 + 3 * k];
float h_args_q[3];
// p is a 2-component Gaussian mixture with means 0 and 3; q is N(0,1)
h_args_p[0] = k;
h_args_p[1] = 0;
h_args_p[2] = 3;
compute_ci1_ci2(0.5f, 0.5f, h_args_p[3], h_args_p[5]);
compute_ci1_ci2(0.5f, 0.5f, h_args_p[4], h_args_p[6]);
compute_c1_c2(1.0f, h_args_q[0], h_args_q[1]);
h_args_q[2] = 0;
float* array = (float*) malloc(N * sizeof(float));
float* warray = (float*) malloc(N * sizeof(float));
populate_randn(array, N);
cudaThreadSynchronize();
float h_sum = 0;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
is_ref_nmni(N, array, warray, h_args_p, h_args_q);
for (int i = 0; i < N; i++) {
h_sum += array[i] * warray[i];
}
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f\n", time);
printf("HOST RESULT = %f\n", h_sum / N);
free(array);
free(warray);
float* d_array;
cudaMalloc((void **) &d_array, N * sizeof(float));
float* d_array2;
cudaMalloc((void **) &d_array2, N * sizeof(float));
float* d_warray;
cudaMalloc((void **) &d_warray, N * sizeof(float));
populate_randn_d(d_array, N);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
is_nmni(N, d_array, d_warray, h_args_p, h_args_q, 32, 128);
cudaThreadSynchronize();
multiply(N, d_array, d_array2, d_warray, 32, 128);
cudaThreadSynchronize();
reduce(N, d_array2, h_sum, 32, 128);
cudaThreadSynchronize();
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Time = %f\n", time);
printf("RESULT = %f\n", h_sum / N);
cudaFree(d_array);
cudaFree(d_array2);
cudaFree(d_warray);
}
// importance sampling with target distribution being the posterior distribution of the means of a
// Gaussian mixture model given 100 observations with known and shared variance, equal weights with
// uniform prior on (-10,10)^4. actual means are -3, 0, 3, and 6.
// proposal distribution is uniform (-10,10)^4.
void test_mix(int N) {
const int D = 4;
int nb = 512;
int nt = 128;
const int L = 100;
float sigma = 0.55f;
float mus[4];
mus[0] = -3;
mus[1] = 0;
mus[2] = 3;
mus[3] = 6;
float data_array[L];
generate_mix_data(D, sigma, mus, data_array, L);
float c1, c2;
compute_ci1_ci2(sigma, 1.0f / D, c1, c2);
float h_args_p[L + 5];
h_args_p[0] = L;
for (int i = 0; i < L; i++) {
h_args_p[i + 1] = data_array[i];
}
h_args_p[L + 1] = c1;
h_args_p[L + 2] = c2;
h_args_p[L + 3] = -10;
h_args_p[L + 4] = 10;
float h_args_q[2];
h_args_q[0] = -10;
h_args_q[1] = 10;
unsigned int hTimer;
double time1, time2;
cutCreateTimer(&hTimer);
printf("\nIS: Mixture of Gaussians: Mean Inference\n");
float* d_array;
cudaMalloc((void **) &d_array, N * D * sizeof(float));
populate_rand_d(d_array, N * D);
multiply(N * D, d_array, d_array, 20, nb, nt);
cudaThreadSynchronize();
add(N * D, d_array, d_array, -10, nb, nt);
cudaThreadSynchronize();
float* array = (float*) malloc(N * D * sizeof(float));
float* warray = (float*) malloc(N * sizeof(float));
float sum[D];
double sumd[D];
float sumw = 0;
double sumwd = 0;
for (int j = 0; j < D; j++) {
sum[j] = 0;
sumd[j] = 0;
}
cudaMemcpy(array, d_array, N * D * sizeof(float), cudaMemcpyDeviceToHost);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
is_ref_mgmu_mv(N, D, array, warray, h_args_p, h_args_q, 1);
// cutStopTimer(hTimer);
// time1 = cutGetTimerValue(hTimer);
// printf("Time = %f\n", time1);
float maxlw = warray[0];
for (int i = 1; i < N; i++) {
maxlw = max(maxlw, warray[i]);
}
for (int i = 0; i < N; i++) {
warray[i] -= maxlw;
warray[i] = exp(warray[i]);
for (int j = 0; j < D; j++) {
sumd[j] += warray[i] * vector_get(array, D, i)[j];
}
sumwd += warray[i];
}
cutStopTimer(hTimer);
time1 = cutGetTimerValue(hTimer);
printf("Time = %f\n", time1);
printf("HOST RESULT = (%f, %f, %f, %f)\n", sumd[0] / sumwd, sumd[1] / sumwd, sumd[2] / sumwd,
sumd[3] / sumwd);
free(warray);
float* d_warray;
cudaMalloc((void **) &d_warray, N * sizeof(float));
cutResetTimer(hTimer);
cutStartTimer(hTimer);
is_mgmu_mv(N, D, d_array, d_warray, h_args_p, h_args_q, 1, nb, nt);
cudaThreadSynchronize();
// cutStopTimer(hTimer);
// time2 = cutGetTimerValue(hTimer);
// printf("Time = %f\n", time2);
maximum(N, d_warray, maxlw, nb, nt);
add(N, d_warray, d_warray, -maxlw, nb, nt);
exp(N, d_warray, d_warray, nb, nt);
multiply(N, D, d_array, d_array, d_warray, nb, nt);
reduce(N, d_warray, sumw, nb, nt);
reduce(N, D, d_array, sum, nb, nt);
cudaThreadSynchronize();
cutStopTimer(hTimer);
time2 = cutGetTimerValue(hTimer);
printf("Time = %f\n", time2);
printf("RESULT = (%f, %f, %f, %f)\n", sum[0] / sumw, sum[1] / sumw, sum[2] / sumw, sum[3]
/ sumw);
cudaFree(d_array);
cudaFree(d_warray);
printf("speedup = %f\n", time1 / time2);
}
int main(int argc, char **argv) {
seed_rng();
int N = 1048576;
// int N = 131072;
// int N = 65536;
// int N = 16777216;
test_mcgauss(N);
test_mcgauss_mv(N);
test_mixgauss(N);
test_mix(N);
kill_rng();
}
|
2b7ebe15d7491773136534ee5d939df959d4489c.hip
|
// !!! This is a file automatically generated by hipify!!!
/* Copyright 2018 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops.h"
#include "cuda_helper.h"
void Concat::map(void)
{
size_t outputSize = sizeof(DATATYPE);
for (int i = 0; i < outputs[0].numDim; i++)
outputSize *= outputs[0].dim[i];
checkCUDA(hipMalloc(&outputs[0].ptr, outputSize));
}
void Concat::unmap(void)
{
checkCUDA(hipFree(outputs[0].ptr));
}
void Concat::forward(void)
{
off_t offset = 0;
for (int i = 0; i < numInputs; i++) {
size_t size = sizeof(DATATYPE);
for (int j = 0; j < inputs[i].numDim; j++)
size *= inputs[i].dim[j];
if (needCopy[i])
checkCUDA(hipMemcpyAsync(((char*)outputs[0].ptr) + offset,
inputs[i].ptr, size,
hipMemcpyDeviceToDevice));
offset += size;
}
}
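// forward() lays the inputs out end-to-end in the flat output buffer: each
// input contributes the product of its dims in elements, and the byte offset
// advances by that input's size. For example, two float inputs of shape 2x3
// and 2x5 would occupy bytes [0, 24) and [24, 64) of outputs[0].ptr. Inputs
// with needCopy[i] == false are skipped, presumably because they already
// alias the correct region of the output.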
void Model::measure_concat_cost(Concat* concat)
{
string key=export_op_key(*concat);
for (int j = 0; j < concat->numInputs; j++) {
if (concat->needCopy[j]) key+=",<1>";
else key+=",<0>";
}
//printf("<pre_measure>, %s\n",key.c_str());
if(mp.find(key)!=mp.end())
{
concat->runtime=mp[key].runtime;
concat->power=mp[key].power;
concat->energy=mp[key].power*mp[key].runtime;
if(!mute)
{
printf("<found from mp>, %s, ",key.c_str());
printf("runtime=%f power=%f energe=%f\n", mp[key].runtime, mp[key].power, mp[key].power*mp[key].runtime);
}
return ;
}
checkCUDA(hipDeviceSynchronize());
checkCUDA(hipEventRecord(startEvent));
for (int i = 0; i < REPEAT_TIMES; i++) {
for (int j = 0; j < concat->numInputs; j++) {
if (concat->needCopy[j]) {
size_t size = sizeof(DATATYPE);
for (int k = 0; k < concat->inputs[j].numDim; k++)
size *= concat->inputs[j].dim[k];
checkCUDA(hipMemcpyAsync(outputPtr, inputPtr, size,
hipMemcpyDeviceToDevice));
}
}
}
checkCUDA(hipEventRecord(endEvent));
checkCUDA(hipEventSynchronize(endEvent));
float milliseconds;
hipEventElapsedTime(&milliseconds, startEvent, endEvent);
//double runtime=concat->runtime = milliseconds / REPEAT_TIMES;
long times=0;
double current_time=get_current_time();
double current_time2;
start_check_power();
for (int i = 0; ; i++,times++) {
if(i%CHECK_TIME_PERIOD==0&&(current_time2=get_current_time())-current_time>measure_time) break;
for (int j = 0; j < concat->numInputs; j++) {
if (concat->needCopy[j]) {
size_t size = sizeof(DATATYPE);
for (int k = 0; k < concat->inputs[j].numDim; k++)
size *= concat->inputs[j].dim[k];
checkCUDA(hipMemcpyAsync(outputPtr, inputPtr, size,
hipMemcpyDeviceToDevice));
}
}
}
double power=finish_check_power();
double runtime=concat->runtime = (current_time2-current_time)/times;
printf("<measure>, %s, ",key.c_str());
printf("runtime=%f power=%f energy=%f\n",runtime,power,power*runtime);
concat->power=power;
concat->energy=power*runtime;
mp[key].runtime=runtime;
mp[key].power=power;
db_output<<key<<"|"<<runtime<<"|"<<power<<endl;
db_output.flush();
#ifdef VERBOSE
printf("measure[Concat]: cost(%.4lf)\n", concat->runtime);
#endif
}
|
2b7ebe15d7491773136534ee5d939df959d4489c.cu
|
/* Copyright 2018 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ops.h"
#include "cuda_helper.h"
void Concat::map(void)
{
size_t outputSize = sizeof(DATATYPE);
for (int i = 0; i < outputs[0].numDim; i++)
outputSize *= outputs[0].dim[i];
checkCUDA(cudaMalloc(&outputs[0].ptr, outputSize));
}
void Concat::unmap(void)
{
checkCUDA(cudaFree(outputs[0].ptr));
}
void Concat::forward(void)
{
off_t offset = 0;
for (int i = 0; i < numInputs; i++) {
size_t size = sizeof(DATATYPE);
for (int j = 0; j < inputs[i].numDim; j++)
size *= inputs[i].dim[j];
if (needCopy[i])
checkCUDA(cudaMemcpyAsync(((char*)outputs[0].ptr) + offset,
inputs[i].ptr, size,
cudaMemcpyDeviceToDevice));
offset += size;
}
}
void Model::measure_concat_cost(Concat* concat)
{
string key=export_op_key(*concat);
for (int j = 0; j < concat->numInputs; j++) {
if (concat->needCopy[j]) key+=",<1>";
else key+=",<0>";
}
//printf("<pre_measure>, %s\n",key.c_str());
if(mp.find(key)!=mp.end())
{
concat->runtime=mp[key].runtime;
concat->power=mp[key].power;
concat->energy=mp[key].power*mp[key].runtime;
if(!mute)
{
printf("<found from mp>, %s, ",key.c_str());
printf("runtime=%f power=%f energe=%f\n", mp[key].runtime, mp[key].power, mp[key].power*mp[key].runtime);
}
return ;
}
checkCUDA(cudaDeviceSynchronize());
checkCUDA(cudaEventRecord(startEvent));
for (int i = 0; i < REPEAT_TIMES; i++) {
for (int j = 0; j < concat->numInputs; j++) {
if (concat->needCopy[j]) {
size_t size = sizeof(DATATYPE);
for (int k = 0; k < concat->inputs[j].numDim; k++)
size *= concat->inputs[j].dim[k];
checkCUDA(cudaMemcpyAsync(outputPtr, inputPtr, size,
cudaMemcpyDeviceToDevice));
}
}
}
checkCUDA(cudaEventRecord(endEvent));
checkCUDA(cudaEventSynchronize(endEvent));
float milliseconds;
cudaEventElapsedTime(&milliseconds, startEvent, endEvent);
//double runtime=concat->runtime = milliseconds / REPEAT_TIMES;
long times=0;
double current_time=get_current_time();
double current_time2;
start_check_power();
for (int i = 0; ; i++,times++) {
if(i%CHECK_TIME_PERIOD==0&&(current_time2=get_current_time())-current_time>measure_time) break;
for (int j = 0; j < concat->numInputs; j++) {
if (concat->needCopy[j]) {
size_t size = sizeof(DATATYPE);
for (int k = 0; k < concat->inputs[j].numDim; k++)
size *= concat->inputs[j].dim[k];
checkCUDA(cudaMemcpyAsync(outputPtr, inputPtr, size,
cudaMemcpyDeviceToDevice));
}
}
}
double power=finish_check_power();
double runtime=concat->runtime = (current_time2-current_time)/times;
printf("<measure>, %s, ",key.c_str());
printf("runtime=%f power=%f energy=%f\n",runtime,power,power*runtime);
concat->power=power;
concat->energy=power*runtime;
mp[key].runtime=runtime;
mp[key].power=power;
db_output<<key<<"|"<<runtime<<"|"<<power<<endl;
db_output.flush();
#ifdef VERBOSE
printf("measure[Concat]: cost(%.4lf)\n", concat->runtime);
#endif
}
|
f68f0329176cda85b9b4a93a9dc3531a786fffd9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels_hip.cuh"
// TODO: Add gaussian sources.
__global__ void copy_sources(float * target, int * x_position, int *y_position,
int * type, float * mean, float * variance,
int sources_size, long time_ticks) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i<sources_size){
int x = x_position[i];
int y = y_position[i];
if (type[i] == CONSTANT_SOURCE )
target[x + y * x_index_dim] = variance[i];
else if (type[i] == SINUSOID_SOURCE){
float temp = sinf(mean[i] * time_ticks * deltat);
float temp2 = variance[i];
target[x + y * x_index_dim] = temp2 * temp;
}
else
target[x + y * x_index_dim] = 1;
}
__syncthreads();
}
__global__ void update_Hx(float *Hx, float *Ez, float *coef1, float* coef2){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * x_index_dim;
int top = offset + x_index_dim;
if(y < y_index_dim - 1)
Hx[offset] = coef1[offset] * Hx[offset]
- coef2[offset] * (Ez[top] - Ez[offset]);
__syncthreads();
}
__global__ void update_Hy(float *Hy, float *Ez, float * coef1, float * coef2){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * x_index_dim;
int right = offset + 1;
if(x < x_index_dim -1)
Hy[offset] = coef1[offset] * Hy[offset] +
coef2[offset] * (Ez[right] - Ez[offset]);
__syncthreads();
}
__global__ void update_Ez(float *Hx, float *Hy, float *Ez, float * coef1,
float *coef2){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
int left = offset - 1;
int bottom = offset - x_index_dim;
if (x > 0 && y > 0 && x<x_index_dim - 1 && y < y_index_dim - 1){
Ez[offset] = coef1[offset] * Ez[offset] +
coef2[offset] * ((Hy[offset] - Hy[left]) -
(Hx[offset] - Hx[bottom]));
}
__syncthreads();
}
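/*
 * The three kernels above are the usual 2D TM-mode FDTD (Yee) update. In index
 * form, with i along x and j along y and the coefficients folding in the
 * material parameters, deltat and delta (see tm_getcoeff below), roughly:
 *
 *   Hx[i,j] <- c1*Hx[i,j] - c2*( Ez[i,j+1] - Ez[i,j] )
 *   Hy[i,j] <- c1*Hy[i,j] + c2*( Ez[i+1,j] - Ez[i,j] )
 *   Ez[i,j] <- c3*Ez[i,j] + c4*( (Hy[i,j] - Hy[i-1,j]) - (Hx[i,j] - Hx[i,j-1]) )
 */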
__global__ void tm_getcoeff(float *mu,
float * epsilon,
float *sigma,
float * sigma_star,
float * coef1,
float * coef2,
float * coef3,
float * coef4){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float mus = mu[offset];
float sigmamstar = sigma_star[offset];
float sigmam = sigma[offset];
float eps = epsilon[offset];
coef1[offset] = (2.0 * mus - sigmamstar * deltat) /
(2.0 * mus + sigmamstar * deltat);
coef2[offset] = (2 * deltat) / ((2 * mus + sigmamstar * deltat) * delta);
coef3[offset] = (2.0 * eps - sigmam * deltat) /
(2.0 * eps + sigmam * deltat);
coef4[offset] = (2.0 * deltat) /
((2 * eps + sigmam * deltat) * delta);
__syncthreads();
}
__device__ unsigned char value( float n1, float n2, int hue ) {
if (hue > 360) hue -= 360;
else if (hue < 0) hue += 360;
if (hue < 60)
return (unsigned char)(255 * (n1 + (n2-n1)*hue/60));
if (hue < 180)
return (unsigned char)(255 * n2);
if (hue < 240)
return (unsigned char)(255 * (n1 + (n2-n1)*(240-hue)/60));
return (unsigned char)(255 * n1);
}
__global__ void float_to_color( unsigned char *optr,
const float *outSrc ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float l = outSrc[offset];
float s = 1;
int h = (180 + (int)(360.0f * outSrc[offset])) % 360;
float m1, m2;
if (l <= 0.5f)
m2 = l * (1 + s);
else
m2 = l + s - l * s;
m1 = 2 * l - m2;
optr[offset*4 + 0] = value( m1, m2, h+120 );
optr[offset*4 + 1] = value( m1, m2, h );
optr[offset*4 + 2] = value( m1, m2, h -120 );
optr[offset*4 + 3] = 255;
}
__global__ void float_to_color( uchar4 *optr,
const float *outSrc ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float l = outSrc[offset];
float s = 1;
int h = (180 + (int)(360.0f * outSrc[offset])) % 360;
float m1, m2;
if (l <= 0.5f)
m2 = l * (1 + s);
else
m2 = l + s - l * s;
m1 = 2 * l - m2;
optr[offset].x = value( m1, m2, h+120 );
optr[offset].y = value( m1, m2, h );
optr[offset].z = value( m1, m2, h -120 );
optr[offset].w = 255;
}
void copy_symbols(Structure *structure){
checkCudaErrors(hipMemcpyToSymbol(x_index_dim, &(structure->x_index_dim),
sizeof(int)));
checkCudaErrors(hipMemcpyToSymbol(y_index_dim, &(structure->y_index_dim),
sizeof(int)));
checkCudaErrors(hipMemcpyToSymbol(delta, &(structure->dx),
sizeof(float)));
checkCudaErrors(hipMemcpyToSymbol(deltat, &(structure->dt),
sizeof(float)));
}
/* PML TM mode functions start here */
// Bad design. Too many arguments to the functions. Can't help it.
// FIXME sometime
__global__ void pml_tm_get_coefs(float *mu,
float *epsilon,
float *sigma_x,
float *sigma_y,
float *sigma_star_x,
float *sigma_star_y,
float *coef1,
float * coef2,
float * coef3,
float * coef4,
float * coef5,
float * coef6,
float * coef7,
float * coef8)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * x_index_dim;
float mus = mu[offset];
float eps = epsilon[offset];
float sigma_x_value = sigma_x[offset];
float sigma_y_value = sigma_y[offset];
float sigma_star_x_value = sigma_star_x[offset];
float sigma_star_y_value = sigma_star_y[offset];
coef1[offset] = (2.0 * mus - sigma_star_x_value * deltat) /
(2.0 * mus + sigma_star_x_value * deltat);
coef2[offset] = (2.0 * deltat) / ((2 * mus + sigma_star_x_value *deltat)
* delta);
coef3[offset] = (2.0 * mus - sigma_star_y_value * deltat) /
(2.0 * mus + sigma_star_y_value * deltat);
coef4[offset] = (2 * deltat) / ( (2 * mus +
sigma_star_y_value *deltat) * delta);
coef5[offset] = (2.0 * eps - sigma_x_value * deltat) /
(2.0 * eps + sigma_x_value * deltat);
coef6[offset] = (2.0 * deltat) /
((2 * eps + sigma_x_value * deltat) * delta);
coef7[offset] = (2.0 * eps - sigma_y_value * deltat) /
(2.0 * eps + sigma_y_value * deltat);
coef8[offset] = (2.0 * deltat) /
((2 * eps + sigma_y_value * deltat) * delta);
}
__global__ void update_pml_ezx(float * Ezx, float *Hy,
float * coef1, float *coef2){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * x_index_dim;
int left = offset - 1;
if (x > 0 && y > 0 && x<x_index_dim - 1 && y < y_index_dim - 1){
Ezx[offset] = coef1[offset] * Ezx[offset] +
coef2[offset] * (Hy[offset] - Hy[left]);
}
__syncthreads();
}
__global__ void update_pml_ezy(float * Ezy, float *Hx,
float * coef1, float *coef2){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * x_index_dim;
int bottom = offset - x_index_dim;
if (x > 0 && y > 0 && x<x_index_dim - 1 && y < y_index_dim - 1){
Ezy[offset] = coef1[offset] * Ezy[offset] -
coef2[offset] * (Hx[offset] - Hx[bottom]);
}
__syncthreads();
}
__global__ void update_pml_ez(float * Ezx, float *Ezy, float *Ez){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * x_index_dim;
Ez[offset] = Ezx[offset] + Ezy[offset];
__syncthreads();
}
|
f68f0329176cda85b9b4a93a9dc3531a786fffd9.cu
|
#include "kernels.cuh"
// TODO: Add gaussian sources.
__global__ void copy_sources(float * target, int * x_position, int *y_position,
int * type, float * mean, float * variance,
int sources_size, long time_ticks) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i<sources_size){
int x = x_position[i];
int y = y_position[i];
if (type[i] == CONSTANT_SOURCE )
target[x + y * x_index_dim] = variance[i];
else if (type[i] == SINUSOID_SOURCE){
float temp = sinf(mean[i] * time_ticks * deltat);
float temp2 = variance[i];
target[x + y * x_index_dim] = temp2 * temp;
}
else
target[x + y * x_index_dim] = 1;
}
__syncthreads();
}
__global__ void update_Hx(float *Hx, float *Ez, float *coef1, float* coef2){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * x_index_dim;
int top = offset + x_index_dim;
if(y < y_index_dim - 1)
Hx[offset] = coef1[offset] * Hx[offset]
- coef2[offset] * (Ez[top] - Ez[offset]);
__syncthreads();
}
__global__ void update_Hy(float *Hy, float *Ez, float * coef1, float * coef2){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * x_index_dim;
int right = offset + 1;
if(x < x_index_dim -1)
Hy[offset] = coef1[offset] * Hy[offset] +
coef2[offset] * (Ez[right] - Ez[offset]);
__syncthreads();
}
__global__ void update_Ez(float *Hx, float *Hy, float *Ez, float * coef1,
float *coef2){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
int left = offset - 1;
int bottom = offset - x_index_dim;
if (x > 0 && y > 0 && x<x_index_dim - 1 && y < y_index_dim - 1){
Ez[offset] = coef1[offset] * Ez[offset] +
coef2[offset] * ((Hy[offset] - Hy[left]) -
(Hx[offset] - Hx[bottom]));
}
__syncthreads();
}
__global__ void tm_getcoeff(float *mu,
float * epsilon,
float *sigma,
float * sigma_star,
float * coef1,
float * coef2,
float * coef3,
float * coef4){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float mus = mu[offset];
float sigmamstar = sigma_star[offset];
float sigmam = sigma[offset];
float eps = epsilon[offset];
coef1[offset] = (2.0 * mus - sigmamstar * deltat) /
(2.0 * mus + sigmamstar * deltat);
coef2[offset] = (2 * deltat) / ((2 * mus + sigmamstar * deltat) * delta);
coef3[offset] = (2.0 * eps - sigmam * deltat) /
(2.0 * eps + sigmam * deltat);
coef4[offset] = (2.0 * deltat) /
((2 * eps + sigmam * deltat) * delta);
__syncthreads();
}
__device__ unsigned char value( float n1, float n2, int hue ) {
if (hue > 360) hue -= 360;
else if (hue < 0) hue += 360;
if (hue < 60)
return (unsigned char)(255 * (n1 + (n2-n1)*hue/60));
if (hue < 180)
return (unsigned char)(255 * n2);
if (hue < 240)
return (unsigned char)(255 * (n1 + (n2-n1)*(240-hue)/60));
return (unsigned char)(255 * n1);
}
__global__ void float_to_color( unsigned char *optr,
const float *outSrc ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float l = outSrc[offset];
float s = 1;
int h = (180 + (int)(360.0f * outSrc[offset])) % 360;
float m1, m2;
if (l <= 0.5f)
m2 = l * (1 + s);
else
m2 = l + s - l * s;
m1 = 2 * l - m2;
optr[offset*4 + 0] = value( m1, m2, h+120 );
optr[offset*4 + 1] = value( m1, m2, h );
optr[offset*4 + 2] = value( m1, m2, h -120 );
optr[offset*4 + 3] = 255;
}
__global__ void float_to_color( uchar4 *optr,
const float *outSrc ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float l = outSrc[offset];
float s = 1;
int h = (180 + (int)(360.0f * outSrc[offset])) % 360;
float m1, m2;
if (l <= 0.5f)
m2 = l * (1 + s);
else
m2 = l + s - l * s;
m1 = 2 * l - m2;
optr[offset].x = value( m1, m2, h+120 );
optr[offset].y = value( m1, m2, h );
optr[offset].z = value( m1, m2, h -120 );
optr[offset].w = 255;
}
void copy_symbols(Structure *structure){
checkCudaErrors(cudaMemcpyToSymbol(x_index_dim, &(structure->x_index_dim),
sizeof(int)));
checkCudaErrors(cudaMemcpyToSymbol(y_index_dim, &(structure->y_index_dim),
sizeof(int)));
checkCudaErrors(cudaMemcpyToSymbol(delta, &(structure->dx),
sizeof(float)));
checkCudaErrors(cudaMemcpyToSymbol(deltat, &(structure->dt),
sizeof(float)));
}
/* PML TM mode functions start here */
// Bad design. Too many arguments to the functions. Can't help it.
// FIXME sometime
__global__ void pml_tm_get_coefs(float *mu,
float *epsilon,
float *sigma_x,
float *sigma_y,
float *sigma_star_x,
float *sigma_star_y,
float *coef1,
float * coef2,
float * coef3,
float * coef4,
float * coef5,
float * coef6,
float * coef7,
float * coef8)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * x_index_dim;
float mus = mu[offset];
float eps = epsilon[offset];
float sigma_x_value = sigma_x[offset];
float sigma_y_value = sigma_y[offset];
float sigma_star_x_value = sigma_star_x[offset];
float sigma_star_y_value = sigma_star_y[offset];
coef1[offset] = (2.0 * mus - sigma_star_x_value * deltat) /
(2.0 * mus + sigma_star_x_value * deltat);
coef2[offset] = (2.0 * deltat) / ((2 * mus + sigma_star_x_value *deltat)
* delta);
coef3[offset] = (2.0 * mus - sigma_star_y_value * deltat) /
(2.0 * mus + sigma_star_y_value * deltat);
coef4[offset] = (2 * deltat) / ( (2 * mus +
sigma_star_y_value *deltat) * delta);
coef5[offset] = (2.0 * eps - sigma_x_value * deltat) /
(2.0 * eps + sigma_x_value * deltat);
coef6[offset] = (2.0 * deltat) /
((2 * eps + sigma_x_value * deltat) * delta);
coef7[offset] = (2.0 * eps - sigma_y_value * deltat) /
(2.0 * eps + sigma_y_value * deltat);
coef8[offset] = (2.0 * deltat) /
((2 * eps + sigma_y_value * deltat) * delta);
}
__global__ void update_pml_ezx(float * Ezx, float *Hy,
float * coef1, float *coef2){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * x_index_dim;
int left = offset - 1;
if (x > 0 && y > 0 && x<x_index_dim - 1 && y < y_index_dim - 1){
Ezx[offset] = coef1[offset] * Ezx[offset] +
coef2[offset] * (Hy[offset] - Hy[left]);
}
__syncthreads();
}
__global__ void update_pml_ezy(float * Ezy, float *Hx,
float * coef1, float *coef2){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * x_index_dim;
int bottom = offset - x_index_dim;
if (x > 0 && y > 0 && x<x_index_dim - 1 && y < y_index_dim - 1){
Ezy[offset] = coef1[offset] * Ezy[offset] -
coef2[offset] * (Hx[offset] - Hx[bottom]);
}
__syncthreads();
}
__global__ void update_pml_ez(float * Ezx, float *Ezy, float *Ez){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * x_index_dim;
Ez[offset] = Ezx[offset] + Ezy[offset];
__syncthreads();
}
|
c4a8826090bc242e201457783898478373023768.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* file name: TilingMatrixV2.c
* NOTE:
* squareMatrixMult is much more efficient than the regular multiplier
* currently compiling with: nvcc TilingMatrix.cu -o tileTest
* Device Standards for: GeForce GTX 1060 6GB
* total global mem size: 6078 MBytes (6373572608 bytes)
* total shared mem per block: 49.152 KBytes (49152 bytes)
* nvcc TilingMatrixV2.c -lcublas -o TilingMatrixTest
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "rocblas.h"
#include <sys/time.h> //measuring performance data
#define BLOCK_SIZE 32
/**********************************************************************
* function name: matrixTriUpper
* description: zeroes the strictly lower triangle of a matrix, leaving it upper triangular
* parameters:
* &a GPU device pointer to a m X n matrix (A)
* Note:
*
* return: none
**********************************************************************/
__global__ void matrixTriUpper(float *a, int m, int n) {
//zero every element below the diagonal (i > j), keeping the upper triangle
for(int i = 0; i < m; ++i) {
for(int j = 0; j < n; ++j) {
if(i>j)
a[i*n + j] = 0;
a[i*n + j] = a[i*n + j];
}
}
}
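/*
 * Note: every launched thread executes the full i/j loop above, so the kernel
 * does redundant work for anything beyond a single thread. A more idiomatic
 * one-element-per-thread sketch (illustrative only, assuming a 1D launch that
 * covers all m*n elements):
 *
 *   __global__ void matrixTriUpperElem(float *a, int m, int n) {
 *       int idx = blockIdx.x * blockDim.x + threadIdx.x;
 *       if (idx < m * n) {
 *           int i = idx / n;
 *           int j = idx % n;
 *           if (i > j) a[idx] = 0.0f;   // zero the strictly lower triangle
 *       }
 *   }
 */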
/**********************************************************************
* function name: cublasGetErrorString
* description: gets the cublas string error codes for printing
* parameters:
* error a cublas error status enum
* return: char pointer (string)
**********************************************************************/
const char* cublasGetErrorString(hipblasStatus_t status)
{
switch(status)
{
case HIPBLAS_STATUS_SUCCESS: return "HIPBLAS_STATUS_SUCCESS";
case HIPBLAS_STATUS_NOT_INITIALIZED: return "HIPBLAS_STATUS_NOT_INITIALIZED";
case HIPBLAS_STATUS_ALLOC_FAILED: return "HIPBLAS_STATUS_ALLOC_FAILED";
case HIPBLAS_STATUS_INVALID_VALUE: return "HIPBLAS_STATUS_INVALID_VALUE";
case HIPBLAS_STATUS_ARCH_MISMATCH: return "HIPBLAS_STATUS_ARCH_MISMATCH";
case HIPBLAS_STATUS_MAPPING_ERROR: return "HIPBLAS_STATUS_MAPPING_ERROR";
case HIPBLAS_STATUS_EXECUTION_FAILED: return "HIPBLAS_STATUS_EXECUTION_FAILED";
case HIPBLAS_STATUS_INTERNAL_ERROR: return "HIPBLAS_STATUS_INTERNAL_ERROR";
}
return "unknown error";
}
/**************************************************************
* function name: storeC
* description: copies the computed tile tileC back into the corresponding indices of matrix C
*
* parameters:
* &tilec pointer to pre-allocated (tileLength X tileLength) matrix
* &matrixc pointer to large (m X m)matrix B
* int tileLength predefined length of tile
* int i caller outer loop value (helps define starting ROW index for tile)
* int j caller inner loop value (helps define starting COLUMN for tile)
*
****************************************************************/
void storeC (float *tileC, float *matrixC, int tileLength, int i, int j, int numTiles){
//pointer declarations
for(int Ti = (tileLength*i); Ti < (tileLength*i)+tileLength; Ti++){
for(int Tj = (tileLength*j); Tj < (tileLength*j) + tileLength; Tj++ ){
matrixC[(Ti * numTiles) + Tj] = tileC[(Ti *numTiles *tileLength)+Tj];
// printf("[%0.1f] ", tileC[(Ti *numTiles *tileLength)+Tj]);
}
// printf("\n");
}
// printf("\n");
}
/**************************************************************
* function name: fillA
* description: populates TileA with the elements of matrix A that correspond to the tile's starting indices and boundaries.
*
* parameters:
* &tileA pointer to pre-allocated tileLength X tileLength matrix
* &matrixA pointer to large matrix A
* int tileLength predefined length of tile
* int i caller outer loop value (helps define starting ROW index for tile)
* int j caller inner loop value (helps define starting COLUMN for tile)
*
****************************************************************/
void fillA(float *tileA, float *matrixA, int tileLength, int i, int j, int numTiles){
//pointer declarations
for(int Ti = (tileLength*i); Ti < (tileLength*i)+tileLength; Ti++){
for(int Tj = (tileLength*j); Tj < (tileLength*j) + tileLength; Tj++ ){
tileA[(Ti * tileLength) + Tj] = matrixA[(Ti *numTiles *tileLength) + Tj];
// printf("[%0.1f] ", tileA[(Ti * tileLength) + Tj]);
}
// printf("\n");
}
// printf("\n");
}
/**************************************************************
* function name: fillB
* description: populates TileB with the elements of matrix B that correspond to the tile's
* starting indices and boundaries.
*
* parameters:
* &tileB pointer to pre-allocated (tileLength X tileLength) matrix
* &matrixB pointer to large (m X m)matrix B
* int tileLength predefined length of tile
* int i caller outer loop value (helps define starting COLUMN index for tile)
* int j caller inner loop value (helps define starting ROW for tile)
*
****************************************************************/
void fillB(float *tileB, float *matrixB, int tileLength, int i, int j, int numTiles){
//pointer declarations
for(int Ti = (tileLength*j); Ti < (tileLength*j)+tileLength; Ti++){
for(int Tj = (tileLength*i); Tj < (tileLength*i) + tileLength; Tj++ ){
tileB[Ti * tileLength + Tj] = matrixB[Ti * numTiles* tileLength + Tj];
// printf("[%0.1f] ", tileB[Ti * tileLength + Tj]);
}
// printf("\n");
}
// printf("\n");
}
/**********************************************************************
* function name: matrixCpy
* description: Iterates through the large (m X m) matrices A and B, repeatedly building smaller (tileLength X tileLength) matrices Ta and Tb that the device uses to produce matrix C, which holds the matrix product of A and B.
* parameters: handle
* &a GPU device pointer to a m X m matrix (A)
* &b GPU device pointer to a m X m matrix (B)
* &c GPU device output purpose pointer to a m X m matrix (C)
* int tileLength predefined max length of tile
* int m # of tiles that divide the length of matrices A & B
*
* return: none
* TODO implement kernel calls of cuBLAS and TMM, implement another function or code that transfers results of C tile to matrix C. FIGURE OUT WHY fillA and fillB piss off the compiler
**********************************************************************/
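/*
 * Sketch of the intended tiling scheme: with A, B and C partitioned into
 * m x m blocks of size tileLength x tileLength, the output block is
 *
 *   C_ij = sum_{k=0..m-1} A_ik * B_kj,
 *
 * which can be built up by one cublasSgemm call per k with alpha = beta = 1,
 * so each call accumulates A_ik * B_kj into the running tile held in d_c.
 * Note cuBLAS assumes column-major storage, so the fill/store helpers above
 * have to agree with that layout for the result to be the true product.
 */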
void matrixCpy(float *a, float *b, float *c, int tileLength, int m) {
hipError_t cudaStat; // hipMalloc & hipFree status
hipblasStatus_t stat; // CUBLAS functions status
hipblasHandle_t handle; // CUBLAS context
float al =1.0f; // al =1
float bet =1.0f; // bet =1
float *Ta,*Tb,*Tc, *d_a, *d_b, *d_c; // device and host TILE memory declaration
struct timeval copyTime;
double copy_elapsed_time = 0;
struct timeval mathTime;
double math_elapsed_time = 0;
double avgCopTemp = 0; // doubles: these hold fractional seconds, an int would truncate the timestamp
double avgMathTemp = 0;
int numberofIter = 0;
stat = hipblasCreate(&handle); // initialize CUBLAS context
if(stat != HIPBLAS_STATUS_SUCCESS)
printf("Cublas Create Error: %s\n", cublasGetErrorString(stat));
//Host memory allocation
Ta = (float*) malloc(tileLength*tileLength*sizeof(float)*3); // host tile memory alloc for a
Tb = (float*) malloc(tileLength*tileLength*sizeof(float)*3); // host tile memory alloc for b
Tc = (float*) malloc(tileLength*tileLength*sizeof(float)*3); // host tile memory for c
//Device memory allocations
cudaStat = hipMalloc((void**)&d_a,tileLength*tileLength*sizeof(*a)); // device memory alloc for a
if(cudaStat != hipSuccess)
printf("Cuda A Malloc: %s\n", hipGetErrorString(cudaStat));
cudaStat = hipMalloc((void**)&d_b,tileLength*tileLength*sizeof(*b)); // device memory alloc for b
if(cudaStat != hipSuccess)
printf("Cuda B Malloc: %s\n", hipGetErrorString(cudaStat));
cudaStat = hipMalloc((void**)&d_c,tileLength*tileLength*sizeof(*c)); // device memory alloc for c
if(cudaStat != hipSuccess)
printf("Cuda malloc Error: %s\n", hipGetErrorString(cudaStat));
for(int i = 0; i < m; i++)
{
//memcpy of tile C for host to device (POSSIBLE AREA FOR TIMING)
// cudaStat = hipMemcpy(d_c, Tc, tileLength*tileLength*sizeof(float), hipMemcpyHostToDevice);
// if(cudaStat != hipSuccess)
// printf("Cuda malloc Error: %s\n", hipGetErrorString(cudaStat));
for(int j = 0; j < m; j++)
{
gettimeofday(©Time, NULL);
avgCopTemp = (((double) copyTime.tv_sec) + ((double) copyTime.tv_usec)/1000000);
//Fill tileA & tileB with elements from matrix A
// printf("Tile A iteration: i=%d, j=%d\n", i,j);
fillA(Ta, a, tileLength, i, j, m);
// printf("Tile B iteration: i=%d, j=%d\n", i,j);
fillB(Tb, b, tileLength, i, j, m);
//memcpy TileA and TileB from host to device
cudaStat = hipMemcpy(d_a,Ta,tileLength*tileLength*sizeof(float),hipMemcpyHostToDevice);
if(cudaStat != hipSuccess)
printf("Cuda memcpy: %s\n", hipGetErrorString(cudaStat));
cudaStat = hipMemcpy(d_b, Tb, tileLength*tileLength*sizeof(float),hipMemcpyHostToDevice);
if(cudaStat != hipSuccess)
printf("Cuda memcpy Error: %s\n", hipGetErrorString(cudaStat));
// stat = hipblasSetMatrix(tileLength,tileLength,sizeof(*Ta),Ta,tileLength,d_a,tileLength);
// if(stat != HIPBLAS_STATUS_SUCCESS)
// printf("Cublas to Matrix A Error: %s\n", cublasGetErrorString(stat));
// stat = hipblasSetMatrix(tileLength,tileLength,sizeof(*Tb),Tb,tileLength,d_b,tileLength);
// if(stat != HIPBLAS_STATUS_SUCCESS)
// printf("Cublas to Matrix B Error: %s\n", cublasGetErrorString(stat));
// stat = hipblasSetMatrix(tileLength,tileLength,sizeof(*Tc),Tc,tileLength,d_c,tileLength);
// if(stat != HIPBLAS_STATUS_SUCCESS)
// printf("Cublas to Matrix C Error: %s\n", cublasGetErrorString(stat));
gettimeofday(©Time, NULL);
copy_elapsed_time += (((double) copyTime.tv_sec) + ((double) copyTime.tv_usec)/1000000) - avgCopTemp;
// copy_elapsed_time = copy_elapsed_time + avgCopTemp;
gettimeofday(&mathTime, NULL);
avgMathTemp = (((double) mathTime.tv_sec) + ((double) mathTime.tv_usec)/1000000);
stat = hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,tileLength,tileLength,tileLength,&al,d_a,tileLength,d_b,tileLength,&bet,d_c,tileLength);
if(stat != HIPBLAS_STATUS_SUCCESS)
printf("Cublas Matrix Multiplication Error: %s\n", cublasGetErrorString(stat));
// stat = hipblasGetMatrix (tileLength,tileLength, sizeof(*Tc),d_c,tileLength,c,tileLength); // cp d_c - >c
gettimeofday(&mathTime, NULL);
math_elapsed_time += (((double) mathTime.tv_sec) + ((double) mathTime.tv_usec)/1000000) - avgMathTemp;
hipMemcpy(Tc,d_c, tileLength*tileLength*sizeof(float),hipMemcpyDeviceToHost);
storeC(Tc,c, tileLength, i, j, m);
++numberofIter;
}
}
printf("Copy Execution time : %lf sec.\n", (copy_elapsed_time/numberofIter));
printf("Math Execution time : %lf sec.\n", math_elapsed_time/numberofIter);
// printf("numiter %d:\n", timingNumIterations);
// printf("Copy Execution time : %lf sec.\n", copy_elapsed_time/(timingNumIterations));
//Free device and host memory for next iteration
cudaStat = hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipblasDestroy(handle);
free(Ta);
free(Tb);
free(Tc);
}
/**********************************************************************
* function name: main
* description: test and compare
* parameters:
* none
* return: none
**********************************************************************/
int main(int argc, char** argv) {
// hipblasStatus_t stat; // CUBLAS functions statusx
// hipblasHandle_t handle; // CUBLAS context
int m=20000;// a - mxk matrix
int n=20000;// b - kxn matrix
int k=20000;// c - mxn matrix
// Set status variables
struct timeval totalTime;
double total_elapsed_time;
struct timeval time;
double elapsed_time;
gettimeofday(&totalTime, NULL);
total_elapsed_time = (((double) totalTime.tv_sec) + ((double) totalTime.tv_usec)/1000000);
// Allocate memory in host RAM
float *a; // mxk matrix a on the host
float *b; // kxn matrix b on the host
float *c; // mxn matrix c on the host
a = (float*) malloc(m*k* sizeof(float)); // host memory for a
b = (float*) malloc(k*n* sizeof(float)); // host memory for b
c = (float*) malloc(m*n* sizeof(float)); // host memory for c
/* Assign Random Variables to the matrices */
// srand(3333);
int val = 1;
// random initialize matrix A [mxk]
for (int i = 0; i < m; ++i) {
for (int j = 0; j < n; ++j) {
a[i * n + j] =val++;
}
}
val = 1;
// random initialize matrix B [kxn]
for (int i = 0; i < n; ++i) {
for (int j = 0; j < k; ++j) {
b[i * k + j] = val++;
}
}
gettimeofday(&time, NULL);
elapsed_time = (((double) time.tv_sec) + ((double) time.tv_usec)/1000000);
matrixCpy(a,b,c,10000,2);
gettimeofday(&time, NULL);
elapsed_time = (((double) time.tv_sec) + ((double) time.tv_usec)/1000000) - elapsed_time;
printf("Computation Execution time : %lf sec.\n", elapsed_time);
// int i,j;
// print matrix A
// printf("matA matrix: \n");
// for (i = 0; i < m; i++) {
// for (j = 0; j < n; j++) {
// //printf("[%d][%d]:%d, ", i, j, a[i*k + j]);
// printf(" %f ", a[i*k + j]);
// }
// printf("\n");
// }
// // print matrix B
// printf("\nmatB matrix: \n");
// for (i = 0; i < n; i++) {
// for (j = 0; j < k; j++) {
// //printf("[%d][%d]:%d, ", i, j, b[i*k + j]);
// printf(" %f ", b[i*k + j]);
// }
// printf("\n");
// }
// // print result matrix
// printf("\nResult matrix: \n");
// for (i = 0; i < m; i++) {
// for (j = 0; j < k; j++) {
// //printf("[%d][%d]:%d, ", i, j, c[i*k + j]);
// printf(" %f ", c[i*k + j]);
// }
// printf("\n");
// }
// free memory
free(a);
free(b);
free(c);
gettimeofday(&totalTime, NULL);
total_elapsed_time = (((double) totalTime.tv_sec) + ((double) totalTime.tv_usec)/1000000) - total_elapsed_time;
printf("Execution Total Time : %lf sec.\n", total_elapsed_time);
return 0;
}
|
c4a8826090bc242e201457783898478373023768.cu
|
/*
* file name: TilingMatrixV2.c
* NOTE:
* squareMatrixMult is much more efficient than the regular multiplier
* currently compiling with: nvcc TilingMatrix.cu -o tileTest
* Device Standards for: GeForce GTX 1060 6GB
* total global mem size: 6078 MBytes (6373572608 bytes)
* total shared mem per block: 49.152 KBytes (49152 bytes)
* nvcc TilingMatrixV2.c -lcublas -o TilingMatrixTest
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"
#include <sys/time.h> //measuring performance data
#define BLOCK_SIZE 32
/**********************************************************************
* function name: matrixTriUpper
* description: zeroes the strictly lower triangle of a matrix, leaving it upper triangular
* parameters:
* &a GPU device pointer to a m X n matrix (A)
* Note:
*
* return: none
**********************************************************************/
__global__ void matrixTriUpper(float *a, int m, int n) {
//zero every element below the diagonal (i > j), keeping the upper triangle
for(int i = 0; i < m; ++i) {
for(int j = 0; j < n; ++j) {
if(i>j)
a[i*n + j] = 0;
a[i*n + j] = a[i*n + j];
}
}
}
/**********************************************************************
* function name: cublasGetErrorString
* description: gets the cublas string error codes for printing
* parameters:
* error a cublas error status enum
* return: char pointer (string)
**********************************************************************/
const char* cublasGetErrorString(cublasStatus_t status)
{
switch(status)
{
case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS";
case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR";
}
return "unknown error";
}
/**************************************************************
* function name: storeC
* description: copies the computed tile tileC back into the corresponding indices of matrix C
*
* parameters:
* &tilec pointer to pre-allocated (tileLength X tileLength) matrix
* &matrixc pointer to large (m X m)matrix B
* int tileLength predefined length of tile
* int i caller outer loop value (helps define starting ROW index for tile)
* int j caller inner loop value (helps define starting COLUMN for tile)
*
****************************************************************/
void storeC (float *tileC, float *matrixC, int tileLength, int i, int j, int numTiles){
//pointer declarations
for(int Ti = (tileLength*i); Ti < (tileLength*i)+tileLength; Ti++){
for(int Tj = (tileLength*j); Tj < (tileLength*j) + tileLength; Tj++ ){
matrixC[(Ti * numTiles) + Tj] = tileC[(Ti *numTiles *tileLength)+Tj];
// printf("[%0.1f] ", tileC[(Ti *numTiles *tileLength)+Tj]);
}
// printf("\n");
}
// printf("\n");
}
/**************************************************************
* function name: fillA
* description: populates TileA with the elements of matrix A that correspond to the tile's starting indices and boundaries.
*
* parameters:
* &tileA pointer to pre-allocated tileLength X tileLength matrix
* &matrixA pointer to large matrix A
* int tileLength predefined length of tile
* int i caller outer loop value (helps define starting ROW index for tile)
* int j caller inner loop value (helps define starting COLUMN for tile)
*
****************************************************************/
void fillA(float *tileA, float *matrixA, int tileLength, int i, int j, int numTiles){
//pointer declarations
for(int Ti = (tileLength*i); Ti < (tileLength*i)+tileLength; Ti++){
for(int Tj = (tileLength*j); Tj < (tileLength*j) + tileLength; Tj++ ){
tileA[(Ti * tileLength) + Tj] = matrixA[(Ti *numTiles *tileLength) + Tj];
// printf("[%0.1f] ", tileA[(Ti * tileLength) + Tj]);
}
// printf("\n");
}
// printf("\n");
}
/**************************************************************
* function name: fillB
* description: populates TileB with the elements of matrix B that correspond to the tile's
* starting indices and boundaries.
*
* parameters:
* &tileB pointer to pre-allocated (tileLength X tileLength) matrix
* &matrixB pointer to large (m X m)matrix B
* int tileLength predefined length of tile
* int i caller outer loop value (helps define starting COLUMN index for tile)
* int j caller inner loop value (helps define starting ROW for tile)
*
****************************************************************/
void fillB(float *tileB, float *matrixB, int tileLength, int i, int j, int numTiles){
//pointer declarations
for(int Ti = (tileLength*j); Ti < (tileLength*j)+tileLength; Ti++){
for(int Tj = (tileLength*i); Tj < (tileLength*i) + tileLength; Tj++ ){
tileB[Ti * tileLength + Tj] = matrixB[Ti * numTiles* tileLength + Tj];
// printf("[%0.1f] ", tileB[Ti * tileLength + Tj]);
}
// printf("\n");
}
// printf("\n");
}
/**********************************************************************
* function name: matrixCpy
* description: Iterates through the large (m X m) matrices A and B, repeatedly building smaller (tileLength X tileLength) matrices Ta and Tb that the device uses to produce matrix C, which holds the matrix product of A and B.
* parameters: handle
* &a GPU device pointer to a m X m matrix (A)
* &b GPU device pointer to a m X m matrix (B)
* &c GPU device output purpose pointer to a m X m matrix (C)
* int tileLength predefined max length of tile
* int m # of tiles that divide the length of matrices A & B
*
* return: none
* TODO implement kernel calls of cuBlas and TMM, implement another function or code that transfers the results of the C tile to matrix C. Figure out why fillA and fillB trip up the compiler.
**********************************************************************/
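/* Sketch of the accumulation that the TODO above still calls for (an assumption about the
 * intended algorithm, not what the current loop does): each output tile C(i,j) is the sum
 * over k of A(i,k) * B(k,j), so the (i, j) loops would gain an inner k loop, e.g.
 *
 *     for (int k = 0; k < m; k++) {
 *         fillA(Ta, a, tileLength, i, k, m);      // tile of A at tile-row i, tile-column k
 *         fillB(Tb, b, tileLength, j, k, m);      // tile of B at tile-row k, tile-column j
 *         // copy Ta/Tb to the device and call cublasSgemm with beta = 1 to accumulate into d_c
 *     }
 *
 * whereas the current body performs a single tile multiply per (i, j). */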
void matrixCpy(float *a, float *b, float *c, int tileLength, int m) {
cudaError_t cudaStat; // cudaMalloc & cudaFree status
cublasStatus_t stat; // CUBLAS functions status
cublasHandle_t handle; // CUBLAS context
float al =1.0f; // al =1
float bet =1.0f; // bet =1
float *Ta,*Tb,*Tc, *d_a, *d_b, *d_c; // device and host TILE memory declaration
struct timeval copyTime;
double copy_elapsed_time = 0.0;
struct timeval mathTime;
double math_elapsed_time = 0.0;
double avgCopTemp = 0.0;
double avgMathTemp = 0.0;
int numberofIter = 0;
stat = cublasCreate(&handle); // initialize CUBLAS context
if(stat != CUBLAS_STATUS_SUCCESS)
printf("Cublas Create Error: %s\n", cublasGetErrorString(stat));
//Host memory allocation
Ta = (float*) malloc(tileLength*tileLength*sizeof(float)); // host tile memory alloc for a
Tb = (float*) malloc(tileLength*tileLength*sizeof(float)); // host tile memory alloc for b
Tc = (float*) malloc(tileLength*tileLength*sizeof(float)); // host tile memory alloc for c
//Device memory allocations
cudaStat = cudaMalloc((void**)&d_a,tileLength*tileLength*sizeof(*a)); // device memory alloc for a
if(cudaStat != cudaSuccess)
printf("Cuda A Malloc: %s\n", cudaGetErrorString(cudaStat));
cudaStat = cudaMalloc((void**)&d_b,tileLength*tileLength*sizeof(*b)); // device memory alloc for b
if(cudaStat != cudaSuccess)
printf("Cuda B Malloc: %s\n", cudaGetErrorString(cudaStat));
cudaStat = cudaMalloc((void**)&d_c,tileLength*tileLength*sizeof(*c)); // device memory alloc for c
if(cudaStat != cudaSuccess)
printf("Cuda malloc Error: %s\n", cudaGetErrorString(cudaStat));
for(int i = 0; i < m; i++)
{
//memcpy of tile C for host to device (POSSIBLE AREA FOR TIMING)
// cudaStat = cudaMemcpy(d_c, Tc, tileLength*tileLength*sizeof(float), cudaMemcpyHostToDevice);
// if(cudaStat != cudaSuccess)
// printf("Cuda malloc Error: %s\n", cudaGetErrorString(cudaStat));
for(int j = 0; j < m; j++)
{
gettimeofday(&copyTime, NULL);
avgCopTemp = (((double) copyTime.tv_sec) + ((double) copyTime.tv_usec)/1000000);
//Fill tileA & tileB with elements from matrix A
// printf("Tile A iteration: i=%d, j=%d\n", i,j);
fillA(Ta, a, tileLength, i, j, m);
// printf("Tile B iteration: i=%d, j=%d\n", i,j);
fillB(Tb, b, tileLength, i, j, m);
//memcpy tileA and tileB from host to device
cudaStat = cudaMemcpy(d_a,Ta,tileLength*tileLength*sizeof(float),cudaMemcpyHostToDevice);
if(cudaStat != cudaSuccess)
printf("Cuda memcpy: %s\n", cudaGetErrorString(cudaStat));
cudaStat = cudaMemcpy(d_b, Tb, tileLength*tileLength*sizeof(float),cudaMemcpyHostToDevice);
if(cudaStat != cudaSuccess)
printf("Cuda memcpy Error: %s\n", cudaGetErrorString(cudaStat));
// stat = cublasSetMatrix(tileLength,tileLength,sizeof(*Ta),Ta,tileLength,d_a,tileLength);
// if(stat != CUBLAS_STATUS_SUCCESS)
// printf("Cublas to Matrix A Error: %s\n", cublasGetErrorString(stat));
// stat = cublasSetMatrix(tileLength,tileLength,sizeof(*Tb),Tb,tileLength,d_b,tileLength);
// if(stat != CUBLAS_STATUS_SUCCESS)
// printf("Cublas to Matrix B Error: %s\n", cublasGetErrorString(stat));
// stat = cublasSetMatrix(tileLength,tileLength,sizeof(*Tc),Tc,tileLength,d_c,tileLength);
// if(stat != CUBLAS_STATUS_SUCCESS)
// printf("Cublas to Matrix C Error: %s\n", cublasGetErrorString(stat));
gettimeofday(&copyTime, NULL);
copy_elapsed_time += (((double) copyTime.tv_sec) + ((double) copyTime.tv_usec)/1000000) - avgCopTemp;
// copy_elapsed_time = copy_elapsed_time + avgCopTemp;
gettimeofday(&mathTime, NULL);
avgMathTemp = (((double) mathTime.tv_sec) + ((double) mathTime.tv_usec)/1000000);
stat = cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,tileLength,tileLength,tileLength,&al,d_a,tileLength,d_b,tileLength,&bet,d_c,tileLength);
if(stat != CUBLAS_STATUS_SUCCESS)
printf("Cublas Matrix Multiplication Error: %s\n", cublasGetErrorString(stat));
// stat = cublasGetMatrix (tileLength,tileLength, sizeof(*Tc),d_c,tileLength,c,tileLength); // cp d_c - >c
gettimeofday(&mathTime, NULL);
math_elapsed_time += (((double) mathTime.tv_sec) + ((double) mathTime.tv_usec)/1000000) - avgMathTemp;
cudaMemcpy(Tc,d_c, tileLength*tileLength*sizeof(float),cudaMemcpyDeviceToHost);
storeC(Tc,c, tileLength, i, j, m);
++numberofIter;
}
}
printf("Copy Execution time : %lf sec.\n", (copy_elapsed_time/numberofIter));
printf("Math Execution time : %lf sec.\n", math_elapsed_time/numberofIter);
// printf("numiter %d:\n", timingNumIterations);
// printf("Copy Execution time : %lf sec.\n", copy_elapsed_time/(timingNumIterations));
//Free device and host memory
cudaStat = cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cublasDestroy(handle);
free(Ta);
free(Tb);
free(Tc);
}
/**********************************************************************
* function name: main
* description: test and compare
* parameters:
* none
* return: none
**********************************************************************/
int main(int argc, char** argv) {
// cublasStatus_t stat; // CUBLAS functions statusx
// cublasHandle_t handle; // CUBLAS context
int m=20000;// a - mxk matrix
int n=20000;// b - kxn matrix
int k=20000;// c - mxn matrix
// Set status variables
struct timeval totalTime;
double total_elapsed_time;
struct timeval time;
double elapsed_time;
gettimeofday(&totalTime, NULL);
total_elapsed_time = (((double) totalTime.tv_sec) + ((double) totalTime.tv_usec)/1000000);
// Allocate memory in host RAM
float *a; // mxk matrix a on the host
float *b; // kxn matrix b on the host
float *c; // mxn matrix c on the host
a = (float*) malloc(m*k* sizeof(float)); // host memory for a
b = (float*) malloc(k*n* sizeof(float)); // host memory for b
c = (float*) malloc(m*n* sizeof(float)); // host memory for c
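/* Rough footprint check (using the m = n = k = 20000 set above): each matrix needs
   20000 * 20000 * sizeof(float) = 1.6e9 bytes (~1.5 GiB), so a, b and c together
   take roughly 4.5 GiB of host RAM before any tiling happens. */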
/* Assign sequential values to the matrices */
// srand(3333);
int val = 1;
// initialize matrix A [mxk] with sequential values
for (int i = 0; i < m; ++i) {
for (int j = 0; j < k; ++j) {
a[i * k + j] = val++;
}
}
val = 1;
// initialize matrix B [kxn] with sequential values
for (int i = 0; i < k; ++i) {
for (int j = 0; j < n; ++j) {
b[i * n + j] = val++;
}
}
gettimeofday(&time, NULL);
elapsed_time = (((double) time.tv_sec) + ((double) time.tv_usec)/1000000);
matrixCpy(a,b,c,10000,2);
gettimeofday(&time, NULL);
elapsed_time = (((double) time.tv_sec) + ((double) time.tv_usec)/1000000) - elapsed_time;
printf("Computation Execution time : %lf sec.\n", elapsed_time);
// int i,j;
// print matrix A
// printf("matA matrix: \n");
// for (i = 0; i < m; i++) {
// for (j = 0; j < n; j++) {
// //printf("[%d][%d]:%d, ", i, j, a[i*k + j]);
// printf(" %f ", a[i*k + j]);
// }
// printf("\n");
// }
// // print matrix B
// printf("\nmatB matrix: \n");
// for (i = 0; i < n; i++) {
// for (j = 0; j < k; j++) {
// //printf("[%d][%d]:%d, ", i, j, b[i*k + j]);
// printf(" %f ", b[i*k + j]);
// }
// printf("\n");
// }
// // print result matrix
// printf("\nResult matrix: \n");
// for (i = 0; i < m; i++) {
// for (j = 0; j < k; j++) {
// //printf("[%d][%d]:%d, ", i, j, c[i*k + j]);
// printf(" %f ", c[i*k + j]);
// }
// printf("\n");
// }
// free memory
free(a);
free(b);
free(c);
gettimeofday(&totalTime, NULL);
total_elapsed_time = (((double) totalTime.tv_sec) + ((double) totalTime.tv_usec)/1000000) - total_elapsed_time;
printf("Execution Total Time : %lf sec.\n", total_elapsed_time);
return 0;
}
|
663eb88e994a65c332e9201755157a5bdf3c5f8b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
This is the cuda version of 2d-convolution with multiple thread blocks (270ms).
In this problem, the output has size [N, F, H_, W_];
so we divide the task into (H_ * W_) blocks, and each block has (N * F) threads.
*/
#include "matr_def.h"
__global__ void conv2d_cuda_optim_kernel(float *out_matr, float *fm_matr, float *kn_matr,
int in_channel, int out_channel, int height, int width,
int ksize_x, int ksize_y);
void Conv2D_cuda_optim(Matrix &out, Matrix fm, Matrix kn) {
fm.cuda(); kn.cuda(); out.cuda();
dim3 block_sz(out.d1, out.d2);
dim3 grid_sz(out.d3, out.d4);
hipLaunchKernelGGL(( conv2d_cuda_optim_kernel), dim3(grid_sz),dim3(block_sz), 0, 0, out.element, fm.element, kn.element,
kn.d2, kn.d1, fm.d3, fm.d4, kn.d3, kn.d4);
out.cpu();
}
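// Launch geometry used above: grid_sz = (out.d3, out.d4) = (H_, W_) blocks and
// block_sz = (out.d1, out.d2) = (N, F) threads per block, so each thread owns one output cell.
// Assumption: N * F must not exceed the 1024 threads-per-block hardware limit, e.g.
// N = 8 and F = 32 gives 256 threads per block, which fits.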
__global__ void conv2d_cuda_optim_kernel(float *out_matr, float *fm_matr, float *kn_matr,
int in_channel, int out_channel, int height, int width,
int ksize_x, int ksize_y) {
int batch_id = threadIdx.x, channel_id = threadIdx.y;
int row = blockIdx.x, col = blockIdx.y;
float cell_value = 0;
for (int c = 0; c < in_channel; c++) // each in-channel
for (int i = 0; i < ksize_x; i++)
for (int j = 0; j < ksize_y; j++) // each location of the kernel
cell_value += kn_matr[channel_id*in_channel*ksize_x*ksize_y + c*ksize_x*ksize_y + i*ksize_y + j] *
fm_matr[batch_id*in_channel*height*width + c*height*width + (row+i)*width + (col+j)];
// printf("[%d,%d,%d,%d] = %f\n", batch_id, channel_id, row, col, cell_value);
out_matr[batch_id*out_channel*(height - ksize_x + 1)*(width - ksize_y + 1) +
channel_id*(height - ksize_x + 1)*(width - ksize_y + 1) +
row*(width - ksize_y + 1) + col] = cell_value;
}
int main() {
//Initialize Matrix
Matrix fm(N, C, H, W), kn(F, C, K, K);
Matrix out(N, F, H-K+1, W-K+1);
Matrix truth(N, F, H-K+1, W-K+1);
fm.fill_value(1.0);
kn.fill_value(0.5);
truth.fill_value(288.0);
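// Quick arithmetic on the expected result (N, C, H, W, F, K come from matr_def.h, so the
// concrete values are an assumption): each output cell sums C*K*K products of 1.0 * 0.5,
// so truth = 288.0 implies C*K*K = 576, e.g. C = 16 with K = 6.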
printf("The feature map is filled with %f;\n",*fm.get(1,2,3,4));
printf("The kernel is filled with %f;\n",*kn.get(1,2,3,4));
clock_t st,ed;
st = clock();
Conv2D_cuda_optim(out, fm, kn);
ed = clock();
printf("It takes %f ms to calculate the convolution... ", (double)(ed-st)/CLOCKS_PER_SEC * 1000);
if (out == truth)
printf("Result is correct! (%f)\n", *out.get(1,2,3,4));
else
printf("Result is wrong! (%f)\n", *out.get(1,2,3,4));
}
|
663eb88e994a65c332e9201755157a5bdf3c5f8b.cu
|
/*
This is the cuda version of 2d-convolution with multiple thread blocks (270ms).
In this problem, the output has size [N, F, H_, W_];
so we divide the task into (H_ * W_) blocks, and each block has (N * F) threads.
*/
#include "matr_def.h"
__global__ void conv2d_cuda_optim_kernel(float *out_matr, float *fm_matr, float *kn_matr,
int in_channel, int out_channel, int height, int width,
int ksize_x, int ksize_y);
void Conv2D_cuda_optim(Matrix &out, Matrix fm, Matrix kn) {
fm.cuda(); kn.cuda(); out.cuda();
dim3 block_sz(out.d1, out.d2);
dim3 grid_sz(out.d3, out.d4);
conv2d_cuda_optim_kernel<<<grid_sz,block_sz>>>(out.element, fm.element, kn.element,
kn.d2, kn.d1, fm.d3, fm.d4, kn.d3, kn.d4);
out.cpu();
}
__global__ void conv2d_cuda_optim_kernel(float *out_matr, float *fm_matr, float *kn_matr,
int in_channel, int out_channel, int height, int width,
int ksize_x, int ksize_y) {
int batch_id = threadIdx.x, channel_id = threadIdx.y;
int row = blockIdx.x, col = blockIdx.y;
float cell_value = 0;
for (int c = 0; c < in_channel; c++) // each in-channel
for (int i = 0; i < ksize_x; i++)
for (int j = 0; j < ksize_y; j++) // each location of the kernel
cell_value += kn_matr[channel_id*in_channel*ksize_x*ksize_y + c*ksize_x*ksize_y + i*ksize_y + j] *
fm_matr[batch_id*in_channel*height*width + c*height*width + (row+i)*width + (col+j)];
// printf("[%d,%d,%d,%d] = %f\n", batch_id, channel_id, row, col, cell_value);
out_matr[batch_id*out_channel*(height - ksize_x + 1)*(width - ksize_y + 1) +
channel_id*(height - ksize_x + 1)*(width - ksize_y + 1) +
row*(width - ksize_y + 1) + col] = cell_value;
}
int main() {
//Initialize Matrix
Matrix fm(N, C, H, W), kn(F, C, K, K);
Matrix out(N, F, H-K+1, W-K+1);
Matrix truth(N, F, H-K+1, W-K+1);
fm.fill_value(1.0);
kn.fill_value(0.5);
truth.fill_value(288.0);
printf("The feature map is filled with %f;\n",*fm.get(1,2,3,4));
printf("The kernel is filled with %f;\n",*kn.get(1,2,3,4));
clock_t st,ed;
st = clock();
Conv2D_cuda_optim(out, fm, kn);
ed = clock();
printf("It takes %f ms to calculate the convolution... ", (double)(ed-st)/CLOCKS_PER_SEC * 1000);
if (out == truth)
printf("Result is correct! (%f)\n", *out.get(1,2,3,4));
else
printf("Result is wrong! (%f)\n", *out.get(1,2,3,4));
}
|
51a50b3bf85b0d35a786ac61292cf09944ca92db.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12) {
for (int i=0; i < var_1; ++i) {
if (comp >= atan2f(acosf(+0.0f - (-1.6874E-42f / (-1.8060E36f / (+1.6270E-44f + var_2)))), (+1.6197E34f - +1.4289E34f * +0.0f / -0.0f))) {
comp = (+1.8114E-37f / ceilf(var_4 + var_5 / +1.5925E34f - var_6));
if (comp == coshf(-1.7999E-35f)) {
comp = var_7 - var_8 - asinf(ldexpf(-1.3128E35f, 2));
}
for (int i=0; i < var_3; ++i) {
comp = ldexpf((var_9 - var_10 - var_11), 2);
comp = +1.6771E-35f * var_12 / +1.6151E-19f;
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13);
hipDeviceSynchronize();
return 0;
}
|
51a50b3bf85b0d35a786ac61292cf09944ca92db.cu
|
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12) {
for (int i=0; i < var_1; ++i) {
if (comp >= atan2f(acosf(+0.0f - (-1.6874E-42f / (-1.8060E36f / (+1.6270E-44f + var_2)))), (+1.6197E34f - +1.4289E34f * +0.0f / -0.0f))) {
comp = (+1.8114E-37f / ceilf(var_4 + var_5 / +1.5925E34f - var_6));
if (comp == coshf(-1.7999E-35f)) {
comp = var_7 - var_8 - asinf(ldexpf(-1.3128E35f, 2));
}
for (int i=0; i < var_3; ++i) {
comp = ldexpf((var_9 - var_10 - var_11), 2);
comp = +1.6771E-35f * var_12 / +1.6151E-19f;
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13);
cudaDeviceSynchronize();
return 0;
}
|